[llvm] af0ecfc - [RISCV] Generate pseudo instruction li

via llvm-commits llvm-commits@lists.llvm.org
Sun Nov 21 22:04:44 PST 2021


Author: wangpc
Date: 2021-11-22T14:01:37+08:00
New Revision: af0ecfccae82ade32581959d61fe86f573d08def

URL: https://github.com/llvm/llvm-project/commit/af0ecfccae82ade32581959d61fe86f573d08def
DIFF: https://github.com/llvm/llvm-project/commit/af0ecfccae82ade32581959d61fe86f573d08def.diff

LOG: [RISCV] Generate pseudo instruction li

Add an alias of `addi [x], zero, imm` to generate the pseudo
instruction li, which makes assembly much more readable.
Existing tests can be updated by running the script
`llvm/utils/update_llc_test_checks.py`.

Reviewed By: asb

Differential Revision: https://reviews.llvm.org/D112692
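
As a quick illustration (not part of the original commit message; the
register and immediate below are arbitrary examples), the alias only
changes how an ADDI-based constant materialization is printed:

    # before this patch
    addi a0, zero, 42
    # after this patch
    li a0, 42

Both forms encode the same ADDI instruction; only the printed/parsed
spelling changes. Affected FileCheck lines can be regenerated with the
in-tree script, e.g. `llvm/utils/update_llc_test_checks.py --llc-binary
<path-to-llc> <test.ll>`, where both arguments are placeholders for your
own build and test paths.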

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfo.td
    llvm/test/CodeGen/RISCV/addimm-mulimm.ll
    llvm/test/CodeGen/RISCV/alloca.ll
    llvm/test/CodeGen/RISCV/alu32.ll
    llvm/test/CodeGen/RISCV/alu64.ll
    llvm/test/CodeGen/RISCV/analyze-branch.ll
    llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
    llvm/test/CodeGen/RISCV/atomic-load-store.ll
    llvm/test/CodeGen/RISCV/atomic-rmw.ll
    llvm/test/CodeGen/RISCV/atomic-signext.ll
    llvm/test/CodeGen/RISCV/branch-relaxation.ll
    llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
    llvm/test/CodeGen/RISCV/calling-conv-half.ll
    llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
    llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
    llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
    llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
    llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
    llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
    llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
    llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
    llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
    llvm/test/CodeGen/RISCV/codemodel-lowering.ll
    llvm/test/CodeGen/RISCV/copysign-casts.ll
    llvm/test/CodeGen/RISCV/div.ll
    llvm/test/CodeGen/RISCV/double-arith.ll
    llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
    llvm/test/CodeGen/RISCV/double-br-fcmp.ll
    llvm/test/CodeGen/RISCV/double-calling-conv.ll
    llvm/test/CodeGen/RISCV/double-convert.ll
    llvm/test/CodeGen/RISCV/double-fcmp.ll
    llvm/test/CodeGen/RISCV/double-intrinsics.ll
    llvm/test/CodeGen/RISCV/double-previous-failure.ll
    llvm/test/CodeGen/RISCV/float-arith.ll
    llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
    llvm/test/CodeGen/RISCV/float-br-fcmp.ll
    llvm/test/CodeGen/RISCV/float-convert.ll
    llvm/test/CodeGen/RISCV/float-fcmp.ll
    llvm/test/CodeGen/RISCV/flt-rounds.ll
    llvm/test/CodeGen/RISCV/fp-imm.ll
    llvm/test/CodeGen/RISCV/frame.ll
    llvm/test/CodeGen/RISCV/half-arith.ll
    llvm/test/CodeGen/RISCV/half-br-fcmp.ll
    llvm/test/CodeGen/RISCV/half-convert.ll
    llvm/test/CodeGen/RISCV/half-fcmp.ll
    llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
    llvm/test/CodeGen/RISCV/i32-icmp.ll
    llvm/test/CodeGen/RISCV/imm.ll
    llvm/test/CodeGen/RISCV/indirectbr.ll
    llvm/test/CodeGen/RISCV/jumptable.ll
    llvm/test/CodeGen/RISCV/legalize-fneg.ll
    llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll
    llvm/test/CodeGen/RISCV/mul.ll
    llvm/test/CodeGen/RISCV/pr51206.ll
    llvm/test/CodeGen/RISCV/rem.ll
    llvm/test/CodeGen/RISCV/remat.ll
    llvm/test/CodeGen/RISCV/rotl-rotr.ll
    llvm/test/CodeGen/RISCV/rv32zba.ll
    llvm/test/CodeGen/RISCV/rv32zbb.ll
    llvm/test/CodeGen/RISCV/rv32zbp.ll
    llvm/test/CodeGen/RISCV/rv32zbs.ll
    llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll
    llvm/test/CodeGen/RISCV/rv64m-w-insts-legalization.ll
    llvm/test/CodeGen/RISCV/rv64zba.ll
    llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
    llvm/test/CodeGen/RISCV/rv64zbb.ll
    llvm/test/CodeGen/RISCV/rv64zbp.ll
    llvm/test/CodeGen/RISCV/rv64zbs.ll
    llvm/test/CodeGen/RISCV/rvv/allocate-lmul-2-4-8.ll
    llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
    llvm/test/CodeGen/RISCV/rvv/combine-sats.ll
    llvm/test/CodeGen/RISCV/rvv/combine-splats.ll
    llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
    llvm/test/CodeGen/RISCV/rvv/memory-args.ll
    llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
    llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
    llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
    llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll
    llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll
    llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
    llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/stepvector.ll
    llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll
    llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
    llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll
    llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
    llvm/test/CodeGen/RISCV/sadd_sat.ll
    llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
    llvm/test/CodeGen/RISCV/select-cc.ll
    llvm/test/CodeGen/RISCV/select-const.ll
    llvm/test/CodeGen/RISCV/select-constant-xor.ll
    llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
    llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
    llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
    llvm/test/CodeGen/RISCV/shift-masked-shamt.ll
    llvm/test/CodeGen/RISCV/shifts.ll
    llvm/test/CodeGen/RISCV/shrinkwrap.ll
    llvm/test/CodeGen/RISCV/sink-icmp.ll
    llvm/test/CodeGen/RISCV/split-offsets.ll
    llvm/test/CodeGen/RISCV/split-sp-adjust.ll
    llvm/test/CodeGen/RISCV/srem-lkk.ll
    llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
    llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
    llvm/test/CodeGen/RISCV/ssub_sat.ll
    llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
    llvm/test/CodeGen/RISCV/stack-slot-size.ll
    llvm/test/CodeGen/RISCV/uadd_sat.ll
    llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
    llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll
    llvm/test/CodeGen/RISCV/urem-lkk.ll
    llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
    llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
    llvm/test/CodeGen/RISCV/usub_sat.ll
    llvm/test/CodeGen/RISCV/usub_sat_plus.ll
    llvm/test/CodeGen/RISCV/vararg.ll
    llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll
    llvm/test/CodeGen/RISCV/xaluo.ll
    llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll
    llvm/test/MC/RISCV/compress-rv32i.s
    llvm/test/MC/RISCV/numeric-reg-names.s
    llvm/test/MC/RISCV/rv32i-aliases-valid.s
    llvm/test/MC/RISCV/rv64i-aliases-valid.s
    llvm/test/MC/RISCV/rv64zba-aliases-valid.s
    llvm/test/MC/RISCV/rv64zbs-aliases-valid.s
    llvm/test/MC/RISCV/rvi-aliases-valid.s
    llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/riscv_function_name.ll.expected

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index b653928ccea9..6f9cde966132 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -705,6 +705,7 @@ def PseudoLD  : PseudoLoad<"ld">;
 def PseudoSD  : PseudoStore<"sd">;
 } // Predicates = [IsRV64]
 
+def : InstAlias<"li $rd, $imm",  (ADDI GPR:$rd, X0, simm12:$imm)>;
 def : InstAlias<"mv $rd, $rs",   (ADDI GPR:$rd, GPR:$rs,       0)>;
 def : InstAlias<"not $rd, $rs",  (XORI GPR:$rd, GPR:$rs,      -1)>;
 def : InstAlias<"neg $rd, $rs",  (SUB  GPR:$rd,      X0, GPR:$rs)>;

diff --git a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
index 399361e7fb39..99912623555c 100644
--- a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
+++ b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
@@ -10,14 +10,14 @@
 define i32 @add_mul_combine_accept_a1(i32 %x) {
 ; RV32IMB-LABEL: add_mul_combine_accept_a1:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a1, zero, 29
+; RV32IMB-NEXT:    li a1, 29
 ; RV32IMB-NEXT:    mul a0, a0, a1
 ; RV32IMB-NEXT:    addi a0, a0, 1073
 ; RV32IMB-NEXT:    ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_accept_a1:
 ; RV64IMB:       # %bb.0:
-; RV64IMB-NEXT:    addi a1, zero, 29
+; RV64IMB-NEXT:    li a1, 29
 ; RV64IMB-NEXT:    mulw a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, 1073
 ; RV64IMB-NEXT:    ret
@@ -29,14 +29,14 @@ define i32 @add_mul_combine_accept_a1(i32 %x) {
 define signext i32 @add_mul_combine_accept_a2(i32 signext %x) {
 ; RV32IMB-LABEL: add_mul_combine_accept_a2:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a1, zero, 29
+; RV32IMB-NEXT:    li a1, 29
 ; RV32IMB-NEXT:    mul a0, a0, a1
 ; RV32IMB-NEXT:    addi a0, a0, 1073
 ; RV32IMB-NEXT:    ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_accept_a2:
 ; RV64IMB:       # %bb.0:
-; RV64IMB-NEXT:    addi a1, zero, 29
+; RV64IMB-NEXT:    li a1, 29
 ; RV64IMB-NEXT:    mulw a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, 1073
 ; RV64IMB-NEXT:    ret
@@ -48,7 +48,7 @@ define signext i32 @add_mul_combine_accept_a2(i32 signext %x) {
 define i64 @add_mul_combine_accept_a3(i64 %x) {
 ; RV32IMB-LABEL: add_mul_combine_accept_a3:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a2, zero, 29
+; RV32IMB-NEXT:    li a2, 29
 ; RV32IMB-NEXT:    mul a1, a1, a2
 ; RV32IMB-NEXT:    mulhu a3, a0, a2
 ; RV32IMB-NEXT:    add a1, a3, a1
@@ -60,7 +60,7 @@ define i64 @add_mul_combine_accept_a3(i64 %x) {
 ;
 ; RV64IMB-LABEL: add_mul_combine_accept_a3:
 ; RV64IMB:       # %bb.0:
-; RV64IMB-NEXT:    addi a1, zero, 29
+; RV64IMB-NEXT:    li a1, 29
 ; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    addi a0, a0, 1073
 ; RV64IMB-NEXT:    ret
@@ -72,7 +72,7 @@ define i64 @add_mul_combine_accept_a3(i64 %x) {
 define i32 @add_mul_combine_accept_b1(i32 %x) {
 ; RV32IMB-LABEL: add_mul_combine_accept_b1:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a1, zero, 23
+; RV32IMB-NEXT:    li a1, 23
 ; RV32IMB-NEXT:    mul a0, a0, a1
 ; RV32IMB-NEXT:    lui a1, 50
 ; RV32IMB-NEXT:    addi a1, a1, 1119
@@ -81,7 +81,7 @@ define i32 @add_mul_combine_accept_b1(i32 %x) {
 ;
 ; RV64IMB-LABEL: add_mul_combine_accept_b1:
 ; RV64IMB:       # %bb.0:
-; RV64IMB-NEXT:    addi a1, zero, 23
+; RV64IMB-NEXT:    li a1, 23
 ; RV64IMB-NEXT:    mulw a0, a0, a1
 ; RV64IMB-NEXT:    lui a1, 50
 ; RV64IMB-NEXT:    addiw a1, a1, 1119
@@ -95,7 +95,7 @@ define i32 @add_mul_combine_accept_b1(i32 %x) {
 define signext i32 @add_mul_combine_accept_b2(i32 signext %x) {
 ; RV32IMB-LABEL: add_mul_combine_accept_b2:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a1, zero, 23
+; RV32IMB-NEXT:    li a1, 23
 ; RV32IMB-NEXT:    mul a0, a0, a1
 ; RV32IMB-NEXT:    lui a1, 50
 ; RV32IMB-NEXT:    addi a1, a1, 1119
@@ -104,7 +104,7 @@ define signext i32 @add_mul_combine_accept_b2(i32 signext %x) {
 ;
 ; RV64IMB-LABEL: add_mul_combine_accept_b2:
 ; RV64IMB:       # %bb.0:
-; RV64IMB-NEXT:    addi a1, zero, 23
+; RV64IMB-NEXT:    li a1, 23
 ; RV64IMB-NEXT:    mulw a0, a0, a1
 ; RV64IMB-NEXT:    lui a1, 50
 ; RV64IMB-NEXT:    addiw a1, a1, 1119
@@ -118,7 +118,7 @@ define signext i32 @add_mul_combine_accept_b2(i32 signext %x) {
 define i64 @add_mul_combine_accept_b3(i64 %x) {
 ; RV32IMB-LABEL: add_mul_combine_accept_b3:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a2, zero, 23
+; RV32IMB-NEXT:    li a2, 23
 ; RV32IMB-NEXT:    mul a1, a1, a2
 ; RV32IMB-NEXT:    mulhu a3, a0, a2
 ; RV32IMB-NEXT:    add a1, a3, a1
@@ -132,7 +132,7 @@ define i64 @add_mul_combine_accept_b3(i64 %x) {
 ;
 ; RV64IMB-LABEL: add_mul_combine_accept_b3:
 ; RV64IMB:       # %bb.0:
-; RV64IMB-NEXT:    addi a1, zero, 23
+; RV64IMB-NEXT:    li a1, 23
 ; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    lui a1, 50
 ; RV64IMB-NEXT:    addiw a1, a1, 1119
@@ -147,14 +147,14 @@ define i32 @add_mul_combine_reject_a1(i32 %x) {
 ; RV32IMB-LABEL: add_mul_combine_reject_a1:
 ; RV32IMB:       # %bb.0:
 ; RV32IMB-NEXT:    addi a0, a0, 1971
-; RV32IMB-NEXT:    addi a1, zero, 29
+; RV32IMB-NEXT:    li a1, 29
 ; RV32IMB-NEXT:    mul a0, a0, a1
 ; RV32IMB-NEXT:    ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_a1:
 ; RV64IMB:       # %bb.0:
 ; RV64IMB-NEXT:    addiw a0, a0, 1971
-; RV64IMB-NEXT:    addi a1, zero, 29
+; RV64IMB-NEXT:    li a1, 29
 ; RV64IMB-NEXT:    mulw a0, a0, a1
 ; RV64IMB-NEXT:    ret
   %tmp0 = add i32 %x, 1971
@@ -166,14 +166,14 @@ define signext i32 @add_mul_combine_reject_a2(i32 signext %x) {
 ; RV32IMB-LABEL: add_mul_combine_reject_a2:
 ; RV32IMB:       # %bb.0:
 ; RV32IMB-NEXT:    addi a0, a0, 1971
-; RV32IMB-NEXT:    addi a1, zero, 29
+; RV32IMB-NEXT:    li a1, 29
 ; RV32IMB-NEXT:    mul a0, a0, a1
 ; RV32IMB-NEXT:    ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_a2:
 ; RV64IMB:       # %bb.0:
 ; RV64IMB-NEXT:    addiw a0, a0, 1971
-; RV64IMB-NEXT:    addi a1, zero, 29
+; RV64IMB-NEXT:    li a1, 29
 ; RV64IMB-NEXT:    mulw a0, a0, a1
 ; RV64IMB-NEXT:    ret
   %tmp0 = add i32 %x, 1971
@@ -184,7 +184,7 @@ define signext i32 @add_mul_combine_reject_a2(i32 signext %x) {
 define i64 @add_mul_combine_reject_a3(i64 %x) {
 ; RV32IMB-LABEL: add_mul_combine_reject_a3:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a2, zero, 29
+; RV32IMB-NEXT:    li a2, 29
 ; RV32IMB-NEXT:    mul a1, a1, a2
 ; RV32IMB-NEXT:    mulhu a3, a0, a2
 ; RV32IMB-NEXT:    add a1, a3, a1
@@ -199,7 +199,7 @@ define i64 @add_mul_combine_reject_a3(i64 %x) {
 ; RV64IMB-LABEL: add_mul_combine_reject_a3:
 ; RV64IMB:       # %bb.0:
 ; RV64IMB-NEXT:    addi a0, a0, 1971
-; RV64IMB-NEXT:    addi a1, zero, 29
+; RV64IMB-NEXT:    li a1, 29
 ; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    ret
   %tmp0 = add i64 %x, 1971
@@ -250,7 +250,7 @@ define signext i32 @add_mul_combine_reject_c2(i32 signext %x) {
 define i64 @add_mul_combine_reject_c3(i64 %x) {
 ; RV32IMB-LABEL: add_mul_combine_reject_c3:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a2, zero, 73
+; RV32IMB-NEXT:    li a2, 73
 ; RV32IMB-NEXT:    mul a1, a1, a2
 ; RV32IMB-NEXT:    mulhu a3, a0, a2
 ; RV32IMB-NEXT:    add a1, a3, a1
@@ -314,7 +314,7 @@ define signext i32 @add_mul_combine_reject_d2(i32 signext %x) {
 define i64 @add_mul_combine_reject_d3(i64 %x) {
 ; RV32IMB-LABEL: add_mul_combine_reject_d3:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a2, zero, 192
+; RV32IMB-NEXT:    li a2, 192
 ; RV32IMB-NEXT:    mulhu a2, a0, a2
 ; RV32IMB-NEXT:    sh1add a1, a1, a1
 ; RV32IMB-NEXT:    slli a1, a1, 6
@@ -343,14 +343,14 @@ define i32 @add_mul_combine_reject_e1(i32 %x) {
 ; RV32IMB-LABEL: add_mul_combine_reject_e1:
 ; RV32IMB:       # %bb.0:
 ; RV32IMB-NEXT:    addi a0, a0, 1971
-; RV32IMB-NEXT:    addi a1, zero, 29
+; RV32IMB-NEXT:    li a1, 29
 ; RV32IMB-NEXT:    mul a0, a0, a1
 ; RV32IMB-NEXT:    ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_e1:
 ; RV64IMB:       # %bb.0:
 ; RV64IMB-NEXT:    addiw a0, a0, 1971
-; RV64IMB-NEXT:    addi a1, zero, 29
+; RV64IMB-NEXT:    li a1, 29
 ; RV64IMB-NEXT:    mulw a0, a0, a1
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i32 %x, 29
@@ -362,14 +362,14 @@ define signext i32 @add_mul_combine_reject_e2(i32 signext %x) {
 ; RV32IMB-LABEL: add_mul_combine_reject_e2:
 ; RV32IMB:       # %bb.0:
 ; RV32IMB-NEXT:    addi a0, a0, 1971
-; RV32IMB-NEXT:    addi a1, zero, 29
+; RV32IMB-NEXT:    li a1, 29
 ; RV32IMB-NEXT:    mul a0, a0, a1
 ; RV32IMB-NEXT:    ret
 ;
 ; RV64IMB-LABEL: add_mul_combine_reject_e2:
 ; RV64IMB:       # %bb.0:
 ; RV64IMB-NEXT:    addiw a0, a0, 1971
-; RV64IMB-NEXT:    addi a1, zero, 29
+; RV64IMB-NEXT:    li a1, 29
 ; RV64IMB-NEXT:    mulw a0, a0, a1
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i32 %x, 29
@@ -380,7 +380,7 @@ define signext i32 @add_mul_combine_reject_e2(i32 signext %x) {
 define i64 @add_mul_combine_reject_e3(i64 %x) {
 ; RV32IMB-LABEL: add_mul_combine_reject_e3:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a2, zero, 29
+; RV32IMB-NEXT:    li a2, 29
 ; RV32IMB-NEXT:    mul a1, a1, a2
 ; RV32IMB-NEXT:    mulhu a3, a0, a2
 ; RV32IMB-NEXT:    add a1, a3, a1
@@ -395,7 +395,7 @@ define i64 @add_mul_combine_reject_e3(i64 %x) {
 ; RV64IMB-LABEL: add_mul_combine_reject_e3:
 ; RV64IMB:       # %bb.0:
 ; RV64IMB-NEXT:    addi a0, a0, 1971
-; RV64IMB-NEXT:    addi a1, zero, 29
+; RV64IMB-NEXT:    li a1, 29
 ; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i64 %x, 29
@@ -407,7 +407,7 @@ define i32 @add_mul_combine_reject_f1(i32 %x) {
 ; RV32IMB-LABEL: add_mul_combine_reject_f1:
 ; RV32IMB:       # %bb.0:
 ; RV32IMB-NEXT:    addi a0, a0, 1972
-; RV32IMB-NEXT:    addi a1, zero, 29
+; RV32IMB-NEXT:    li a1, 29
 ; RV32IMB-NEXT:    mul a0, a0, a1
 ; RV32IMB-NEXT:    addi a0, a0, 11
 ; RV32IMB-NEXT:    ret
@@ -415,7 +415,7 @@ define i32 @add_mul_combine_reject_f1(i32 %x) {
 ; RV64IMB-LABEL: add_mul_combine_reject_f1:
 ; RV64IMB:       # %bb.0:
 ; RV64IMB-NEXT:    addiw a0, a0, 1972
-; RV64IMB-NEXT:    addi a1, zero, 29
+; RV64IMB-NEXT:    li a1, 29
 ; RV64IMB-NEXT:    mulw a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, 11
 ; RV64IMB-NEXT:    ret
@@ -428,7 +428,7 @@ define signext i32 @add_mul_combine_reject_f2(i32 signext %x) {
 ; RV32IMB-LABEL: add_mul_combine_reject_f2:
 ; RV32IMB:       # %bb.0:
 ; RV32IMB-NEXT:    addi a0, a0, 1972
-; RV32IMB-NEXT:    addi a1, zero, 29
+; RV32IMB-NEXT:    li a1, 29
 ; RV32IMB-NEXT:    mul a0, a0, a1
 ; RV32IMB-NEXT:    addi a0, a0, 11
 ; RV32IMB-NEXT:    ret
@@ -436,7 +436,7 @@ define signext i32 @add_mul_combine_reject_f2(i32 signext %x) {
 ; RV64IMB-LABEL: add_mul_combine_reject_f2:
 ; RV64IMB:       # %bb.0:
 ; RV64IMB-NEXT:    addiw a0, a0, 1972
-; RV64IMB-NEXT:    addi a1, zero, 29
+; RV64IMB-NEXT:    li a1, 29
 ; RV64IMB-NEXT:    mulw a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, 11
 ; RV64IMB-NEXT:    ret
@@ -448,7 +448,7 @@ define signext i32 @add_mul_combine_reject_f2(i32 signext %x) {
 define i64 @add_mul_combine_reject_f3(i64 %x) {
 ; RV32IMB-LABEL: add_mul_combine_reject_f3:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a2, zero, 29
+; RV32IMB-NEXT:    li a2, 29
 ; RV32IMB-NEXT:    mul a1, a1, a2
 ; RV32IMB-NEXT:    mulhu a3, a0, a2
 ; RV32IMB-NEXT:    add a1, a3, a1
@@ -463,7 +463,7 @@ define i64 @add_mul_combine_reject_f3(i64 %x) {
 ; RV64IMB-LABEL: add_mul_combine_reject_f3:
 ; RV64IMB:       # %bb.0:
 ; RV64IMB-NEXT:    addi a0, a0, 1972
-; RV64IMB-NEXT:    addi a1, zero, 29
+; RV64IMB-NEXT:    li a1, 29
 ; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    addi a0, a0, 11
 ; RV64IMB-NEXT:    ret
@@ -517,7 +517,7 @@ define signext i32 @add_mul_combine_reject_g2(i32 signext %x) {
 define i64 @add_mul_combine_reject_g3(i64 %x) {
 ; RV32IMB-LABEL: add_mul_combine_reject_g3:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a2, zero, 73
+; RV32IMB-NEXT:    li a2, 73
 ; RV32IMB-NEXT:    mul a1, a1, a2
 ; RV32IMB-NEXT:    mulhu a3, a0, a2
 ; RV32IMB-NEXT:    add a1, a3, a1
@@ -545,7 +545,7 @@ define i64 @add_mul_combine_reject_g3(i64 %x) {
 define i64 @add_mul_combine_infinite_loop(i64 %x) {
 ; RV32IMB-LABEL: add_mul_combine_infinite_loop:
 ; RV32IMB:       # %bb.0:
-; RV32IMB-NEXT:    addi a2, zero, 24
+; RV32IMB-NEXT:    li a2, 24
 ; RV32IMB-NEXT:    mulhu a2, a0, a2
 ; RV32IMB-NEXT:    sh1add a1, a1, a1
 ; RV32IMB-NEXT:    sh3add a1, a1, a2
@@ -561,7 +561,7 @@ define i64 @add_mul_combine_infinite_loop(i64 %x) {
 ; RV64IMB:       # %bb.0:
 ; RV64IMB-NEXT:    addi a0, a0, 86
 ; RV64IMB-NEXT:    sh1add a0, a0, a0
-; RV64IMB-NEXT:    addi a1, zero, -16
+; RV64IMB-NEXT:    li a1, -16
 ; RV64IMB-NEXT:    sh3add a0, a0, a1
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i64 %x, 24

diff --git a/llvm/test/CodeGen/RISCV/alloca.ll b/llvm/test/CodeGen/RISCV/alloca.ll
index 1c211a09c0ff..95ef6aae2e6b 100644
--- a/llvm/test/CodeGen/RISCV/alloca.ll
+++ b/llvm/test/CodeGen/RISCV/alloca.ll
@@ -76,20 +76,20 @@ define void @alloca_callframe(i32 %n) nounwind {
 ; RV32I-NEXT:    sub a0, sp, a0
 ; RV32I-NEXT:    mv sp, a0
 ; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    addi a1, zero, 12
+; RV32I-NEXT:    li a1, 12
 ; RV32I-NEXT:    sw a1, 12(sp)
-; RV32I-NEXT:    addi a1, zero, 11
+; RV32I-NEXT:    li a1, 11
 ; RV32I-NEXT:    sw a1, 8(sp)
-; RV32I-NEXT:    addi a1, zero, 10
+; RV32I-NEXT:    li a1, 10
 ; RV32I-NEXT:    sw a1, 4(sp)
-; RV32I-NEXT:    addi t0, zero, 9
-; RV32I-NEXT:    addi a1, zero, 2
-; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 5
-; RV32I-NEXT:    addi a5, zero, 6
-; RV32I-NEXT:    addi a6, zero, 7
-; RV32I-NEXT:    addi a7, zero, 8
+; RV32I-NEXT:    li t0, 9
+; RV32I-NEXT:    li a1, 2
+; RV32I-NEXT:    li a2, 3
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 5
+; RV32I-NEXT:    li a5, 6
+; RV32I-NEXT:    li a6, 7
+; RV32I-NEXT:    li a7, 8
 ; RV32I-NEXT:    sw t0, 0(sp)
 ; RV32I-NEXT:    call func@plt
 ; RV32I-NEXT:    addi sp, sp, 16

diff --git a/llvm/test/CodeGen/RISCV/alu32.ll b/llvm/test/CodeGen/RISCV/alu32.ll
index 242aeecb7197..73b443417969 100644
--- a/llvm/test/CodeGen/RISCV/alu32.ll
+++ b/llvm/test/CodeGen/RISCV/alu32.ll
@@ -192,13 +192,13 @@ define i32 @sub(i32 %a, i32 %b) nounwind {
 define i32 @sub_negative_constant_lhs(i32 %a) nounwind {
 ; RV32I-LABEL: sub_negative_constant_lhs:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, -2
+; RV32I-NEXT:    li a1, -2
 ; RV32I-NEXT:    sub a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sub_negative_constant_lhs:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -2
+; RV64I-NEXT:    li a1, -2
 ; RV64I-NEXT:    subw a0, a1, a0
 ; RV64I-NEXT:    ret
   %1 = sub i32 -2, %a
@@ -222,13 +222,13 @@ define i32 @sll(i32 %a, i32 %b) nounwind {
 define i32 @sll_negative_constant_lhs(i32 %a) nounwind {
 ; RV32I-LABEL: sll_negative_constant_lhs:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    li a1, -1
 ; RV32I-NEXT:    sll a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sll_negative_constant_lhs:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    sllw a0, a1, a0
 ; RV64I-NEXT:    ret
   %1 = shl i32 -1, %a
@@ -300,13 +300,13 @@ define i32 @srl(i32 %a, i32 %b) nounwind {
 define i32 @srl_negative_constant_lhs(i32 %a) nounwind {
 ; RV32I-LABEL: srl_negative_constant_lhs:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    li a1, -1
 ; RV32I-NEXT:    srl a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: srl_negative_constant_lhs:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    srlw a0, a1, a0
 ; RV64I-NEXT:    ret
   %1 = lshr i32 -1, %a

diff --git a/llvm/test/CodeGen/RISCV/alu64.ll b/llvm/test/CodeGen/RISCV/alu64.ll
index 197420d58106..a64ad80a3ab0 100644
--- a/llvm/test/CodeGen/RISCV/alu64.ll
+++ b/llvm/test/CodeGen/RISCV/alu64.ll
@@ -39,11 +39,11 @@ define i64 @slti(i64 %a) nounwind {
 ; RV32I-NEXT:    beqz a1, .LBB1_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    slti a0, a1, 0
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB1_2:
 ; RV32I-NEXT:    sltiu a0, a0, 2
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
   %1 = icmp slt i64 %a, 2
   %2 = zext i1 %1 to i64
@@ -60,12 +60,12 @@ define i64 @sltiu(i64 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    beqz a1, .LBB2_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    mv a0, zero
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a0, 0
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB2_2:
 ; RV32I-NEXT:    sltiu a0, a0, 3
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
   %1 = icmp ult i64 %a, 3
   %2 = zext i1 %1 to i64
@@ -109,7 +109,7 @@ define i64 @andi(i64 %a) nounwind {
 ; RV32I-LABEL: andi:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 6
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
   %1 = and i64 %a, 6
   ret i64 %1
@@ -215,11 +215,11 @@ define i64 @sll(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    bltz a3, .LBB11_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sll a1, a0, a3
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB11_2:
 ; RV32I-NEXT:    sll a1, a1, a2
-; RV32I-NEXT:    addi a3, zero, 31
+; RV32I-NEXT:    li a3, 31
 ; RV32I-NEXT:    sub a3, a3, a2
 ; RV32I-NEXT:    srli a4, a0, 1
 ; RV32I-NEXT:    srl a3, a4, a3
@@ -241,11 +241,11 @@ define i64 @slt(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    beq a1, a3, .LBB12_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    slt a0, a1, a3
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB12_2:
 ; RV32I-NEXT:    sltu a0, a0, a2
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
   %1 = icmp slt i64 %a, %b
   %2 = zext i1 %1 to i64
@@ -263,11 +263,11 @@ define i64 @sltu(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    beq a1, a3, .LBB13_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sltu a0, a1, a3
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB13_2:
 ; RV32I-NEXT:    sltu a0, a0, a2
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
   %1 = icmp ult i64 %a, %b
   %2 = zext i1 %1 to i64
@@ -301,11 +301,11 @@ define i64 @srl(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    bltz a3, .LBB15_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srl a0, a1, a3
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB15_2:
 ; RV32I-NEXT:    srl a0, a0, a2
-; RV32I-NEXT:    addi a3, zero, 31
+; RV32I-NEXT:    li a3, 31
 ; RV32I-NEXT:    sub a3, a3, a2
 ; RV32I-NEXT:    slli a4, a1, 1
 ; RV32I-NEXT:    sll a3, a4, a3
@@ -332,7 +332,7 @@ define i64 @sra(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB16_2:
 ; RV32I-NEXT:    srl a0, a0, a2
-; RV32I-NEXT:    addi a3, zero, 31
+; RV32I-NEXT:    li a3, 31
 ; RV32I-NEXT:    sub a3, a3, a2
 ; RV32I-NEXT:    slli a4, a1, 1
 ; RV32I-NEXT:    sll a3, a4, a3

diff --git a/llvm/test/CodeGen/RISCV/analyze-branch.ll b/llvm/test/CodeGen/RISCV/analyze-branch.ll
index a932fcb47e24..e33e6b65c6f1 100644
--- a/llvm/test/CodeGen/RISCV/analyze-branch.ll
+++ b/llvm/test/CodeGen/RISCV/analyze-branch.ll
@@ -17,7 +17,7 @@ define void @test_bcc_fallthrough_taken(i32 %in) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 42
+; RV32I-NEXT:    li a1, 42
 ; RV32I-NEXT:    bne a0, a1, .LBB0_3
 ; RV32I-NEXT:  # %bb.1: # %true
 ; RV32I-NEXT:    call test_true@plt
@@ -49,7 +49,7 @@ define void @test_bcc_fallthrough_nottaken(i32 %in) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 42
+; RV32I-NEXT:    li a1, 42
 ; RV32I-NEXT:    beq a0, a1, .LBB1_3
 ; RV32I-NEXT:  # %bb.1: # %false
 ; RV32I-NEXT:    call test_false@plt

diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
index 236c422dc60e..53ae05dd055e 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
@@ -15,8 +15,8 @@ define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -26,7 +26,7 @@ define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a3, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a4, a4, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -51,8 +51,8 @@ define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -62,7 +62,7 @@ define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -91,8 +91,8 @@ define void @cmpxchg_i8_acquire_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -102,7 +102,7 @@ define void @cmpxchg_i8_acquire_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a3, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a4, a4, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -127,8 +127,8 @@ define void @cmpxchg_i8_acquire_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -138,7 +138,7 @@ define void @cmpxchg_i8_acquire_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -167,8 +167,8 @@ define void @cmpxchg_i8_acquire_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -178,7 +178,7 @@ define void @cmpxchg_i8_acquire_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a3, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a4, a4, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -203,8 +203,8 @@ define void @cmpxchg_i8_acquire_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -214,7 +214,7 @@ define void @cmpxchg_i8_acquire_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -243,8 +243,8 @@ define void @cmpxchg_i8_release_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
-; RV32I-NEXT:    addi a3, zero, 3
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 3
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -254,7 +254,7 @@ define void @cmpxchg_i8_release_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a3, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a4, a4, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -279,8 +279,8 @@ define void @cmpxchg_i8_release_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
-; RV64I-NEXT:    addi a3, zero, 3
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 3
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -290,7 +290,7 @@ define void @cmpxchg_i8_release_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -319,8 +319,8 @@ define void @cmpxchg_i8_release_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
-; RV32I-NEXT:    addi a3, zero, 3
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 3
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -330,7 +330,7 @@ define void @cmpxchg_i8_release_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a3, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a4, a4, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -355,8 +355,8 @@ define void @cmpxchg_i8_release_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
-; RV64I-NEXT:    addi a3, zero, 3
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 3
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -366,7 +366,7 @@ define void @cmpxchg_i8_release_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -395,8 +395,8 @@ define void @cmpxchg_i8_acq_rel_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -406,7 +406,7 @@ define void @cmpxchg_i8_acq_rel_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a3, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a4, a4, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -431,8 +431,8 @@ define void @cmpxchg_i8_acq_rel_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -442,7 +442,7 @@ define void @cmpxchg_i8_acq_rel_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -471,8 +471,8 @@ define void @cmpxchg_i8_acq_rel_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -482,7 +482,7 @@ define void @cmpxchg_i8_acq_rel_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a3, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a4, a4, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -507,8 +507,8 @@ define void @cmpxchg_i8_acq_rel_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -518,7 +518,7 @@ define void @cmpxchg_i8_acq_rel_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -547,8 +547,8 @@ define void @cmpxchg_i8_seq_cst_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -558,7 +558,7 @@ define void @cmpxchg_i8_seq_cst_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a3, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a4, a4, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -583,8 +583,8 @@ define void @cmpxchg_i8_seq_cst_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -594,7 +594,7 @@ define void @cmpxchg_i8_seq_cst_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -623,8 +623,8 @@ define void @cmpxchg_i8_seq_cst_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -634,7 +634,7 @@ define void @cmpxchg_i8_seq_cst_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a3, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a4, a4, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -659,8 +659,8 @@ define void @cmpxchg_i8_seq_cst_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -670,7 +670,7 @@ define void @cmpxchg_i8_seq_cst_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -699,8 +699,8 @@ define void @cmpxchg_i8_seq_cst_seq_cst(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sb a1, 11(sp)
 ; RV32I-NEXT:    addi a1, sp, 11
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 5
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -710,7 +710,7 @@ define void @cmpxchg_i8_seq_cst_seq_cst(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a3, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a4, a4, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -735,8 +735,8 @@ define void @cmpxchg_i8_seq_cst_seq_cst(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sb a1, 7(sp)
 ; RV64I-NEXT:    addi a1, sp, 7
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -746,7 +746,7 @@ define void @cmpxchg_i8_seq_cst_seq_cst(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -775,8 +775,8 @@ define void @cmpxchg_i16_monotonic_monotonic(i16* %ptr, i16 %cmp, i16 %val) noun
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -812,8 +812,8 @@ define void @cmpxchg_i16_monotonic_monotonic(i16* %ptr, i16 %cmp, i16 %val) noun
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -853,8 +853,8 @@ define void @cmpxchg_i16_acquire_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -890,8 +890,8 @@ define void @cmpxchg_i16_acquire_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -931,8 +931,8 @@ define void @cmpxchg_i16_acquire_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -968,8 +968,8 @@ define void @cmpxchg_i16_acquire_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1009,8 +1009,8 @@ define void @cmpxchg_i16_release_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 3
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 3
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1046,8 +1046,8 @@ define void @cmpxchg_i16_release_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 3
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 3
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1087,8 +1087,8 @@ define void @cmpxchg_i16_release_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 3
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 3
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1124,8 +1124,8 @@ define void @cmpxchg_i16_release_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 3
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 3
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1165,8 +1165,8 @@ define void @cmpxchg_i16_acq_rel_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1202,8 +1202,8 @@ define void @cmpxchg_i16_acq_rel_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1243,8 +1243,8 @@ define void @cmpxchg_i16_acq_rel_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1280,8 +1280,8 @@ define void @cmpxchg_i16_acq_rel_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1321,8 +1321,8 @@ define void @cmpxchg_i16_seq_cst_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1358,8 +1358,8 @@ define void @cmpxchg_i16_seq_cst_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1399,8 +1399,8 @@ define void @cmpxchg_i16_seq_cst_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1436,8 +1436,8 @@ define void @cmpxchg_i16_seq_cst_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1477,8 +1477,8 @@ define void @cmpxchg_i16_seq_cst_seq_cst(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 5
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1514,8 +1514,8 @@ define void @cmpxchg_i16_seq_cst_seq_cst(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1555,8 +1555,8 @@ define void @cmpxchg_i32_monotonic_monotonic(i32* %ptr, i32 %cmp, i32 %val) noun
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1579,8 +1579,8 @@ define void @cmpxchg_i32_monotonic_monotonic(i32* %ptr, i32 %cmp, i32 %val) noun
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1608,8 +1608,8 @@ define void @cmpxchg_i32_acquire_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwi
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1632,8 +1632,8 @@ define void @cmpxchg_i32_acquire_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwi
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1661,8 +1661,8 @@ define void @cmpxchg_i32_acquire_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1685,8 +1685,8 @@ define void @cmpxchg_i32_acquire_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1714,8 +1714,8 @@ define void @cmpxchg_i32_release_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwi
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a3, zero, 3
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 3
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1738,8 +1738,8 @@ define void @cmpxchg_i32_release_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwi
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    addi a3, zero, 3
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 3
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1767,8 +1767,8 @@ define void @cmpxchg_i32_release_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a3, zero, 3
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 3
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1791,8 +1791,8 @@ define void @cmpxchg_i32_release_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    addi a3, zero, 3
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 3
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1820,8 +1820,8 @@ define void @cmpxchg_i32_acq_rel_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwi
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1844,8 +1844,8 @@ define void @cmpxchg_i32_acq_rel_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwi
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1873,8 +1873,8 @@ define void @cmpxchg_i32_acq_rel_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1897,8 +1897,8 @@ define void @cmpxchg_i32_acq_rel_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1926,8 +1926,8 @@ define void @cmpxchg_i32_seq_cst_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwi
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1950,8 +1950,8 @@ define void @cmpxchg_i32_seq_cst_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwi
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1979,8 +1979,8 @@ define void @cmpxchg_i32_seq_cst_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2003,8 +2003,8 @@ define void @cmpxchg_i32_seq_cst_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2032,8 +2032,8 @@ define void @cmpxchg_i32_seq_cst_seq_cst(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw a1, 8(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 5
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2056,8 +2056,8 @@ define void @cmpxchg_i32_seq_cst_seq_cst(i32* %ptr, i32 %cmp, i32 %val) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sw a1, 4(sp)
 ; RV64I-NEXT:    addi a1, sp, 4
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2088,8 +2088,8 @@ define void @cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %cmp, i64 %val) noun
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a4
-; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a4, 0
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2104,8 +2104,8 @@ define void @cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %cmp, i64 %val) noun
 ; RV32IA-NEXT:    mv a1, sp
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a4
-; RV32IA-NEXT:    mv a4, zero
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -2117,8 +2117,8 @@ define void @cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %cmp, i64 %val) noun
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2147,10 +2147,10 @@ define void @cmpxchg_i64_acquire_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2164,10 +2164,10 @@ define void @cmpxchg_i64_acquire_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
 ; RV32IA-NEXT:    mv a1, sp
-; RV32IA-NEXT:    addi a4, zero, 2
+; RV32IA-NEXT:    li a4, 2
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -2179,8 +2179,8 @@ define void @cmpxchg_i64_acquire_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2209,8 +2209,8 @@ define void @cmpxchg_i64_acquire_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    li a4, 2
+; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a6
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
@@ -2226,8 +2226,8 @@ define void @cmpxchg_i64_acquire_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
 ; RV32IA-NEXT:    mv a1, sp
-; RV32IA-NEXT:    addi a4, zero, 2
-; RV32IA-NEXT:    addi a5, zero, 2
+; RV32IA-NEXT:    li a4, 2
+; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a6
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
@@ -2241,8 +2241,8 @@ define void @cmpxchg_i64_acquire_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2271,10 +2271,10 @@ define void @cmpxchg_i64_release_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a4, zero, 3
+; RV32I-NEXT:    li a4, 3
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2288,10 +2288,10 @@ define void @cmpxchg_i64_release_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
 ; RV32IA-NEXT:    mv a1, sp
-; RV32IA-NEXT:    addi a4, zero, 3
+; RV32IA-NEXT:    li a4, 3
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -2303,8 +2303,8 @@ define void @cmpxchg_i64_release_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 3
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 3
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2333,8 +2333,8 @@ define void @cmpxchg_i64_release_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a4, zero, 3
-; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    li a4, 3
+; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a6
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
@@ -2350,8 +2350,8 @@ define void @cmpxchg_i64_release_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
 ; RV32IA-NEXT:    mv a1, sp
-; RV32IA-NEXT:    addi a4, zero, 3
-; RV32IA-NEXT:    addi a5, zero, 2
+; RV32IA-NEXT:    li a4, 3
+; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a6
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
@@ -2365,8 +2365,8 @@ define void @cmpxchg_i64_release_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 3
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 3
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2395,10 +2395,10 @@ define void @cmpxchg_i64_acq_rel_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a4, zero, 4
+; RV32I-NEXT:    li a4, 4
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2412,10 +2412,10 @@ define void @cmpxchg_i64_acq_rel_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
 ; RV32IA-NEXT:    mv a1, sp
-; RV32IA-NEXT:    addi a4, zero, 4
+; RV32IA-NEXT:    li a4, 4
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -2427,8 +2427,8 @@ define void @cmpxchg_i64_acq_rel_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2457,8 +2457,8 @@ define void @cmpxchg_i64_acq_rel_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a4, zero, 4
-; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    li a4, 4
+; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a6
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
@@ -2474,8 +2474,8 @@ define void @cmpxchg_i64_acq_rel_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
 ; RV32IA-NEXT:    mv a1, sp
-; RV32IA-NEXT:    addi a4, zero, 4
-; RV32IA-NEXT:    addi a5, zero, 2
+; RV32IA-NEXT:    li a4, 4
+; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a6
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
@@ -2489,8 +2489,8 @@ define void @cmpxchg_i64_acq_rel_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2519,10 +2519,10 @@ define void @cmpxchg_i64_seq_cst_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a4, zero, 5
+; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a5
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2536,10 +2536,10 @@ define void @cmpxchg_i64_seq_cst_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
 ; RV32IA-NEXT:    mv a1, sp
-; RV32IA-NEXT:    addi a4, zero, 5
+; RV32IA-NEXT:    li a4, 5
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a5
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -2551,8 +2551,8 @@ define void @cmpxchg_i64_seq_cst_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2581,8 +2581,8 @@ define void @cmpxchg_i64_seq_cst_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a4, zero, 5
-; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    li a4, 5
+; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a6
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
@@ -2598,8 +2598,8 @@ define void @cmpxchg_i64_seq_cst_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
 ; RV32IA-NEXT:    mv a1, sp
-; RV32IA-NEXT:    addi a4, zero, 5
-; RV32IA-NEXT:    addi a5, zero, 2
+; RV32IA-NEXT:    li a4, 5
+; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a6
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
@@ -2613,8 +2613,8 @@ define void @cmpxchg_i64_seq_cst_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2643,8 +2643,8 @@ define void @cmpxchg_i64_seq_cst_seq_cst(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32I-NEXT:    sw a2, 4(sp)
 ; RV32I-NEXT:    sw a1, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a4, zero, 5
-; RV32I-NEXT:    addi a5, zero, 5
+; RV32I-NEXT:    li a4, 5
+; RV32I-NEXT:    li a5, 5
 ; RV32I-NEXT:    mv a2, a3
 ; RV32I-NEXT:    mv a3, a6
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
@@ -2660,8 +2660,8 @@ define void @cmpxchg_i64_seq_cst_seq_cst(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV32IA-NEXT:    sw a2, 4(sp)
 ; RV32IA-NEXT:    sw a1, 0(sp)
 ; RV32IA-NEXT:    mv a1, sp
-; RV32IA-NEXT:    addi a4, zero, 5
-; RV32IA-NEXT:    addi a5, zero, 5
+; RV32IA-NEXT:    li a4, 5
+; RV32IA-NEXT:    li a5, 5
 ; RV32IA-NEXT:    mv a2, a3
 ; RV32IA-NEXT:    mv a3, a6
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
@@ -2675,8 +2675,8 @@ define void @cmpxchg_i64_seq_cst_seq_cst(i64* %ptr, i64 %cmp, i64 %val) nounwind
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd a1, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16

diff --git a/llvm/test/CodeGen/RISCV/atomic-load-store.ll b/llvm/test/CodeGen/RISCV/atomic-load-store.ll
index 60a65d2f214b..ddf6f6bd5881 100644
--- a/llvm/test/CodeGen/RISCV/atomic-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-load-store.ll
@@ -13,7 +13,7 @@ define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __atomic_load_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -28,7 +28,7 @@ define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __atomic_load_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -47,7 +47,7 @@ define i8 @atomic_load_i8_monotonic(i8 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __atomic_load_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -62,7 +62,7 @@ define i8 @atomic_load_i8_monotonic(i8 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __atomic_load_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -81,7 +81,7 @@ define i8 @atomic_load_i8_acquire(i8 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 2
+; RV32I-NEXT:    li a1, 2
 ; RV32I-NEXT:    call __atomic_load_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -97,7 +97,7 @@ define i8 @atomic_load_i8_acquire(i8 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, zero, 2
+; RV64I-NEXT:    li a1, 2
 ; RV64I-NEXT:    call __atomic_load_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -117,7 +117,7 @@ define i8 @atomic_load_i8_seq_cst(i8 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    li a1, 5
 ; RV32I-NEXT:    call __atomic_load_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -134,7 +134,7 @@ define i8 @atomic_load_i8_seq_cst(i8 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    li a1, 5
 ; RV64I-NEXT:    call __atomic_load_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -155,7 +155,7 @@ define i16 @atomic_load_i16_unordered(i16 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __atomic_load_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -170,7 +170,7 @@ define i16 @atomic_load_i16_unordered(i16 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __atomic_load_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -189,7 +189,7 @@ define i16 @atomic_load_i16_monotonic(i16 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __atomic_load_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -204,7 +204,7 @@ define i16 @atomic_load_i16_monotonic(i16 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __atomic_load_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -223,7 +223,7 @@ define i16 @atomic_load_i16_acquire(i16 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 2
+; RV32I-NEXT:    li a1, 2
 ; RV32I-NEXT:    call __atomic_load_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -239,7 +239,7 @@ define i16 @atomic_load_i16_acquire(i16 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, zero, 2
+; RV64I-NEXT:    li a1, 2
 ; RV64I-NEXT:    call __atomic_load_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -259,7 +259,7 @@ define i16 @atomic_load_i16_seq_cst(i16 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    li a1, 5
 ; RV32I-NEXT:    call __atomic_load_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -276,7 +276,7 @@ define i16 @atomic_load_i16_seq_cst(i16 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    li a1, 5
 ; RV64I-NEXT:    call __atomic_load_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -297,7 +297,7 @@ define i32 @atomic_load_i32_unordered(i32 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __atomic_load_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -312,7 +312,7 @@ define i32 @atomic_load_i32_unordered(i32 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __atomic_load_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -331,7 +331,7 @@ define i32 @atomic_load_i32_monotonic(i32 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __atomic_load_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -346,7 +346,7 @@ define i32 @atomic_load_i32_monotonic(i32 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __atomic_load_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -365,7 +365,7 @@ define i32 @atomic_load_i32_acquire(i32 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 2
+; RV32I-NEXT:    li a1, 2
 ; RV32I-NEXT:    call __atomic_load_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -381,7 +381,7 @@ define i32 @atomic_load_i32_acquire(i32 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, zero, 2
+; RV64I-NEXT:    li a1, 2
 ; RV64I-NEXT:    call __atomic_load_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -401,7 +401,7 @@ define i32 @atomic_load_i32_seq_cst(i32 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    li a1, 5
 ; RV32I-NEXT:    call __atomic_load_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -418,7 +418,7 @@ define i32 @atomic_load_i32_seq_cst(i32 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    li a1, 5
 ; RV64I-NEXT:    call __atomic_load_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -439,7 +439,7 @@ define i64 @atomic_load_i64_unordered(i64 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __atomic_load_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -449,7 +449,7 @@ define i64 @atomic_load_i64_unordered(i64 *%a) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a1, zero
+; RV32IA-NEXT:    li a1, 0
 ; RV32IA-NEXT:    call __atomic_load_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -459,7 +459,7 @@ define i64 @atomic_load_i64_unordered(i64 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __atomic_load_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -478,7 +478,7 @@ define i64 @atomic_load_i64_monotonic(i64 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __atomic_load_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -488,7 +488,7 @@ define i64 @atomic_load_i64_monotonic(i64 *%a) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a1, zero
+; RV32IA-NEXT:    li a1, 0
 ; RV32IA-NEXT:    call __atomic_load_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -498,7 +498,7 @@ define i64 @atomic_load_i64_monotonic(i64 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __atomic_load_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -517,7 +517,7 @@ define i64 @atomic_load_i64_acquire(i64 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 2
+; RV32I-NEXT:    li a1, 2
 ; RV32I-NEXT:    call __atomic_load_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -527,7 +527,7 @@ define i64 @atomic_load_i64_acquire(i64 *%a) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a1, zero, 2
+; RV32IA-NEXT:    li a1, 2
 ; RV32IA-NEXT:    call __atomic_load_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -537,7 +537,7 @@ define i64 @atomic_load_i64_acquire(i64 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, zero, 2
+; RV64I-NEXT:    li a1, 2
 ; RV64I-NEXT:    call __atomic_load_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -557,7 +557,7 @@ define i64 @atomic_load_i64_seq_cst(i64 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    li a1, 5
 ; RV32I-NEXT:    call __atomic_load_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -567,7 +567,7 @@ define i64 @atomic_load_i64_seq_cst(i64 *%a) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a1, zero, 5
+; RV32IA-NEXT:    li a1, 5
 ; RV32IA-NEXT:    call __atomic_load_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -577,7 +577,7 @@ define i64 @atomic_load_i64_seq_cst(i64 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    li a1, 5
 ; RV64I-NEXT:    call __atomic_load_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -598,7 +598,7 @@ define void @atomic_store_i8_unordered(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_store_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -613,7 +613,7 @@ define void @atomic_store_i8_unordered(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_store_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -632,7 +632,7 @@ define void @atomic_store_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_store_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -647,7 +647,7 @@ define void @atomic_store_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_store_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -666,7 +666,7 @@ define void @atomic_store_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_store_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -682,7 +682,7 @@ define void @atomic_store_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_store_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -702,7 +702,7 @@ define void @atomic_store_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_store_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -718,7 +718,7 @@ define void @atomic_store_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_store_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -738,7 +738,7 @@ define void @atomic_store_i16_unordered(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_store_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -753,7 +753,7 @@ define void @atomic_store_i16_unordered(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_store_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -772,7 +772,7 @@ define void @atomic_store_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_store_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -787,7 +787,7 @@ define void @atomic_store_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_store_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -806,7 +806,7 @@ define void @atomic_store_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_store_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -822,7 +822,7 @@ define void @atomic_store_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_store_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -842,7 +842,7 @@ define void @atomic_store_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_store_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -858,7 +858,7 @@ define void @atomic_store_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_store_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -878,7 +878,7 @@ define void @atomic_store_i32_unordered(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_store_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -893,7 +893,7 @@ define void @atomic_store_i32_unordered(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_store_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -912,7 +912,7 @@ define void @atomic_store_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_store_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -927,7 +927,7 @@ define void @atomic_store_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_store_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -946,7 +946,7 @@ define void @atomic_store_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_store_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -962,7 +962,7 @@ define void @atomic_store_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_store_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -982,7 +982,7 @@ define void @atomic_store_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_store_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -998,7 +998,7 @@ define void @atomic_store_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_store_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1018,7 +1018,7 @@ define void @atomic_store_i64_unordered(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __atomic_store_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1028,7 +1028,7 @@ define void @atomic_store_i64_unordered(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a3, zero
+; RV32IA-NEXT:    li a3, 0
 ; RV32IA-NEXT:    call __atomic_store_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -1038,7 +1038,7 @@ define void @atomic_store_i64_unordered(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_store_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1057,7 +1057,7 @@ define void @atomic_store_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __atomic_store_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1067,7 +1067,7 @@ define void @atomic_store_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a3, zero
+; RV32IA-NEXT:    li a3, 0
 ; RV32IA-NEXT:    call __atomic_store_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -1077,7 +1077,7 @@ define void @atomic_store_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_store_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1096,7 +1096,7 @@ define void @atomic_store_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    call __atomic_store_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1106,7 +1106,7 @@ define void @atomic_store_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 3
+; RV32IA-NEXT:    li a3, 3
 ; RV32IA-NEXT:    call __atomic_store_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -1116,7 +1116,7 @@ define void @atomic_store_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_store_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1136,7 +1136,7 @@ define void @atomic_store_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    call __atomic_store_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1146,7 +1146,7 @@ define void @atomic_store_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 5
+; RV32IA-NEXT:    li a3, 5
 ; RV32IA-NEXT:    call __atomic_store_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -1156,7 +1156,7 @@ define void @atomic_store_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_store_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16

diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
index eaf650a9739b..6f64d8a2f10e 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -13,7 +13,7 @@ define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_exchange_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -23,7 +23,7 @@ define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -43,7 +43,7 @@ define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_exchange_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -53,7 +53,7 @@ define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -77,7 +77,7 @@ define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_exchange_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -87,7 +87,7 @@ define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -107,7 +107,7 @@ define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_exchange_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -117,7 +117,7 @@ define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -141,7 +141,7 @@ define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_exchange_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -151,7 +151,7 @@ define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -171,7 +171,7 @@ define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_exchange_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -181,7 +181,7 @@ define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -205,7 +205,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_exchange_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -215,7 +215,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -235,7 +235,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_exchange_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -245,7 +245,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -269,7 +269,7 @@ define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_exchange_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -279,7 +279,7 @@ define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -299,7 +299,7 @@ define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_exchange_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -309,7 +309,7 @@ define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -333,7 +333,7 @@ define i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_add_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -343,7 +343,7 @@ define i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -363,7 +363,7 @@ define i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_add_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -373,7 +373,7 @@ define i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -397,7 +397,7 @@ define i8 @atomicrmw_add_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_add_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -407,7 +407,7 @@ define i8 @atomicrmw_add_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -427,7 +427,7 @@ define i8 @atomicrmw_add_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_add_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -437,7 +437,7 @@ define i8 @atomicrmw_add_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -461,7 +461,7 @@ define i8 @atomicrmw_add_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_add_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -471,7 +471,7 @@ define i8 @atomicrmw_add_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -491,7 +491,7 @@ define i8 @atomicrmw_add_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_add_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -501,7 +501,7 @@ define i8 @atomicrmw_add_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -525,7 +525,7 @@ define i8 @atomicrmw_add_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_add_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -535,7 +535,7 @@ define i8 @atomicrmw_add_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -555,7 +555,7 @@ define i8 @atomicrmw_add_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_add_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -565,7 +565,7 @@ define i8 @atomicrmw_add_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -589,7 +589,7 @@ define i8 @atomicrmw_add_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_add_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -599,7 +599,7 @@ define i8 @atomicrmw_add_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -619,7 +619,7 @@ define i8 @atomicrmw_add_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_add_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -629,7 +629,7 @@ define i8 @atomicrmw_add_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -653,7 +653,7 @@ define i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -663,7 +663,7 @@ define i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -683,7 +683,7 @@ define i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -693,7 +693,7 @@ define i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -717,7 +717,7 @@ define i8 @atomicrmw_sub_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -727,7 +727,7 @@ define i8 @atomicrmw_sub_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -747,7 +747,7 @@ define i8 @atomicrmw_sub_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -757,7 +757,7 @@ define i8 @atomicrmw_sub_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -781,7 +781,7 @@ define i8 @atomicrmw_sub_i8_release(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -791,7 +791,7 @@ define i8 @atomicrmw_sub_i8_release(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -811,7 +811,7 @@ define i8 @atomicrmw_sub_i8_release(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -821,7 +821,7 @@ define i8 @atomicrmw_sub_i8_release(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -845,7 +845,7 @@ define i8 @atomicrmw_sub_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -855,7 +855,7 @@ define i8 @atomicrmw_sub_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -875,7 +875,7 @@ define i8 @atomicrmw_sub_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -885,7 +885,7 @@ define i8 @atomicrmw_sub_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -909,7 +909,7 @@ define i8 @atomicrmw_sub_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -919,7 +919,7 @@ define i8 @atomicrmw_sub_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -939,7 +939,7 @@ define i8 @atomicrmw_sub_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -949,7 +949,7 @@ define i8 @atomicrmw_sub_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -973,7 +973,7 @@ define i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_and_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -983,7 +983,7 @@ define i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    not a3, a3
 ; RV32IA-NEXT:    andi a1, a1, 255
@@ -997,7 +997,7 @@ define i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_and_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1007,7 +1007,7 @@ define i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    not a3, a3
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -1025,7 +1025,7 @@ define i8 @atomicrmw_and_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_and_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1035,7 +1035,7 @@ define i8 @atomicrmw_and_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    not a3, a3
 ; RV32IA-NEXT:    andi a1, a1, 255
@@ -1049,7 +1049,7 @@ define i8 @atomicrmw_and_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_and_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1059,7 +1059,7 @@ define i8 @atomicrmw_and_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    not a3, a3
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -1077,7 +1077,7 @@ define i8 @atomicrmw_and_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_and_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1087,7 +1087,7 @@ define i8 @atomicrmw_and_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    not a3, a3
 ; RV32IA-NEXT:    andi a1, a1, 255
@@ -1101,7 +1101,7 @@ define i8 @atomicrmw_and_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_and_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1111,7 +1111,7 @@ define i8 @atomicrmw_and_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    not a3, a3
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -1129,7 +1129,7 @@ define i8 @atomicrmw_and_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_and_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1139,7 +1139,7 @@ define i8 @atomicrmw_and_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    not a3, a3
 ; RV32IA-NEXT:    andi a1, a1, 255
@@ -1153,7 +1153,7 @@ define i8 @atomicrmw_and_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_and_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1163,7 +1163,7 @@ define i8 @atomicrmw_and_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    not a3, a3
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -1181,7 +1181,7 @@ define i8 @atomicrmw_and_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_and_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1191,7 +1191,7 @@ define i8 @atomicrmw_and_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    not a3, a3
 ; RV32IA-NEXT:    andi a1, a1, 255
@@ -1205,7 +1205,7 @@ define i8 @atomicrmw_and_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_and_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1215,7 +1215,7 @@ define i8 @atomicrmw_and_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    not a3, a3
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -1233,7 +1233,7 @@ define i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1243,7 +1243,7 @@ define i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -1264,7 +1264,7 @@ define i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1274,7 +1274,7 @@ define i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -1299,7 +1299,7 @@ define i8 @atomicrmw_nand_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1309,7 +1309,7 @@ define i8 @atomicrmw_nand_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -1330,7 +1330,7 @@ define i8 @atomicrmw_nand_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1340,7 +1340,7 @@ define i8 @atomicrmw_nand_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -1365,7 +1365,7 @@ define i8 @atomicrmw_nand_i8_release(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1375,7 +1375,7 @@ define i8 @atomicrmw_nand_i8_release(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -1396,7 +1396,7 @@ define i8 @atomicrmw_nand_i8_release(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1406,7 +1406,7 @@ define i8 @atomicrmw_nand_i8_release(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -1431,7 +1431,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1441,7 +1441,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -1462,7 +1462,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1472,7 +1472,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -1497,7 +1497,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1507,7 +1507,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -1528,7 +1528,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1538,7 +1538,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -1563,7 +1563,7 @@ define i8 @atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_or_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1583,7 +1583,7 @@ define i8 @atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_or_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1607,7 +1607,7 @@ define i8 @atomicrmw_or_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_or_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1627,7 +1627,7 @@ define i8 @atomicrmw_or_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_or_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1651,7 +1651,7 @@ define i8 @atomicrmw_or_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_or_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1671,7 +1671,7 @@ define i8 @atomicrmw_or_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_or_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1695,7 +1695,7 @@ define i8 @atomicrmw_or_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_or_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1715,7 +1715,7 @@ define i8 @atomicrmw_or_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_or_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1739,7 +1739,7 @@ define i8 @atomicrmw_or_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_or_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1759,7 +1759,7 @@ define i8 @atomicrmw_or_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_or_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1783,7 +1783,7 @@ define i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1803,7 +1803,7 @@ define i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1827,7 +1827,7 @@ define i8 @atomicrmw_xor_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1847,7 +1847,7 @@ define i8 @atomicrmw_xor_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1871,7 +1871,7 @@ define i8 @atomicrmw_xor_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1891,7 +1891,7 @@ define i8 @atomicrmw_xor_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1915,7 +1915,7 @@ define i8 @atomicrmw_xor_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1935,7 +1935,7 @@ define i8 @atomicrmw_xor_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1959,7 +1959,7 @@ define i8 @atomicrmw_xor_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1979,7 +1979,7 @@ define i8 @atomicrmw_xor_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2017,8 +2017,8 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB35_4
@@ -2046,12 +2046,12 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
 ; RV32IA-NEXT:    andi a3, a0, 24
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a7, a4, a0
 ; RV32IA-NEXT:    slli a1, a1, 24
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 24
+; RV32IA-NEXT:    li a5, 24
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB35_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a5, (a6)
@@ -2089,8 +2089,8 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB35_4
@@ -2118,12 +2118,12 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
 ; RV64IA-NEXT:    andi a3, a0, 24
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a7, a4, a0
 ; RV64IA-NEXT:    slli a1, a1, 56
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 56
+; RV64IA-NEXT:    li a5, 56
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB35_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a5, (a6)
@@ -2164,8 +2164,8 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB36_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
@@ -2194,12 +2194,12 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
 ; RV32IA-NEXT:    andi a3, a0, 24
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a7, a4, a0
 ; RV32IA-NEXT:    slli a1, a1, 24
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 24
+; RV32IA-NEXT:    li a5, 24
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB36_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a5, (a6)
@@ -2236,8 +2236,8 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB36_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
@@ -2266,12 +2266,12 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
 ; RV64IA-NEXT:    andi a3, a0, 24
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a7, a4, a0
 ; RV64IA-NEXT:    slli a1, a1, 56
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 56
+; RV64IA-NEXT:    li a5, 56
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB36_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a5, (a6)
@@ -2312,9 +2312,9 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB37_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB37_4
@@ -2342,12 +2342,12 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
 ; RV32IA-NEXT:    andi a3, a0, 24
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a7, a4, a0
 ; RV32IA-NEXT:    slli a1, a1, 24
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 24
+; RV32IA-NEXT:    li a5, 24
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB37_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a5, (a6)
@@ -2384,9 +2384,9 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB37_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
-; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB37_4
@@ -2414,12 +2414,12 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
 ; RV64IA-NEXT:    andi a3, a0, 24
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a7, a4, a0
 ; RV64IA-NEXT:    slli a1, a1, 56
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 56
+; RV64IA-NEXT:    li a5, 56
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB37_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a5, (a6)
@@ -2460,8 +2460,8 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB38_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
@@ -2490,12 +2490,12 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
 ; RV32IA-NEXT:    andi a3, a0, 24
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a7, a4, a0
 ; RV32IA-NEXT:    slli a1, a1, 24
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 24
+; RV32IA-NEXT:    li a5, 24
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB38_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a5, (a6)
@@ -2532,8 +2532,8 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB38_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
@@ -2562,12 +2562,12 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
 ; RV64IA-NEXT:    andi a3, a0, 24
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a7, a4, a0
 ; RV64IA-NEXT:    slli a1, a1, 56
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 56
+; RV64IA-NEXT:    li a5, 56
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB38_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a5, (a6)
@@ -2608,8 +2608,8 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB39_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 5
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
@@ -2638,12 +2638,12 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
 ; RV32IA-NEXT:    andi a3, a0, 24
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a7, a4, a0
 ; RV32IA-NEXT:    slli a1, a1, 24
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 24
+; RV32IA-NEXT:    li a5, 24
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB39_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a5, (a6)
@@ -2680,8 +2680,8 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB39_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
@@ -2710,12 +2710,12 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
 ; RV64IA-NEXT:    andi a3, a0, 24
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a7, a4, a0
 ; RV64IA-NEXT:    slli a1, a1, 56
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 56
+; RV64IA-NEXT:    li a5, 56
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB39_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a5, (a6)
@@ -2757,8 +2757,8 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB40_4
@@ -2786,12 +2786,12 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
 ; RV32IA-NEXT:    andi a3, a0, 24
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a7, a4, a0
 ; RV32IA-NEXT:    slli a1, a1, 24
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 24
+; RV32IA-NEXT:    li a5, 24
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a5, (a6)
@@ -2829,8 +2829,8 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB40_4
@@ -2858,12 +2858,12 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
 ; RV64IA-NEXT:    andi a3, a0, 24
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a7, a4, a0
 ; RV64IA-NEXT:    slli a1, a1, 56
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 56
+; RV64IA-NEXT:    li a5, 56
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a5, (a6)
@@ -2904,8 +2904,8 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB41_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
@@ -2934,12 +2934,12 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
 ; RV32IA-NEXT:    andi a3, a0, 24
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a7, a4, a0
 ; RV32IA-NEXT:    slli a1, a1, 24
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 24
+; RV32IA-NEXT:    li a5, 24
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a5, (a6)
@@ -2976,8 +2976,8 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB41_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
@@ -3006,12 +3006,12 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
 ; RV64IA-NEXT:    andi a3, a0, 24
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a7, a4, a0
 ; RV64IA-NEXT:    slli a1, a1, 56
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 56
+; RV64IA-NEXT:    li a5, 56
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a5, (a6)
@@ -3052,9 +3052,9 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB42_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB42_4
@@ -3082,12 +3082,12 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
 ; RV32IA-NEXT:    andi a3, a0, 24
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a7, a4, a0
 ; RV32IA-NEXT:    slli a1, a1, 24
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 24
+; RV32IA-NEXT:    li a5, 24
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB42_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a5, (a6)
@@ -3124,9 +3124,9 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB42_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
-; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB42_4
@@ -3154,12 +3154,12 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
 ; RV64IA-NEXT:    andi a3, a0, 24
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a7, a4, a0
 ; RV64IA-NEXT:    slli a1, a1, 56
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 56
+; RV64IA-NEXT:    li a5, 56
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB42_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a5, (a6)
@@ -3200,8 +3200,8 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB43_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
@@ -3230,12 +3230,12 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
 ; RV32IA-NEXT:    andi a3, a0, 24
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a7, a4, a0
 ; RV32IA-NEXT:    slli a1, a1, 24
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 24
+; RV32IA-NEXT:    li a5, 24
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB43_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a5, (a6)
@@ -3272,8 +3272,8 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB43_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
@@ -3302,12 +3302,12 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
 ; RV64IA-NEXT:    andi a3, a0, 24
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a7, a4, a0
 ; RV64IA-NEXT:    slli a1, a1, 56
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 56
+; RV64IA-NEXT:    li a5, 56
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB43_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a5, (a6)
@@ -3348,8 +3348,8 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB44_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 5
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
@@ -3378,12 +3378,12 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
 ; RV32IA-NEXT:    andi a3, a0, 24
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a7, a4, a0
 ; RV32IA-NEXT:    slli a1, a1, 24
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 24
+; RV32IA-NEXT:    li a5, 24
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a5, (a6)
@@ -3420,8 +3420,8 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB44_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
@@ -3450,12 +3450,12 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
 ; RV64IA-NEXT:    andi a3, a0, 24
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a7, a4, a0
 ; RV64IA-NEXT:    slli a1, a1, 56
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 56
+; RV64IA-NEXT:    li a5, 56
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a5, (a6)
@@ -3496,8 +3496,8 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB45_4
@@ -3523,7 +3523,7 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -3560,8 +3560,8 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB45_4
@@ -3587,7 +3587,7 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -3627,8 +3627,8 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB46_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
@@ -3655,7 +3655,7 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -3691,8 +3691,8 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB46_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
@@ -3719,7 +3719,7 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -3759,9 +3759,9 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB47_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB47_4
@@ -3787,7 +3787,7 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -3823,9 +3823,9 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB47_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
-; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB47_4
@@ -3851,7 +3851,7 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -3891,8 +3891,8 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB48_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
@@ -3919,7 +3919,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -3955,8 +3955,8 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB48_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
@@ -3983,7 +3983,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -4023,8 +4023,8 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB49_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 5
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
@@ -4051,7 +4051,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -4087,8 +4087,8 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB49_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
@@ -4115,7 +4115,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -4156,8 +4156,8 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB50_4
@@ -4183,7 +4183,7 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -4220,8 +4220,8 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB50_4
@@ -4247,7 +4247,7 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -4287,8 +4287,8 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB51_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
@@ -4315,7 +4315,7 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -4351,8 +4351,8 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB51_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
@@ -4379,7 +4379,7 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -4419,9 +4419,9 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB52_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB52_4
@@ -4447,7 +4447,7 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -4483,9 +4483,9 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB52_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
-; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB52_4
@@ -4511,7 +4511,7 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -4551,8 +4551,8 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB53_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
@@ -4579,7 +4579,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -4615,8 +4615,8 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB53_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
@@ -4643,7 +4643,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -4683,8 +4683,8 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB54_2 Depth=1
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 5
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
@@ -4711,7 +4711,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -4747,8 +4747,8 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB54_2 Depth=1
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
@@ -4775,7 +4775,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -4803,7 +4803,7 @@ define i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -4834,7 +4834,7 @@ define i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -4869,7 +4869,7 @@ define i16 @atomicrmw_xchg_i16_acquire(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -4900,7 +4900,7 @@ define i16 @atomicrmw_xchg_i16_acquire(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -4935,7 +4935,7 @@ define i16 @atomicrmw_xchg_i16_release(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -4966,7 +4966,7 @@ define i16 @atomicrmw_xchg_i16_release(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -5001,7 +5001,7 @@ define i16 @atomicrmw_xchg_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -5032,7 +5032,7 @@ define i16 @atomicrmw_xchg_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -5067,7 +5067,7 @@ define i16 @atomicrmw_xchg_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_exchange_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -5098,7 +5098,7 @@ define i16 @atomicrmw_xchg_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_exchange_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -5133,7 +5133,7 @@ define i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_add_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -5164,7 +5164,7 @@ define i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_add_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -5199,7 +5199,7 @@ define i16 @atomicrmw_add_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_add_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -5230,7 +5230,7 @@ define i16 @atomicrmw_add_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_add_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -5265,7 +5265,7 @@ define i16 @atomicrmw_add_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_add_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -5296,7 +5296,7 @@ define i16 @atomicrmw_add_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_add_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -5331,7 +5331,7 @@ define i16 @atomicrmw_add_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_add_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -5362,7 +5362,7 @@ define i16 @atomicrmw_add_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_add_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -5397,7 +5397,7 @@ define i16 @atomicrmw_add_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_add_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -5428,7 +5428,7 @@ define i16 @atomicrmw_add_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_add_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -5463,7 +5463,7 @@ define i16 @atomicrmw_sub_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -5494,7 +5494,7 @@ define i16 @atomicrmw_sub_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -5529,7 +5529,7 @@ define i16 @atomicrmw_sub_i16_acquire(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -5560,7 +5560,7 @@ define i16 @atomicrmw_sub_i16_acquire(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -5595,7 +5595,7 @@ define i16 @atomicrmw_sub_i16_release(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -5626,7 +5626,7 @@ define i16 @atomicrmw_sub_i16_release(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -5661,7 +5661,7 @@ define i16 @atomicrmw_sub_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -5692,7 +5692,7 @@ define i16 @atomicrmw_sub_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -5727,7 +5727,7 @@ define i16 @atomicrmw_sub_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -5758,7 +5758,7 @@ define i16 @atomicrmw_sub_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -5793,7 +5793,7 @@ define i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_and_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -5818,7 +5818,7 @@ define i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_and_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -5847,7 +5847,7 @@ define i16 @atomicrmw_and_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_and_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -5872,7 +5872,7 @@ define i16 @atomicrmw_and_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_and_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -5901,7 +5901,7 @@ define i16 @atomicrmw_and_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_and_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -5926,7 +5926,7 @@ define i16 @atomicrmw_and_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_and_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -5955,7 +5955,7 @@ define i16 @atomicrmw_and_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_and_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -5980,7 +5980,7 @@ define i16 @atomicrmw_and_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_and_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6009,7 +6009,7 @@ define i16 @atomicrmw_and_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_and_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -6034,7 +6034,7 @@ define i16 @atomicrmw_and_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_and_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6063,7 +6063,7 @@ define i16 @atomicrmw_nand_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -6095,7 +6095,7 @@ define i16 @atomicrmw_nand_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6131,7 +6131,7 @@ define i16 @atomicrmw_nand_i16_acquire(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -6163,7 +6163,7 @@ define i16 @atomicrmw_nand_i16_acquire(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6199,7 +6199,7 @@ define i16 @atomicrmw_nand_i16_release(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -6231,7 +6231,7 @@ define i16 @atomicrmw_nand_i16_release(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6267,7 +6267,7 @@ define i16 @atomicrmw_nand_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -6299,7 +6299,7 @@ define i16 @atomicrmw_nand_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6335,7 +6335,7 @@ define i16 @atomicrmw_nand_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -6367,7 +6367,7 @@ define i16 @atomicrmw_nand_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6403,7 +6403,7 @@ define i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_or_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -6425,7 +6425,7 @@ define i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_or_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6451,7 +6451,7 @@ define i16 @atomicrmw_or_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_or_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -6473,7 +6473,7 @@ define i16 @atomicrmw_or_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_or_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6499,7 +6499,7 @@ define i16 @atomicrmw_or_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_or_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -6521,7 +6521,7 @@ define i16 @atomicrmw_or_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_or_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6547,7 +6547,7 @@ define i16 @atomicrmw_or_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_or_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -6569,7 +6569,7 @@ define i16 @atomicrmw_or_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_or_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6595,7 +6595,7 @@ define i16 @atomicrmw_or_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_or_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -6617,7 +6617,7 @@ define i16 @atomicrmw_or_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_or_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6643,7 +6643,7 @@ define i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -6665,7 +6665,7 @@ define i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6691,7 +6691,7 @@ define i16 @atomicrmw_xor_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -6713,7 +6713,7 @@ define i16 @atomicrmw_xor_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6739,7 +6739,7 @@ define i16 @atomicrmw_xor_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -6761,7 +6761,7 @@ define i16 @atomicrmw_xor_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6787,7 +6787,7 @@ define i16 @atomicrmw_xor_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -6809,7 +6809,7 @@ define i16 @atomicrmw_xor_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6835,7 +6835,7 @@ define i16 @atomicrmw_xor_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -6857,7 +6857,7 @@ define i16 @atomicrmw_xor_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -6897,8 +6897,8 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB90_4
@@ -6932,7 +6932,7 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    slli a1, a1, 16
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 16
+; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB90_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a5, (a6)
@@ -6970,8 +6970,8 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB90_4
@@ -7005,7 +7005,7 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    slli a1, a1, 48
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 48
+; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB90_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a5, (a6)
@@ -7046,8 +7046,8 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB91_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
@@ -7082,7 +7082,7 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    slli a1, a1, 16
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 16
+; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB91_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a5, (a6)
@@ -7119,8 +7119,8 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB91_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2 at plt
 ; RV64I-NEXT:    lh a3, 14(sp)
@@ -7155,7 +7155,7 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    slli a1, a1, 48
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 48
+; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB91_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a5, (a6)
@@ -7196,9 +7196,9 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB92_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB92_4
@@ -7232,7 +7232,7 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    slli a1, a1, 16
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 16
+; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB92_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a5, (a6)
@@ -7269,9 +7269,9 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB92_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
-; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB92_4
@@ -7305,7 +7305,7 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    slli a1, a1, 48
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 48
+; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB92_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a5, (a6)
@@ -7346,8 +7346,8 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB93_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
@@ -7382,7 +7382,7 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    slli a1, a1, 16
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 16
+; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB93_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a5, (a6)
@@ -7419,8 +7419,8 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB93_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
@@ -7455,7 +7455,7 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    slli a1, a1, 48
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 48
+; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB93_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a5, (a6)
@@ -7496,8 +7496,8 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB94_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 5
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
@@ -7532,7 +7532,7 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    slli a1, a1, 16
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 16
+; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB94_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a5, (a6)
@@ -7569,8 +7569,8 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB94_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
@@ -7605,7 +7605,7 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    slli a1, a1, 48
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 48
+; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB94_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a5, (a6)
@@ -7647,8 +7647,8 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB95_4
@@ -7682,7 +7682,7 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    slli a1, a1, 16
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 16
+; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB95_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a5, (a6)
@@ -7720,8 +7720,8 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB95_4
@@ -7755,7 +7755,7 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    slli a1, a1, 48
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 48
+; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB95_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a5, (a6)
@@ -7796,8 +7796,8 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB96_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
@@ -7832,7 +7832,7 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    slli a1, a1, 16
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 16
+; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB96_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a5, (a6)
@@ -7869,8 +7869,8 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB96_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
@@ -7905,7 +7905,7 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    slli a1, a1, 48
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 48
+; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB96_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a5, (a6)
@@ -7946,9 +7946,9 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB97_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB97_4
@@ -7982,7 +7982,7 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    slli a1, a1, 16
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 16
+; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB97_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a5, (a6)
@@ -8019,9 +8019,9 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB97_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
-; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB97_4
@@ -8055,7 +8055,7 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    slli a1, a1, 48
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 48
+; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB97_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a5, (a6)
@@ -8096,8 +8096,8 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB98_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
@@ -8132,7 +8132,7 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    slli a1, a1, 16
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 16
+; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB98_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aq a5, (a6)
@@ -8169,8 +8169,8 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB98_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
@@ -8205,7 +8205,7 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    slli a1, a1, 48
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 48
+; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB98_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aq a5, (a6)
@@ -8246,8 +8246,8 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB99_2 Depth=1
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 5
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    call __atomic_compare_exchange_2 at plt
 ; RV32I-NEXT:    lh a3, 14(sp)
@@ -8282,7 +8282,7 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    slli a1, a1, 16
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 16
+; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB99_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w.aqrl a5, (a6)
@@ -8319,8 +8319,8 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB99_2 Depth=1
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
@@ -8355,7 +8355,7 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    slli a1, a1, 48
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 48
+; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB99_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w.aqrl a5, (a6)
@@ -8399,8 +8399,8 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB100_4
@@ -8468,8 +8468,8 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB100_4
@@ -8540,8 +8540,8 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB101_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
@@ -8609,8 +8609,8 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB101_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
@@ -8682,9 +8682,9 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB102_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB102_4
@@ -8751,9 +8751,9 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB102_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB102_4
@@ -8824,8 +8824,8 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB103_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
@@ -8893,8 +8893,8 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB103_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
@@ -8966,8 +8966,8 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB104_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 5
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
@@ -9035,8 +9035,8 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB104_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
@@ -9109,8 +9109,8 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB105_4
@@ -9178,8 +9178,8 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB105_4
@@ -9250,8 +9250,8 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB106_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
@@ -9319,8 +9319,8 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB106_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
@@ -9392,9 +9392,9 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB107_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB107_4
@@ -9461,9 +9461,9 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB107_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB107_4
@@ -9534,8 +9534,8 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB108_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
@@ -9603,8 +9603,8 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB108_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
@@ -9676,8 +9676,8 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB109_2 Depth=1
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 5
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
@@ -9745,8 +9745,8 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB109_2 Depth=1
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
@@ -9803,7 +9803,7 @@ define i32 @atomicrmw_xchg_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_exchange_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -9818,7 +9818,7 @@ define i32 @atomicrmw_xchg_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_exchange_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -9837,7 +9837,7 @@ define i32 @atomicrmw_xchg_i32_acquire(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_exchange_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -9852,7 +9852,7 @@ define i32 @atomicrmw_xchg_i32_acquire(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_exchange_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -9871,7 +9871,7 @@ define i32 @atomicrmw_xchg_i32_release(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_exchange_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -9886,7 +9886,7 @@ define i32 @atomicrmw_xchg_i32_release(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_exchange_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -9905,7 +9905,7 @@ define i32 @atomicrmw_xchg_i32_acq_rel(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_exchange_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -9920,7 +9920,7 @@ define i32 @atomicrmw_xchg_i32_acq_rel(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_exchange_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -9939,7 +9939,7 @@ define i32 @atomicrmw_xchg_i32_seq_cst(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_exchange_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -9954,7 +9954,7 @@ define i32 @atomicrmw_xchg_i32_seq_cst(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_exchange_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -9973,7 +9973,7 @@ define i32 @atomicrmw_add_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_add_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -9988,7 +9988,7 @@ define i32 @atomicrmw_add_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_add_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10007,7 +10007,7 @@ define i32 @atomicrmw_add_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_add_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10022,7 +10022,7 @@ define i32 @atomicrmw_add_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_add_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10041,7 +10041,7 @@ define i32 @atomicrmw_add_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_add_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10056,7 +10056,7 @@ define i32 @atomicrmw_add_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_add_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10075,7 +10075,7 @@ define i32 @atomicrmw_add_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_add_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10090,7 +10090,7 @@ define i32 @atomicrmw_add_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_add_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10109,7 +10109,7 @@ define i32 @atomicrmw_add_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_add_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10124,7 +10124,7 @@ define i32 @atomicrmw_add_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_add_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10143,7 +10143,7 @@ define i32 @atomicrmw_sub_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10159,7 +10159,7 @@ define i32 @atomicrmw_sub_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10179,7 +10179,7 @@ define i32 @atomicrmw_sub_i32_acquire(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10195,7 +10195,7 @@ define i32 @atomicrmw_sub_i32_acquire(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10215,7 +10215,7 @@ define i32 @atomicrmw_sub_i32_release(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10231,7 +10231,7 @@ define i32 @atomicrmw_sub_i32_release(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10251,7 +10251,7 @@ define i32 @atomicrmw_sub_i32_acq_rel(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10267,7 +10267,7 @@ define i32 @atomicrmw_sub_i32_acq_rel(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10287,7 +10287,7 @@ define i32 @atomicrmw_sub_i32_seq_cst(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10303,7 +10303,7 @@ define i32 @atomicrmw_sub_i32_seq_cst(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10323,7 +10323,7 @@ define i32 @atomicrmw_and_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_and_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10338,7 +10338,7 @@ define i32 @atomicrmw_and_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_and_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10357,7 +10357,7 @@ define i32 @atomicrmw_and_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_and_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10372,7 +10372,7 @@ define i32 @atomicrmw_and_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_and_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10391,7 +10391,7 @@ define i32 @atomicrmw_and_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_and_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10406,7 +10406,7 @@ define i32 @atomicrmw_and_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_and_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10425,7 +10425,7 @@ define i32 @atomicrmw_and_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_and_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10440,7 +10440,7 @@ define i32 @atomicrmw_and_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_and_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10459,7 +10459,7 @@ define i32 @atomicrmw_and_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_and_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10474,7 +10474,7 @@ define i32 @atomicrmw_and_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_and_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10493,7 +10493,7 @@ define i32 @atomicrmw_nand_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10515,7 +10515,7 @@ define i32 @atomicrmw_nand_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10541,7 +10541,7 @@ define i32 @atomicrmw_nand_i32_acquire(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10563,7 +10563,7 @@ define i32 @atomicrmw_nand_i32_acquire(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10589,7 +10589,7 @@ define i32 @atomicrmw_nand_i32_release(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10611,7 +10611,7 @@ define i32 @atomicrmw_nand_i32_release(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10637,7 +10637,7 @@ define i32 @atomicrmw_nand_i32_acq_rel(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10659,7 +10659,7 @@ define i32 @atomicrmw_nand_i32_acq_rel(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10685,7 +10685,7 @@ define i32 @atomicrmw_nand_i32_seq_cst(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10707,7 +10707,7 @@ define i32 @atomicrmw_nand_i32_seq_cst(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10733,7 +10733,7 @@ define i32 @atomicrmw_or_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_or_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10748,7 +10748,7 @@ define i32 @atomicrmw_or_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_or_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10767,7 +10767,7 @@ define i32 @atomicrmw_or_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_or_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10782,7 +10782,7 @@ define i32 @atomicrmw_or_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_or_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10801,7 +10801,7 @@ define i32 @atomicrmw_or_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_or_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10816,7 +10816,7 @@ define i32 @atomicrmw_or_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_or_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10835,7 +10835,7 @@ define i32 @atomicrmw_or_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_or_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10850,7 +10850,7 @@ define i32 @atomicrmw_or_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_or_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10869,7 +10869,7 @@ define i32 @atomicrmw_or_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_or_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10884,7 +10884,7 @@ define i32 @atomicrmw_or_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_or_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10903,7 +10903,7 @@ define i32 @atomicrmw_xor_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10918,7 +10918,7 @@ define i32 @atomicrmw_xor_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10937,7 +10937,7 @@ define i32 @atomicrmw_xor_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10952,7 +10952,7 @@ define i32 @atomicrmw_xor_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -10971,7 +10971,7 @@ define i32 @atomicrmw_xor_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    li a2, 3
 ; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -10986,7 +10986,7 @@ define i32 @atomicrmw_xor_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -11005,7 +11005,7 @@ define i32 @atomicrmw_xor_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 4
+; RV32I-NEXT:    li a2, 4
 ; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -11020,7 +11020,7 @@ define i32 @atomicrmw_xor_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -11039,7 +11039,7 @@ define i32 @atomicrmw_xor_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
+; RV32I-NEXT:    li a2, 5
 ; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -11054,7 +11054,7 @@ define i32 @atomicrmw_xor_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -11084,8 +11084,8 @@ define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB145_4
@@ -11127,8 +11127,8 @@ define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB145_4
@@ -11173,8 +11173,8 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB146_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
@@ -11216,8 +11216,8 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB146_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
@@ -11263,9 +11263,9 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB147_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB147_4
@@ -11306,9 +11306,9 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB147_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
-; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB147_4
@@ -11353,8 +11353,8 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB148_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
@@ -11396,8 +11396,8 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB148_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
@@ -11443,8 +11443,8 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB149_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 5
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
@@ -11486,8 +11486,8 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB149_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
@@ -11534,8 +11534,8 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB150_4
@@ -11577,8 +11577,8 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB150_4
@@ -11623,8 +11623,8 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB151_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
@@ -11666,8 +11666,8 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB151_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
@@ -11713,9 +11713,9 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB152_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB152_4
@@ -11756,9 +11756,9 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB152_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
-; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB152_4
@@ -11803,8 +11803,8 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB153_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
@@ -11846,8 +11846,8 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB153_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
@@ -11893,8 +11893,8 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB154_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 5
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
@@ -11936,8 +11936,8 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB154_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
@@ -11984,8 +11984,8 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB155_4
@@ -12027,8 +12027,8 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB155_4
@@ -12073,8 +12073,8 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB156_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
@@ -12116,8 +12116,8 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB156_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
@@ -12163,9 +12163,9 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB157_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB157_4
@@ -12206,9 +12206,9 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB157_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
-; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB157_4
@@ -12253,8 +12253,8 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB158_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
@@ -12296,8 +12296,8 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB158_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
@@ -12343,8 +12343,8 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB159_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 5
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
@@ -12386,8 +12386,8 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB159_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
@@ -12434,8 +12434,8 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB160_4
@@ -12477,8 +12477,8 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB160_4
@@ -12523,8 +12523,8 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB161_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a3, zero, 2
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 2
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
@@ -12566,8 +12566,8 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB161_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
@@ -12613,9 +12613,9 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB162_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB162_4
@@ -12656,9 +12656,9 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB162_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
-; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB162_4
@@ -12703,8 +12703,8 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB163_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 2
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
@@ -12746,8 +12746,8 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB163_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
@@ -12793,8 +12793,8 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    # in Loop: Header=BB164_2 Depth=1
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
-; RV32I-NEXT:    addi a3, zero, 5
-; RV32I-NEXT:    addi a4, zero, 5
+; RV32I-NEXT:    li a3, 5
+; RV32I-NEXT:    li a4, 5
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
@@ -12836,8 +12836,8 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB164_2 Depth=1
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
@@ -12873,7 +12873,7 @@ define i64 @atomicrmw_xchg_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __atomic_exchange_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -12883,7 +12883,7 @@ define i64 @atomicrmw_xchg_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a3, zero
+; RV32IA-NEXT:    li a3, 0
 ; RV32IA-NEXT:    call __atomic_exchange_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -12893,7 +12893,7 @@ define i64 @atomicrmw_xchg_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_exchange_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -12912,7 +12912,7 @@ define i64 @atomicrmw_xchg_i64_acquire(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    call __atomic_exchange_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -12922,7 +12922,7 @@ define i64 @atomicrmw_xchg_i64_acquire(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 2
+; RV32IA-NEXT:    li a3, 2
 ; RV32IA-NEXT:    call __atomic_exchange_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -12932,7 +12932,7 @@ define i64 @atomicrmw_xchg_i64_acquire(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_exchange_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -12951,7 +12951,7 @@ define i64 @atomicrmw_xchg_i64_release(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    call __atomic_exchange_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -12961,7 +12961,7 @@ define i64 @atomicrmw_xchg_i64_release(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 3
+; RV32IA-NEXT:    li a3, 3
 ; RV32IA-NEXT:    call __atomic_exchange_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -12971,7 +12971,7 @@ define i64 @atomicrmw_xchg_i64_release(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_exchange_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -12990,7 +12990,7 @@ define i64 @atomicrmw_xchg_i64_acq_rel(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    call __atomic_exchange_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13000,7 +13000,7 @@ define i64 @atomicrmw_xchg_i64_acq_rel(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 4
+; RV32IA-NEXT:    li a3, 4
 ; RV32IA-NEXT:    call __atomic_exchange_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13010,7 +13010,7 @@ define i64 @atomicrmw_xchg_i64_acq_rel(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_exchange_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13029,7 +13029,7 @@ define i64 @atomicrmw_xchg_i64_seq_cst(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    call __atomic_exchange_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13039,7 +13039,7 @@ define i64 @atomicrmw_xchg_i64_seq_cst(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 5
+; RV32IA-NEXT:    li a3, 5
 ; RV32IA-NEXT:    call __atomic_exchange_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13049,7 +13049,7 @@ define i64 @atomicrmw_xchg_i64_seq_cst(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_exchange_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13068,7 +13068,7 @@ define i64 @atomicrmw_add_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __atomic_fetch_add_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13078,7 +13078,7 @@ define i64 @atomicrmw_add_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a3, zero
+; RV32IA-NEXT:    li a3, 0
 ; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13088,7 +13088,7 @@ define i64 @atomicrmw_add_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_add_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13107,7 +13107,7 @@ define i64 @atomicrmw_add_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    call __atomic_fetch_add_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13117,7 +13117,7 @@ define i64 @atomicrmw_add_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 2
+; RV32IA-NEXT:    li a3, 2
 ; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13127,7 +13127,7 @@ define i64 @atomicrmw_add_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_add_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13146,7 +13146,7 @@ define i64 @atomicrmw_add_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    call __atomic_fetch_add_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13156,7 +13156,7 @@ define i64 @atomicrmw_add_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 3
+; RV32IA-NEXT:    li a3, 3
 ; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13166,7 +13166,7 @@ define i64 @atomicrmw_add_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_add_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13185,7 +13185,7 @@ define i64 @atomicrmw_add_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    call __atomic_fetch_add_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13195,7 +13195,7 @@ define i64 @atomicrmw_add_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 4
+; RV32IA-NEXT:    li a3, 4
 ; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13205,7 +13205,7 @@ define i64 @atomicrmw_add_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_add_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13224,7 +13224,7 @@ define i64 @atomicrmw_add_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    call __atomic_fetch_add_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13234,7 +13234,7 @@ define i64 @atomicrmw_add_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 5
+; RV32IA-NEXT:    li a3, 5
 ; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13244,7 +13244,7 @@ define i64 @atomicrmw_add_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_add_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13263,7 +13263,7 @@ define i64 @atomicrmw_sub_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13273,7 +13273,7 @@ define i64 @atomicrmw_sub_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a3, zero
+; RV32IA-NEXT:    li a3, 0
 ; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13283,7 +13283,7 @@ define i64 @atomicrmw_sub_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13303,7 +13303,7 @@ define i64 @atomicrmw_sub_i64_acquire(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13313,7 +13313,7 @@ define i64 @atomicrmw_sub_i64_acquire(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 2
+; RV32IA-NEXT:    li a3, 2
 ; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13323,7 +13323,7 @@ define i64 @atomicrmw_sub_i64_acquire(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13343,7 +13343,7 @@ define i64 @atomicrmw_sub_i64_release(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13353,7 +13353,7 @@ define i64 @atomicrmw_sub_i64_release(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 3
+; RV32IA-NEXT:    li a3, 3
 ; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13363,7 +13363,7 @@ define i64 @atomicrmw_sub_i64_release(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13383,7 +13383,7 @@ define i64 @atomicrmw_sub_i64_acq_rel(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13393,7 +13393,7 @@ define i64 @atomicrmw_sub_i64_acq_rel(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 4
+; RV32IA-NEXT:    li a3, 4
 ; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13403,7 +13403,7 @@ define i64 @atomicrmw_sub_i64_acq_rel(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13423,7 +13423,7 @@ define i64 @atomicrmw_sub_i64_seq_cst(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13433,7 +13433,7 @@ define i64 @atomicrmw_sub_i64_seq_cst(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 5
+; RV32IA-NEXT:    li a3, 5
 ; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13443,7 +13443,7 @@ define i64 @atomicrmw_sub_i64_seq_cst(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13463,7 +13463,7 @@ define i64 @atomicrmw_and_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __atomic_fetch_and_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13473,7 +13473,7 @@ define i64 @atomicrmw_and_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a3, zero
+; RV32IA-NEXT:    li a3, 0
 ; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13483,7 +13483,7 @@ define i64 @atomicrmw_and_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_and_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13502,7 +13502,7 @@ define i64 @atomicrmw_and_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    call __atomic_fetch_and_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13512,7 +13512,7 @@ define i64 @atomicrmw_and_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 2
+; RV32IA-NEXT:    li a3, 2
 ; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13522,7 +13522,7 @@ define i64 @atomicrmw_and_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_and_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13541,7 +13541,7 @@ define i64 @atomicrmw_and_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    call __atomic_fetch_and_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13551,7 +13551,7 @@ define i64 @atomicrmw_and_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 3
+; RV32IA-NEXT:    li a3, 3
 ; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13561,7 +13561,7 @@ define i64 @atomicrmw_and_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_and_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13580,7 +13580,7 @@ define i64 @atomicrmw_and_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    call __atomic_fetch_and_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13590,7 +13590,7 @@ define i64 @atomicrmw_and_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 4
+; RV32IA-NEXT:    li a3, 4
 ; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13600,7 +13600,7 @@ define i64 @atomicrmw_and_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_and_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13619,7 +13619,7 @@ define i64 @atomicrmw_and_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    call __atomic_fetch_and_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13629,7 +13629,7 @@ define i64 @atomicrmw_and_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 5
+; RV32IA-NEXT:    li a3, 5
 ; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13639,7 +13639,7 @@ define i64 @atomicrmw_and_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_and_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13658,7 +13658,7 @@ define i64 @atomicrmw_nand_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13668,7 +13668,7 @@ define i64 @atomicrmw_nand_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a3, zero
+; RV32IA-NEXT:    li a3, 0
 ; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13678,7 +13678,7 @@ define i64 @atomicrmw_nand_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13704,7 +13704,7 @@ define i64 @atomicrmw_nand_i64_acquire(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13714,7 +13714,7 @@ define i64 @atomicrmw_nand_i64_acquire(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 2
+; RV32IA-NEXT:    li a3, 2
 ; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13724,7 +13724,7 @@ define i64 @atomicrmw_nand_i64_acquire(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13750,7 +13750,7 @@ define i64 @atomicrmw_nand_i64_release(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13760,7 +13760,7 @@ define i64 @atomicrmw_nand_i64_release(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 3
+; RV32IA-NEXT:    li a3, 3
 ; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13770,7 +13770,7 @@ define i64 @atomicrmw_nand_i64_release(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13796,7 +13796,7 @@ define i64 @atomicrmw_nand_i64_acq_rel(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13806,7 +13806,7 @@ define i64 @atomicrmw_nand_i64_acq_rel(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 4
+; RV32IA-NEXT:    li a3, 4
 ; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13816,7 +13816,7 @@ define i64 @atomicrmw_nand_i64_acq_rel(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13842,7 +13842,7 @@ define i64 @atomicrmw_nand_i64_seq_cst(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13852,7 +13852,7 @@ define i64 @atomicrmw_nand_i64_seq_cst(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 5
+; RV32IA-NEXT:    li a3, 5
 ; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13862,7 +13862,7 @@ define i64 @atomicrmw_nand_i64_seq_cst(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13888,7 +13888,7 @@ define i64 @atomicrmw_or_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __atomic_fetch_or_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13898,7 +13898,7 @@ define i64 @atomicrmw_or_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a3, zero
+; RV32IA-NEXT:    li a3, 0
 ; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13908,7 +13908,7 @@ define i64 @atomicrmw_or_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_or_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13927,7 +13927,7 @@ define i64 @atomicrmw_or_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    call __atomic_fetch_or_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13937,7 +13937,7 @@ define i64 @atomicrmw_or_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 2
+; RV32IA-NEXT:    li a3, 2
 ; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13947,7 +13947,7 @@ define i64 @atomicrmw_or_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_or_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -13966,7 +13966,7 @@ define i64 @atomicrmw_or_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    call __atomic_fetch_or_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -13976,7 +13976,7 @@ define i64 @atomicrmw_or_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 3
+; RV32IA-NEXT:    li a3, 3
 ; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -13986,7 +13986,7 @@ define i64 @atomicrmw_or_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_or_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -14005,7 +14005,7 @@ define i64 @atomicrmw_or_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    call __atomic_fetch_or_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -14015,7 +14015,7 @@ define i64 @atomicrmw_or_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 4
+; RV32IA-NEXT:    li a3, 4
 ; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -14025,7 +14025,7 @@ define i64 @atomicrmw_or_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_or_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -14044,7 +14044,7 @@ define i64 @atomicrmw_or_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    call __atomic_fetch_or_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -14054,7 +14054,7 @@ define i64 @atomicrmw_or_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 5
+; RV32IA-NEXT:    li a3, 5
 ; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -14064,7 +14064,7 @@ define i64 @atomicrmw_or_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_or_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -14083,7 +14083,7 @@ define i64 @atomicrmw_xor_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -14093,7 +14093,7 @@ define i64 @atomicrmw_xor_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a3, zero
+; RV32IA-NEXT:    li a3, 0
 ; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -14103,7 +14103,7 @@ define i64 @atomicrmw_xor_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -14122,7 +14122,7 @@ define i64 @atomicrmw_xor_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 2
+; RV32I-NEXT:    li a3, 2
 ; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -14132,7 +14132,7 @@ define i64 @atomicrmw_xor_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 2
+; RV32IA-NEXT:    li a3, 2
 ; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -14142,7 +14142,7 @@ define i64 @atomicrmw_xor_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -14161,7 +14161,7 @@ define i64 @atomicrmw_xor_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 3
+; RV32I-NEXT:    li a3, 3
 ; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -14171,7 +14171,7 @@ define i64 @atomicrmw_xor_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 3
+; RV32IA-NEXT:    li a3, 3
 ; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -14181,7 +14181,7 @@ define i64 @atomicrmw_xor_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -14200,7 +14200,7 @@ define i64 @atomicrmw_xor_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 4
+; RV32I-NEXT:    li a3, 4
 ; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -14210,7 +14210,7 @@ define i64 @atomicrmw_xor_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 4
+; RV32IA-NEXT:    li a3, 4
 ; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -14220,7 +14220,7 @@ define i64 @atomicrmw_xor_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 4
+; RV64I-NEXT:    li a2, 4
 ; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -14239,7 +14239,7 @@ define i64 @atomicrmw_xor_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a3, zero, 5
+; RV32I-NEXT:    li a3, 5
 ; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -14249,7 +14249,7 @@ define i64 @atomicrmw_xor_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    addi a3, zero, 5
+; RV32IA-NEXT:    li a3, 5
 ; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -14259,7 +14259,7 @@ define i64 @atomicrmw_xor_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a2, zero, 5
+; RV64I-NEXT:    li a2, 5
 ; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -14293,8 +14293,8 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a4, 0
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
@@ -14347,8 +14347,8 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    mv a4, zero
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
@@ -14397,8 +14397,8 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB200_4
@@ -14445,8 +14445,8 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    li a4, 2
+; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
@@ -14499,8 +14499,8 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    addi a4, zero, 2
-; RV32IA-NEXT:    addi a5, zero, 2
+; RV32IA-NEXT:    li a4, 2
+; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
@@ -14549,8 +14549,8 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB201_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
@@ -14598,9 +14598,9 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a4, zero, 3
+; RV32I-NEXT:    li a4, 3
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
@@ -14652,9 +14652,9 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    addi a4, zero, 3
+; RV32IA-NEXT:    li a4, 3
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
@@ -14702,9 +14702,9 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB202_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB202_4
@@ -14751,8 +14751,8 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a4, zero, 4
-; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    li a4, 4
+; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
@@ -14805,8 +14805,8 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    addi a4, zero, 4
-; RV32IA-NEXT:    addi a5, zero, 2
+; RV32IA-NEXT:    li a4, 4
+; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
@@ -14855,8 +14855,8 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB203_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
@@ -14904,8 +14904,8 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a4, zero, 5
-; RV32I-NEXT:    addi a5, zero, 5
+; RV32I-NEXT:    li a4, 5
+; RV32I-NEXT:    li a5, 5
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
@@ -14958,8 +14958,8 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    addi a4, zero, 5
-; RV32IA-NEXT:    addi a5, zero, 5
+; RV32IA-NEXT:    li a4, 5
+; RV32IA-NEXT:    li a5, 5
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
@@ -15008,8 +15008,8 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB204_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
@@ -15058,8 +15058,8 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a4, 0
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
@@ -15113,8 +15113,8 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    mv a4, zero
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
@@ -15164,8 +15164,8 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB205_4
@@ -15212,8 +15212,8 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    li a4, 2
+; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
@@ -15267,8 +15267,8 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    addi a4, zero, 2
-; RV32IA-NEXT:    addi a5, zero, 2
+; RV32IA-NEXT:    li a4, 2
+; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
@@ -15318,8 +15318,8 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB206_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
@@ -15367,9 +15367,9 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a4, zero, 3
+; RV32I-NEXT:    li a4, 3
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
@@ -15422,9 +15422,9 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    addi a4, zero, 3
+; RV32IA-NEXT:    li a4, 3
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
@@ -15473,9 +15473,9 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB207_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB207_4
@@ -15522,8 +15522,8 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a4, zero, 4
-; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    li a4, 4
+; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
@@ -15577,8 +15577,8 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    addi a4, zero, 4
-; RV32IA-NEXT:    addi a5, zero, 2
+; RV32IA-NEXT:    li a4, 4
+; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
@@ -15628,8 +15628,8 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB208_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
@@ -15677,8 +15677,8 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a4, zero, 5
-; RV32I-NEXT:    addi a5, zero, 5
+; RV32I-NEXT:    li a4, 5
+; RV32I-NEXT:    li a5, 5
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
@@ -15732,8 +15732,8 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    addi a4, zero, 5
-; RV32IA-NEXT:    addi a5, zero, 5
+; RV32IA-NEXT:    li a4, 5
+; RV32IA-NEXT:    li a5, 5
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
@@ -15783,8 +15783,8 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB209_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
@@ -15833,8 +15833,8 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a4, 0
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
@@ -15887,8 +15887,8 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    mv a4, zero
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
@@ -15937,8 +15937,8 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB210_4
@@ -15985,8 +15985,8 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    li a4, 2
+; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
@@ -16039,8 +16039,8 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    addi a4, zero, 2
-; RV32IA-NEXT:    addi a5, zero, 2
+; RV32IA-NEXT:    li a4, 2
+; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
@@ -16089,8 +16089,8 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB211_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
@@ -16138,9 +16138,9 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a4, zero, 3
+; RV32I-NEXT:    li a4, 3
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
@@ -16192,9 +16192,9 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    addi a4, zero, 3
+; RV32IA-NEXT:    li a4, 3
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
@@ -16242,9 +16242,9 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB212_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB212_4
@@ -16291,8 +16291,8 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a4, zero, 4
-; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    li a4, 4
+; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
@@ -16345,8 +16345,8 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    addi a4, zero, 4
-; RV32IA-NEXT:    addi a5, zero, 2
+; RV32IA-NEXT:    li a4, 4
+; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
@@ -16395,8 +16395,8 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB213_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
@@ -16444,8 +16444,8 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a4, zero, 5
-; RV32I-NEXT:    addi a5, zero, 5
+; RV32I-NEXT:    li a4, 5
+; RV32I-NEXT:    li a5, 5
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
@@ -16498,8 +16498,8 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    addi a4, zero, 5
-; RV32IA-NEXT:    addi a5, zero, 5
+; RV32IA-NEXT:    li a4, 5
+; RV32IA-NEXT:    li a5, 5
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
@@ -16548,8 +16548,8 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB214_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
@@ -16598,8 +16598,8 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a4, 0
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
@@ -16653,8 +16653,8 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    mv a4, zero
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
@@ -16704,8 +16704,8 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB215_4
@@ -16752,8 +16752,8 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a4, zero, 2
-; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    li a4, 2
+; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
@@ -16807,8 +16807,8 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    addi a4, zero, 2
-; RV32IA-NEXT:    addi a5, zero, 2
+; RV32IA-NEXT:    li a4, 2
+; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
@@ -16858,8 +16858,8 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB216_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 2
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 2
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
@@ -16907,9 +16907,9 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a4, zero, 3
+; RV32I-NEXT:    li a4, 3
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
@@ -16962,9 +16962,9 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    addi a4, zero, 3
+; RV32IA-NEXT:    li a4, 3
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
@@ -17013,9 +17013,9 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB217_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 3
+; RV64I-NEXT:    li a3, 3
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB217_4
@@ -17062,8 +17062,8 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a4, zero, 4
-; RV32I-NEXT:    addi a5, zero, 2
+; RV32I-NEXT:    li a4, 4
+; RV32I-NEXT:    li a5, 2
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
@@ -17117,8 +17117,8 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    addi a4, zero, 4
-; RV32IA-NEXT:    addi a5, zero, 2
+; RV32IA-NEXT:    li a4, 4
+; RV32IA-NEXT:    li a5, 2
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
@@ -17168,8 +17168,8 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB218_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 2
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 2
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
@@ -17217,8 +17217,8 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a4, 8(sp)
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
-; RV32I-NEXT:    addi a4, zero, 5
-; RV32I-NEXT:    addi a5, zero, 5
+; RV32I-NEXT:    li a4, 5
+; RV32I-NEXT:    li a5, 5
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
@@ -17272,8 +17272,8 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a4, 8(sp)
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
-; RV32IA-NEXT:    addi a4, zero, 5
-; RV32IA-NEXT:    addi a5, zero, 5
+; RV32IA-NEXT:    li a4, 5
+; RV32IA-NEXT:    li a5, 5
 ; RV32IA-NEXT:    mv a0, s0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
@@ -17323,8 +17323,8 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    # in Loop: Header=BB219_2 Depth=1
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
-; RV64I-NEXT:    addi a3, zero, 5
-; RV64I-NEXT:    addi a4, zero, 5
+; RV64I-NEXT:    li a3, 5
+; RV64I-NEXT:    li a4, 5
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)

diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll
index e7ec2bf88866..2fb815ab2dab 100644
--- a/llvm/test/CodeGen/RISCV/atomic-signext.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll
@@ -13,7 +13,7 @@ define signext i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __atomic_load_1@plt
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
@@ -30,7 +30,7 @@ define signext i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __atomic_load_1@plt
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
@@ -51,7 +51,7 @@ define signext i16 @atomic_load_i16_unordered(i16 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __atomic_load_2@plt
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
@@ -68,7 +68,7 @@ define signext i16 @atomic_load_i16_unordered(i16 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __atomic_load_2@plt
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
@@ -89,7 +89,7 @@ define signext i32 @atomic_load_i32_unordered(i32 *%a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __atomic_load_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -104,7 +104,7 @@ define signext i32 @atomic_load_i32_unordered(i32 *%a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __atomic_load_4@plt
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -125,7 +125,7 @@ define signext i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_exchange_1@plt
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
@@ -137,7 +137,7 @@ define signext i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -159,7 +159,7 @@ define signext i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_exchange_1@plt
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
@@ -171,7 +171,7 @@ define signext i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -197,7 +197,7 @@ define signext i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_add_1@plt
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
@@ -209,7 +209,7 @@ define signext i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -231,7 +231,7 @@ define signext i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_add_1@plt
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
@@ -243,7 +243,7 @@ define signext i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -269,7 +269,7 @@ define signext i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_sub_1@plt
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
@@ -281,7 +281,7 @@ define signext i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -303,7 +303,7 @@ define signext i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_sub_1@plt
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
@@ -315,7 +315,7 @@ define signext i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -341,7 +341,7 @@ define signext i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_and_1@plt
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
@@ -353,7 +353,7 @@ define signext i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    not a3, a3
 ; RV32IA-NEXT:    andi a1, a1, 255
@@ -369,7 +369,7 @@ define signext i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_and_1@plt
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
@@ -381,7 +381,7 @@ define signext i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    not a3, a3
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -401,7 +401,7 @@ define signext i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_nand_1@plt
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
@@ -413,7 +413,7 @@ define signext i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -436,7 +436,7 @@ define signext i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_nand_1@plt
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
@@ -448,7 +448,7 @@ define signext i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -475,7 +475,7 @@ define signext i8 @atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_or_1@plt
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
@@ -499,7 +499,7 @@ define signext i8 @atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_or_1@plt
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
@@ -527,7 +527,7 @@ define signext i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_xor_1@plt
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
@@ -551,7 +551,7 @@ define signext i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_xor_1@plt
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
@@ -593,8 +593,8 @@ define signext i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB10_4
@@ -623,12 +623,12 @@ define signext i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
 ; RV32IA-NEXT:    andi a3, a0, 24
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a7, a4, a0
 ; RV32IA-NEXT:    slli a1, a1, 24
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 24
+; RV32IA-NEXT:    li a5, 24
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a5, (a6)
@@ -668,8 +668,8 @@ define signext i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB10_4
@@ -698,12 +698,12 @@ define signext i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
 ; RV64IA-NEXT:    andi a3, a0, 24
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a7, a4, a0
 ; RV64IA-NEXT:    slli a1, a1, 56
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 56
+; RV64IA-NEXT:    li a5, 56
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a5, (a6)
@@ -747,8 +747,8 @@ define signext i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB11_4
@@ -777,12 +777,12 @@ define signext i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
 ; RV32IA-NEXT:    andi a3, a0, 24
-; RV32IA-NEXT:    addi a4, zero, 255
+; RV32IA-NEXT:    li a4, 255
 ; RV32IA-NEXT:    sll a7, a4, a0
 ; RV32IA-NEXT:    slli a1, a1, 24
 ; RV32IA-NEXT:    srai a1, a1, 24
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 24
+; RV32IA-NEXT:    li a5, 24
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB11_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a5, (a6)
@@ -822,8 +822,8 @@ define signext i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB11_4
@@ -852,12 +852,12 @@ define signext i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slli a0, a0, 3
 ; RV64IA-NEXT:    andi a3, a0, 24
-; RV64IA-NEXT:    addi a4, zero, 255
+; RV64IA-NEXT:    li a4, 255
 ; RV64IA-NEXT:    sllw a7, a4, a0
 ; RV64IA-NEXT:    slli a1, a1, 56
 ; RV64IA-NEXT:    srai a1, a1, 56
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 56
+; RV64IA-NEXT:    li a5, 56
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB11_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a5, (a6)
@@ -900,8 +900,8 @@ define signext i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB12_4
@@ -928,7 +928,7 @@ define signext i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -967,8 +967,8 @@ define signext i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB12_4
@@ -995,7 +995,7 @@ define signext i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -1038,8 +1038,8 @@ define signext i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32I-NEXT:    sb a3, 15(sp)
 ; RV32I-NEXT:    addi a1, sp, 15
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV32I-NEXT:    lb a3, 15(sp)
 ; RV32I-NEXT:    bnez a0, .LBB13_4
@@ -1066,7 +1066,7 @@ define signext i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a6, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    addi a3, zero, 255
+; RV32IA-NEXT:    li a3, 255
 ; RV32IA-NEXT:    sll a3, a3, a0
 ; RV32IA-NEXT:    andi a1, a1, 255
 ; RV32IA-NEXT:    sll a1, a1, a0
@@ -1105,8 +1105,8 @@ define signext i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64I-NEXT:    sb a3, 15(sp)
 ; RV64I-NEXT:    addi a1, sp, 15
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_1@plt
 ; RV64I-NEXT:    lb a3, 15(sp)
 ; RV64I-NEXT:    bnez a0, .LBB13_4
@@ -1133,7 +1133,7 @@ define signext i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    addi a3, zero, 255
+; RV64IA-NEXT:    li a3, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
@@ -1163,7 +1163,7 @@ define signext i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_exchange_2@plt
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
@@ -1198,7 +1198,7 @@ define signext i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_exchange_2@plt
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
@@ -1237,7 +1237,7 @@ define signext i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_add_2@plt
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
@@ -1272,7 +1272,7 @@ define signext i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_add_2@plt
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
@@ -1311,7 +1311,7 @@ define signext i16 @atomicrmw_sub_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_sub_2@plt
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
@@ -1346,7 +1346,7 @@ define signext i16 @atomicrmw_sub_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_sub_2@plt
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
@@ -1385,7 +1385,7 @@ define signext i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_and_2@plt
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
@@ -1414,7 +1414,7 @@ define signext i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_and_2@plt
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
@@ -1447,7 +1447,7 @@ define signext i16 @atomicrmw_nand_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_nand_2@plt
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
@@ -1483,7 +1483,7 @@ define signext i16 @atomicrmw_nand_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_nand_2@plt
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
@@ -1523,7 +1523,7 @@ define signext i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_or_2@plt
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
@@ -1549,7 +1549,7 @@ define signext i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_or_2@plt
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
@@ -1579,7 +1579,7 @@ define signext i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_xor_2@plt
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
@@ -1605,7 +1605,7 @@ define signext i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_xor_2@plt
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
@@ -1649,8 +1649,8 @@ define signext i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB21_4
@@ -1685,7 +1685,7 @@ define signext i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    slli a1, a1, 16
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 16
+; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB21_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a5, (a6)
@@ -1725,8 +1725,8 @@ define signext i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB21_4
@@ -1761,7 +1761,7 @@ define signext i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    slli a1, a1, 48
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 48
+; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB21_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a5, (a6)
@@ -1805,8 +1805,8 @@ define signext i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    sh a3, 14(sp)
 ; RV32I-NEXT:    addi a1, sp, 14
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a3, 14(sp)
 ; RV32I-NEXT:    bnez a0, .LBB22_4
@@ -1841,7 +1841,7 @@ define signext i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32IA-NEXT:    slli a1, a1, 16
 ; RV32IA-NEXT:    srai a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
-; RV32IA-NEXT:    addi a5, zero, 16
+; RV32IA-NEXT:    li a5, 16
 ; RV32IA-NEXT:    sub a3, a5, a3
 ; RV32IA-NEXT:  .LBB22_1: # =>This Inner Loop Header: Depth=1
 ; RV32IA-NEXT:    lr.w a5, (a6)
@@ -1881,8 +1881,8 @@ define signext i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    sh a3, 14(sp)
 ; RV64I-NEXT:    addi a1, sp, 14
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a3, 14(sp)
 ; RV64I-NEXT:    bnez a0, .LBB22_4
@@ -1917,7 +1917,7 @@ define signext i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-NEXT:    slli a1, a1, 48
 ; RV64IA-NEXT:    srai a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
-; RV64IA-NEXT:    addi a5, zero, 48
+; RV64IA-NEXT:    li a5, 48
 ; RV64IA-NEXT:    sub a3, a5, a3
 ; RV64IA-NEXT:  .LBB22_1: # =>This Inner Loop Header: Depth=1
 ; RV64IA-NEXT:    lr.w a5, (a6)
@@ -1963,8 +1963,8 @@ define signext i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB23_4
@@ -2035,8 +2035,8 @@ define signext i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB23_4
@@ -2111,8 +2111,8 @@ define signext i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV32I-NEXT:    sh a1, 10(sp)
 ; RV32I-NEXT:    addi a1, sp, 10
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV32I-NEXT:    lh a1, 10(sp)
 ; RV32I-NEXT:    bnez a0, .LBB24_4
@@ -2183,8 +2183,8 @@ define signext i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64I-NEXT:    sh a1, 6(sp)
 ; RV64I-NEXT:    addi a1, sp, 6
 ; RV64I-NEXT:    mv a0, s3
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_2@plt
 ; RV64I-NEXT:    lh a1, 6(sp)
 ; RV64I-NEXT:    bnez a0, .LBB24_4
@@ -2243,7 +2243,7 @@ define signext i32 @atomicrmw_xchg_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_exchange_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2258,7 +2258,7 @@ define signext i32 @atomicrmw_xchg_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_exchange_4@plt
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -2278,7 +2278,7 @@ define signext i32 @atomicrmw_add_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_add_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2293,7 +2293,7 @@ define signext i32 @atomicrmw_add_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_add_4@plt
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -2313,7 +2313,7 @@ define signext i32 @atomicrmw_sub_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_sub_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2329,7 +2329,7 @@ define signext i32 @atomicrmw_sub_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_sub_4@plt
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -2350,7 +2350,7 @@ define signext i32 @atomicrmw_and_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_and_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2365,7 +2365,7 @@ define signext i32 @atomicrmw_and_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_and_4@plt
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -2385,7 +2385,7 @@ define signext i32 @atomicrmw_nand_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_nand_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2407,7 +2407,7 @@ define signext i32 @atomicrmw_nand_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_nand_4@plt
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -2434,7 +2434,7 @@ define signext i32 @atomicrmw_or_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_or_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2449,7 +2449,7 @@ define signext i32 @atomicrmw_or_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_or_4@plt
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -2469,7 +2469,7 @@ define signext i32 @atomicrmw_xor_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __atomic_fetch_xor_4@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2484,7 +2484,7 @@ define signext i32 @atomicrmw_xor_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_xor_4@plt
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -2515,8 +2515,8 @@ define signext i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB32_4
@@ -2558,8 +2558,8 @@ define signext i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB32_4
@@ -2605,8 +2605,8 @@ define signext i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB33_4
@@ -2648,8 +2648,8 @@ define signext i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB33_4
@@ -2695,8 +2695,8 @@ define signext i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB34_4
@@ -2738,8 +2738,8 @@ define signext i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB34_4
@@ -2785,8 +2785,8 @@ define signext i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV32I-NEXT:    sw a3, 0(sp)
 ; RV32I-NEXT:    mv a1, sp
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a3, zero
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV32I-NEXT:    lw a3, 0(sp)
 ; RV32I-NEXT:    bnez a0, .LBB35_4
@@ -2828,8 +2828,8 @@ define signext i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; RV64I-NEXT:    sw a3, 12(sp)
 ; RV64I-NEXT:    addi a1, sp, 12
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_4@plt
 ; RV64I-NEXT:    lw a3, 12(sp)
 ; RV64I-NEXT:    bnez a0, .LBB35_4
@@ -2864,7 +2864,7 @@ define signext i64 @atomicrmw_xchg_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __atomic_exchange_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2874,7 +2874,7 @@ define signext i64 @atomicrmw_xchg_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a3, zero
+; RV32IA-NEXT:    li a3, 0
 ; RV32IA-NEXT:    call __atomic_exchange_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -2884,7 +2884,7 @@ define signext i64 @atomicrmw_xchg_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_exchange_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2903,7 +2903,7 @@ define signext i64 @atomicrmw_add_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __atomic_fetch_add_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2913,7 +2913,7 @@ define signext i64 @atomicrmw_add_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a3, zero
+; RV32IA-NEXT:    li a3, 0
 ; RV32IA-NEXT:    call __atomic_fetch_add_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -2923,7 +2923,7 @@ define signext i64 @atomicrmw_add_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_add_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2942,7 +2942,7 @@ define signext i64 @atomicrmw_sub_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2952,7 +2952,7 @@ define signext i64 @atomicrmw_sub_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a3, zero
+; RV32IA-NEXT:    li a3, 0
 ; RV32IA-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -2962,7 +2962,7 @@ define signext i64 @atomicrmw_sub_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_sub_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2982,7 +2982,7 @@ define signext i64 @atomicrmw_and_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __atomic_fetch_and_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2992,7 +2992,7 @@ define signext i64 @atomicrmw_and_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a3, zero
+; RV32IA-NEXT:    li a3, 0
 ; RV32IA-NEXT:    call __atomic_fetch_and_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -3002,7 +3002,7 @@ define signext i64 @atomicrmw_and_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_and_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -3021,7 +3021,7 @@ define signext i64 @atomicrmw_nand_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -3031,7 +3031,7 @@ define signext i64 @atomicrmw_nand_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a3, zero
+; RV32IA-NEXT:    li a3, 0
 ; RV32IA-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -3041,7 +3041,7 @@ define signext i64 @atomicrmw_nand_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_nand_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -3067,7 +3067,7 @@ define signext i64 @atomicrmw_or_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __atomic_fetch_or_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -3077,7 +3077,7 @@ define signext i64 @atomicrmw_or_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a3, zero
+; RV32IA-NEXT:    li a3, 0
 ; RV32IA-NEXT:    call __atomic_fetch_or_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -3087,7 +3087,7 @@ define signext i64 @atomicrmw_or_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_or_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -3106,7 +3106,7 @@ define signext i64 @atomicrmw_xor_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -3116,7 +3116,7 @@ define signext i64 @atomicrmw_xor_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    addi sp, sp, -16
 ; RV32IA-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IA-NEXT:    mv a3, zero
+; RV32IA-NEXT:    li a3, 0
 ; RV32IA-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV32IA-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IA-NEXT:    addi sp, sp, 16
@@ -3126,7 +3126,7 @@ define signext i64 @atomicrmw_xor_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call __atomic_fetch_xor_8@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -3160,8 +3160,8 @@ define signext i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a4, 0
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
@@ -3214,8 +3214,8 @@ define signext i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    mv a4, zero
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
@@ -3264,8 +3264,8 @@ define signext i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB43_4
@@ -3313,8 +3313,8 @@ define signext i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a4, 0
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
@@ -3368,8 +3368,8 @@ define signext i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    mv a4, zero
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
@@ -3419,8 +3419,8 @@ define signext i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB44_4
@@ -3468,8 +3468,8 @@ define signext i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a4, 0
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
@@ -3522,8 +3522,8 @@ define signext i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    mv a4, zero
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
@@ -3572,8 +3572,8 @@ define signext i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB45_4
@@ -3621,8 +3621,8 @@ define signext i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32I-NEXT:    sw a5, 12(sp)
 ; RV32I-NEXT:    addi a1, sp, 8
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a4, zero
-; RV32I-NEXT:    mv a5, zero
+; RV32I-NEXT:    li a4, 0
+; RV32I-NEXT:    li a5, 0
 ; RV32I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32I-NEXT:    lw a5, 12(sp)
 ; RV32I-NEXT:    lw a4, 8(sp)
@@ -3676,8 +3676,8 @@ define signext i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV32IA-NEXT:    sw a5, 12(sp)
 ; RV32IA-NEXT:    addi a1, sp, 8
 ; RV32IA-NEXT:    mv a0, s0
-; RV32IA-NEXT:    mv a4, zero
-; RV32IA-NEXT:    mv a5, zero
+; RV32IA-NEXT:    li a4, 0
+; RV32IA-NEXT:    li a5, 0
 ; RV32IA-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32IA-NEXT:    lw a5, 12(sp)
 ; RV32IA-NEXT:    lw a4, 8(sp)
@@ -3727,8 +3727,8 @@ define signext i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; RV64I-NEXT:    sd a3, 0(sp)
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a3, zero
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a3, 0
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV64I-NEXT:    ld a3, 0(sp)
 ; RV64I-NEXT:    bnez a0, .LBB46_4

diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation.ll b/llvm/test/CodeGen/RISCV/branch-relaxation.ll
index 5925e17ae407..c0166c217bad 100644
--- a/llvm/test/CodeGen/RISCV/branch-relaxation.ll
+++ b/llvm/test/CodeGen/RISCV/branch-relaxation.ll
@@ -42,12 +42,12 @@ define i32 @relax_jal(i1 %a) nounwind {
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    .zero 1048576
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB1_2: # %jmp
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    ret
   br i1 %a, label %iftrue, label %jmp
 

diff --git a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
index acf6903b3626..1ca0b2e13384 100644
--- a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
@@ -104,7 +104,7 @@ define i64 @test_bswap_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    lui a2, 4080
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    srli a2, a0, 8
-; RV64I-NEXT:    addi a3, zero, 255
+; RV64I-NEXT:    li a3, 255
 ; RV64I-NEXT:    slli a4, a3, 24
 ; RV64I-NEXT:    and a2, a2, a4
 ; RV64I-NEXT:    or a1, a2, a1
@@ -154,7 +154,7 @@ define i8 @test_cttz_i8(i8 %a) nounwind {
 ; RV32I-NEXT:    andi a0, a0, 15
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB3_2:
-; RV32I-NEXT:    addi a0, zero, 8
+; RV32I-NEXT:    li a0, 8
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: test_cttz_i8:
@@ -177,7 +177,7 @@ define i8 @test_cttz_i8(i8 %a) nounwind {
 ; RV64I-NEXT:    andi a0, a0, 15
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB3_2:
-; RV64I-NEXT:    addi a0, zero, 8
+; RV64I-NEXT:    li a0, 8
 ; RV64I-NEXT:    ret
   %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 false)
   ret i8 %tmp
@@ -218,7 +218,7 @@ define i16 @test_cttz_i16(i16 %a) nounwind {
 ; RV32I-NEXT:    srli a0, a0, 8
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB4_2:
-; RV32I-NEXT:    addi a0, zero, 16
+; RV32I-NEXT:    li a0, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: test_cttz_i16:
@@ -255,7 +255,7 @@ define i16 @test_cttz_i16(i16 %a) nounwind {
 ; RV64I-NEXT:    srli a0, a0, 8
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB4_2:
-; RV64I-NEXT:    addi a0, zero, 16
+; RV64I-NEXT:    li a0, 16
 ; RV64I-NEXT:    ret
   %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 false)
   ret i16 %tmp
@@ -295,7 +295,7 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB5_2:
-; RV32I-NEXT:    addi a0, zero, 32
+; RV32I-NEXT:    li a0, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: test_cttz_i32:
@@ -332,7 +332,7 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB5_2:
-; RV64I-NEXT:    addi a0, zero, 32
+; RV64I-NEXT:    li a0, 32
 ; RV64I-NEXT:    ret
   %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false)
   ret i32 %tmp
@@ -380,7 +380,7 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB6_2:
-; RV32I-NEXT:    addi a0, zero, 32
+; RV32I-NEXT:    li a0, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: test_ctlz_i32:
@@ -425,7 +425,7 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB6_2:
-; RV64I-NEXT:    addi a0, zero, 32
+; RV64I-NEXT:    li a0, 32
 ; RV64I-NEXT:    ret
   %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
   ret i32 %tmp
@@ -492,7 +492,7 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:  .LBB7_2:
 ; RV32I-NEXT:    srli a0, s2, 24
 ; RV32I-NEXT:  .LBB7_3:
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
@@ -559,7 +559,7 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB7_2:
-; RV64I-NEXT:    addi a0, zero, 64
+; RV64I-NEXT:    li a0, 64
 ; RV64I-NEXT:    ret
   %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 false)
   ret i64 %tmp
@@ -792,7 +792,7 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
 ; RV32I-NEXT:  .LBB11_2:
 ; RV32I-NEXT:    srli a0, s2, 24
 ; RV32I-NEXT:  .LBB11_3:
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
@@ -967,7 +967,7 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    add a0, a0, s5
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
@@ -1083,7 +1083,7 @@ define i64 @test_parity_i64(i64 %a) {
 ; RV32I-NEXT:    srli a1, a0, 1
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    andi a0, a0, 1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: test_parity_i64:

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
index 65be3f13b93a..e637a952aaf9 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
@@ -116,7 +116,7 @@ define i32 @caller_half_in_regs() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    lui a1, 4
 ; RV32I-NEXT:    call callee_half_in_regs@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -127,7 +127,7 @@ define i32 @caller_half_in_regs() nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    lui a1, 4
 ; RV64I-NEXT:    call callee_half_in_regs@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -138,7 +138,7 @@ define i32 @caller_half_in_regs() nounwind {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    addi a0, zero, 1
+; RV32IF-NEXT:    li a0, 1
 ; RV32IF-NEXT:    lui a1, 1048564
 ; RV32IF-NEXT:    call callee_half_in_regs@plt
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -152,7 +152,7 @@ define i32 @caller_half_in_regs() nounwind {
 ; RV64IF-NEXT:    lui a0, %hi(.LCPI1_0)
 ; RV64IF-NEXT:    flw ft0, %lo(.LCPI1_0)(a0)
 ; RV64IF-NEXT:    fmv.x.w a1, ft0
-; RV64IF-NEXT:    addi a0, zero, 1
+; RV64IF-NEXT:    li a0, 1
 ; RV64IF-NEXT:    call callee_half_in_regs@plt
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
@@ -164,7 +164,7 @@ define i32 @caller_half_in_regs() nounwind {
 ; RV32-ILP32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32F-NEXT:    lui a0, %hi(.LCPI1_0)
 ; RV32-ILP32F-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
-; RV32-ILP32F-NEXT:    addi a0, zero, 1
+; RV32-ILP32F-NEXT:    li a0, 1
 ; RV32-ILP32F-NEXT:    call callee_half_in_regs@plt
 ; RV32-ILP32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32F-NEXT:    addi sp, sp, 16
@@ -176,7 +176,7 @@ define i32 @caller_half_in_regs() nounwind {
 ; RV64-LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-LP64F-NEXT:    lui a0, %hi(.LCPI1_0)
 ; RV64-LP64F-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
-; RV64-LP64F-NEXT:    addi a0, zero, 1
+; RV64-LP64F-NEXT:    li a0, 1
 ; RV64-LP64F-NEXT:    call callee_half_in_regs@plt
 ; RV64-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-LP64F-NEXT:    addi sp, sp, 16
@@ -289,14 +289,14 @@ define i32 @caller_half_on_stack() nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lui a0, 5
 ; RV32I-NEXT:    addi t0, a0, -1792
-; RV32I-NEXT:    addi a0, zero, 1
-; RV32I-NEXT:    addi a1, zero, 2
-; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    addi a3, zero, 4
-; RV32I-NEXT:    addi a4, zero, 5
-; RV32I-NEXT:    addi a5, zero, 6
-; RV32I-NEXT:    addi a6, zero, 7
-; RV32I-NEXT:    addi a7, zero, 8
+; RV32I-NEXT:    li a0, 1
+; RV32I-NEXT:    li a1, 2
+; RV32I-NEXT:    li a2, 3
+; RV32I-NEXT:    li a3, 4
+; RV32I-NEXT:    li a4, 5
+; RV32I-NEXT:    li a5, 6
+; RV32I-NEXT:    li a6, 7
+; RV32I-NEXT:    li a7, 8
 ; RV32I-NEXT:    sw t0, 0(sp)
 ; RV32I-NEXT:    call callee_half_on_stack@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -309,14 +309,14 @@ define i32 @caller_half_on_stack() nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lui a0, 5
 ; RV64I-NEXT:    addiw t0, a0, -1792
-; RV64I-NEXT:    addi a0, zero, 1
-; RV64I-NEXT:    addi a1, zero, 2
-; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 5
-; RV64I-NEXT:    addi a5, zero, 6
-; RV64I-NEXT:    addi a6, zero, 7
-; RV64I-NEXT:    addi a7, zero, 8
+; RV64I-NEXT:    li a0, 1
+; RV64I-NEXT:    li a1, 2
+; RV64I-NEXT:    li a2, 3
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 5
+; RV64I-NEXT:    li a5, 6
+; RV64I-NEXT:    li a6, 7
+; RV64I-NEXT:    li a7, 8
 ; RV64I-NEXT:    sd t0, 0(sp)
 ; RV64I-NEXT:    call callee_half_on_stack@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -329,14 +329,14 @@ define i32 @caller_half_on_stack() nounwind {
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    lui a0, 1048565
 ; RV32IF-NEXT:    addi t0, a0, -1792
-; RV32IF-NEXT:    addi a0, zero, 1
-; RV32IF-NEXT:    addi a1, zero, 2
-; RV32IF-NEXT:    addi a2, zero, 3
-; RV32IF-NEXT:    addi a3, zero, 4
-; RV32IF-NEXT:    addi a4, zero, 5
-; RV32IF-NEXT:    addi a5, zero, 6
-; RV32IF-NEXT:    addi a6, zero, 7
-; RV32IF-NEXT:    addi a7, zero, 8
+; RV32IF-NEXT:    li a0, 1
+; RV32IF-NEXT:    li a1, 2
+; RV32IF-NEXT:    li a2, 3
+; RV32IF-NEXT:    li a3, 4
+; RV32IF-NEXT:    li a4, 5
+; RV32IF-NEXT:    li a5, 6
+; RV32IF-NEXT:    li a6, 7
+; RV32IF-NEXT:    li a7, 8
 ; RV32IF-NEXT:    sw t0, 0(sp)
 ; RV32IF-NEXT:    call callee_half_on_stack@plt
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -349,14 +349,14 @@ define i32 @caller_half_on_stack() nounwind {
 ; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IF-NEXT:    lui a0, 1048565
 ; RV64IF-NEXT:    addiw t0, a0, -1792
-; RV64IF-NEXT:    addi a0, zero, 1
-; RV64IF-NEXT:    addi a1, zero, 2
-; RV64IF-NEXT:    addi a2, zero, 3
-; RV64IF-NEXT:    addi a3, zero, 4
-; RV64IF-NEXT:    addi a4, zero, 5
-; RV64IF-NEXT:    addi a5, zero, 6
-; RV64IF-NEXT:    addi a6, zero, 7
-; RV64IF-NEXT:    addi a7, zero, 8
+; RV64IF-NEXT:    li a0, 1
+; RV64IF-NEXT:    li a1, 2
+; RV64IF-NEXT:    li a2, 3
+; RV64IF-NEXT:    li a3, 4
+; RV64IF-NEXT:    li a4, 5
+; RV64IF-NEXT:    li a5, 6
+; RV64IF-NEXT:    li a6, 7
+; RV64IF-NEXT:    li a7, 8
 ; RV64IF-NEXT:    sw t0, 0(sp)
 ; RV64IF-NEXT:    call callee_half_on_stack@plt
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -369,14 +369,14 @@ define i32 @caller_half_on_stack() nounwind {
 ; RV32-ILP32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32F-NEXT:    lui a0, %hi(.LCPI3_0)
 ; RV32-ILP32F-NEXT:    flw fa0, %lo(.LCPI3_0)(a0)
-; RV32-ILP32F-NEXT:    addi a0, zero, 1
-; RV32-ILP32F-NEXT:    addi a1, zero, 2
-; RV32-ILP32F-NEXT:    addi a2, zero, 3
-; RV32-ILP32F-NEXT:    addi a3, zero, 4
-; RV32-ILP32F-NEXT:    addi a4, zero, 5
-; RV32-ILP32F-NEXT:    addi a5, zero, 6
-; RV32-ILP32F-NEXT:    addi a6, zero, 7
-; RV32-ILP32F-NEXT:    addi a7, zero, 8
+; RV32-ILP32F-NEXT:    li a0, 1
+; RV32-ILP32F-NEXT:    li a1, 2
+; RV32-ILP32F-NEXT:    li a2, 3
+; RV32-ILP32F-NEXT:    li a3, 4
+; RV32-ILP32F-NEXT:    li a4, 5
+; RV32-ILP32F-NEXT:    li a5, 6
+; RV32-ILP32F-NEXT:    li a6, 7
+; RV32-ILP32F-NEXT:    li a7, 8
 ; RV32-ILP32F-NEXT:    call callee_half_on_stack@plt
 ; RV32-ILP32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32F-NEXT:    addi sp, sp, 16
@@ -388,14 +388,14 @@ define i32 @caller_half_on_stack() nounwind {
 ; RV64-LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-LP64F-NEXT:    lui a0, %hi(.LCPI3_0)
 ; RV64-LP64F-NEXT:    flw fa0, %lo(.LCPI3_0)(a0)
-; RV64-LP64F-NEXT:    addi a0, zero, 1
-; RV64-LP64F-NEXT:    addi a1, zero, 2
-; RV64-LP64F-NEXT:    addi a2, zero, 3
-; RV64-LP64F-NEXT:    addi a3, zero, 4
-; RV64-LP64F-NEXT:    addi a4, zero, 5
-; RV64-LP64F-NEXT:    addi a5, zero, 6
-; RV64-LP64F-NEXT:    addi a6, zero, 7
-; RV64-LP64F-NEXT:    addi a7, zero, 8
+; RV64-LP64F-NEXT:    li a0, 1
+; RV64-LP64F-NEXT:    li a1, 2
+; RV64-LP64F-NEXT:    li a2, 3
+; RV64-LP64F-NEXT:    li a3, 4
+; RV64-LP64F-NEXT:    li a4, 5
+; RV64-LP64F-NEXT:    li a5, 6
+; RV64-LP64F-NEXT:    li a6, 7
+; RV64-LP64F-NEXT:    li a7, 8
 ; RV64-LP64F-NEXT:    call callee_half_on_stack@plt
 ; RV64-LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-LP64F-NEXT:    addi sp, sp, 16

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
index c9d59e229bd8..dfd04ebdd763 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
@@ -60,9 +60,9 @@ define i32 @caller_double_in_regs() nounwind {
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
 ; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-FPELIM-NEXT:    addi a0, zero, 1
+; RV32I-FPELIM-NEXT:    li a0, 1
 ; RV32I-FPELIM-NEXT:    lui a2, 262144
-; RV32I-FPELIM-NEXT:    mv a1, zero
+; RV32I-FPELIM-NEXT:    li a1, 0
 ; RV32I-FPELIM-NEXT:    call callee_double_in_regs@plt
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
@@ -74,9 +74,9 @@ define i32 @caller_double_in_regs() nounwind {
 ; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
-; RV32I-WITHFP-NEXT:    addi a0, zero, 1
+; RV32I-WITHFP-NEXT:    li a0, 1
 ; RV32I-WITHFP-NEXT:    lui a2, 262144
-; RV32I-WITHFP-NEXT:    mv a1, zero
+; RV32I-WITHFP-NEXT:    li a1, 0
 ; RV32I-WITHFP-NEXT:    call callee_double_in_regs@plt
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -145,11 +145,11 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -64
 ; RV32I-FPELIM-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
-; RV32I-FPELIM-NEXT:    addi a0, zero, 18
+; RV32I-FPELIM-NEXT:    li a0, 18
 ; RV32I-FPELIM-NEXT:    sw a0, 24(sp)
-; RV32I-FPELIM-NEXT:    addi a0, zero, 17
+; RV32I-FPELIM-NEXT:    li a0, 17
 ; RV32I-FPELIM-NEXT:    sw a0, 20(sp)
-; RV32I-FPELIM-NEXT:    addi a0, zero, 16
+; RV32I-FPELIM-NEXT:    li a0, 16
 ; RV32I-FPELIM-NEXT:    sw a0, 16(sp)
 ; RV32I-FPELIM-NEXT:    lui a0, 262236
 ; RV32I-FPELIM-NEXT:    addi a0, a0, 655
@@ -157,7 +157,7 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-FPELIM-NEXT:    lui a0, 377487
 ; RV32I-FPELIM-NEXT:    addi a0, a0, 1475
 ; RV32I-FPELIM-NEXT:    sw a0, 8(sp)
-; RV32I-FPELIM-NEXT:    addi a0, zero, 15
+; RV32I-FPELIM-NEXT:    li a0, 15
 ; RV32I-FPELIM-NEXT:    sw a0, 0(sp)
 ; RV32I-FPELIM-NEXT:    lui a0, 262153
 ; RV32I-FPELIM-NEXT:    addi a0, a0, 491
@@ -172,13 +172,13 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-FPELIM-NEXT:    addi t0, a0, 1311
 ; RV32I-FPELIM-NEXT:    lui a0, 688509
 ; RV32I-FPELIM-NEXT:    addi a5, a0, -2048
-; RV32I-FPELIM-NEXT:    addi a0, zero, 1
-; RV32I-FPELIM-NEXT:    addi a1, zero, 11
+; RV32I-FPELIM-NEXT:    li a0, 1
+; RV32I-FPELIM-NEXT:    li a1, 11
 ; RV32I-FPELIM-NEXT:    addi a2, sp, 32
-; RV32I-FPELIM-NEXT:    addi a3, zero, 12
-; RV32I-FPELIM-NEXT:    addi a4, zero, 13
-; RV32I-FPELIM-NEXT:    addi a6, zero, 4
-; RV32I-FPELIM-NEXT:    addi a7, zero, 14
+; RV32I-FPELIM-NEXT:    li a3, 12
+; RV32I-FPELIM-NEXT:    li a4, 13
+; RV32I-FPELIM-NEXT:    li a6, 4
+; RV32I-FPELIM-NEXT:    li a7, 14
 ; RV32I-FPELIM-NEXT:    sw t0, 32(sp)
 ; RV32I-FPELIM-NEXT:    call callee_aligned_stack@plt
 ; RV32I-FPELIM-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
@@ -191,11 +191,11 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-WITHFP-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 64
-; RV32I-WITHFP-NEXT:    addi a0, zero, 18
+; RV32I-WITHFP-NEXT:    li a0, 18
 ; RV32I-WITHFP-NEXT:    sw a0, 24(sp)
-; RV32I-WITHFP-NEXT:    addi a0, zero, 17
+; RV32I-WITHFP-NEXT:    li a0, 17
 ; RV32I-WITHFP-NEXT:    sw a0, 20(sp)
-; RV32I-WITHFP-NEXT:    addi a0, zero, 16
+; RV32I-WITHFP-NEXT:    li a0, 16
 ; RV32I-WITHFP-NEXT:    sw a0, 16(sp)
 ; RV32I-WITHFP-NEXT:    lui a0, 262236
 ; RV32I-WITHFP-NEXT:    addi a0, a0, 655
@@ -203,7 +203,7 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-WITHFP-NEXT:    lui a0, 377487
 ; RV32I-WITHFP-NEXT:    addi a0, a0, 1475
 ; RV32I-WITHFP-NEXT:    sw a0, 8(sp)
-; RV32I-WITHFP-NEXT:    addi a0, zero, 15
+; RV32I-WITHFP-NEXT:    li a0, 15
 ; RV32I-WITHFP-NEXT:    sw a0, 0(sp)
 ; RV32I-WITHFP-NEXT:    lui a0, 262153
 ; RV32I-WITHFP-NEXT:    addi a0, a0, 491
@@ -218,13 +218,13 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-WITHFP-NEXT:    addi t0, a0, 1311
 ; RV32I-WITHFP-NEXT:    lui a0, 688509
 ; RV32I-WITHFP-NEXT:    addi a5, a0, -2048
-; RV32I-WITHFP-NEXT:    addi a0, zero, 1
-; RV32I-WITHFP-NEXT:    addi a1, zero, 11
+; RV32I-WITHFP-NEXT:    li a0, 1
+; RV32I-WITHFP-NEXT:    li a1, 11
 ; RV32I-WITHFP-NEXT:    addi a2, s0, -32
-; RV32I-WITHFP-NEXT:    addi a3, zero, 12
-; RV32I-WITHFP-NEXT:    addi a4, zero, 13
-; RV32I-WITHFP-NEXT:    addi a6, zero, 4
-; RV32I-WITHFP-NEXT:    addi a7, zero, 14
+; RV32I-WITHFP-NEXT:    li a3, 12
+; RV32I-WITHFP-NEXT:    li a4, 13
+; RV32I-WITHFP-NEXT:    li a6, 4
+; RV32I-WITHFP-NEXT:    li a7, 14
 ; RV32I-WITHFP-NEXT:    sw t0, -32(s0)
 ; RV32I-WITHFP-NEXT:    call callee_aligned_stack@plt
 ; RV32I-WITHFP-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
@@ -242,7 +242,7 @@ define double @callee_small_scalar_ret() nounwind {
 ; RV32I-FPELIM-LABEL: callee_small_scalar_ret:
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    lui a1, 261888
-; RV32I-FPELIM-NEXT:    mv a0, zero
+; RV32I-FPELIM-NEXT:    li a0, 0
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: callee_small_scalar_ret:
@@ -252,7 +252,7 @@ define double @callee_small_scalar_ret() nounwind {
 ; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    lui a1, 261888
-; RV32I-WITHFP-NEXT:    mv a0, zero
+; RV32I-WITHFP-NEXT:    li a0, 0
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
index 978bc9965de7..7bf1dee701f9 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -51,9 +51,9 @@ define i32 @caller_i64_in_regs() nounwind {
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
 ; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-FPELIM-NEXT:    addi a0, zero, 1
-; RV32I-FPELIM-NEXT:    addi a1, zero, 2
-; RV32I-FPELIM-NEXT:    mv a2, zero
+; RV32I-FPELIM-NEXT:    li a0, 1
+; RV32I-FPELIM-NEXT:    li a1, 2
+; RV32I-FPELIM-NEXT:    li a2, 0
 ; RV32I-FPELIM-NEXT:    call callee_i64_in_regs@plt
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
@@ -65,9 +65,9 @@ define i32 @caller_i64_in_regs() nounwind {
 ; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
-; RV32I-WITHFP-NEXT:    addi a0, zero, 1
-; RV32I-WITHFP-NEXT:    addi a1, zero, 2
-; RV32I-WITHFP-NEXT:    mv a2, zero
+; RV32I-WITHFP-NEXT:    li a0, 1
+; RV32I-WITHFP-NEXT:    li a1, 2
+; RV32I-WITHFP-NEXT:    li a2, 0
 ; RV32I-WITHFP-NEXT:    call callee_i64_in_regs@plt
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -144,17 +144,17 @@ define i32 @caller_many_scalars() nounwind {
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
 ; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-FPELIM-NEXT:    addi a0, zero, 8
+; RV32I-FPELIM-NEXT:    li a0, 8
 ; RV32I-FPELIM-NEXT:    sw a0, 4(sp)
-; RV32I-FPELIM-NEXT:    addi a0, zero, 1
-; RV32I-FPELIM-NEXT:    addi a1, zero, 2
-; RV32I-FPELIM-NEXT:    addi a2, zero, 3
-; RV32I-FPELIM-NEXT:    addi a3, zero, 4
-; RV32I-FPELIM-NEXT:    addi a5, zero, 5
-; RV32I-FPELIM-NEXT:    addi a6, zero, 6
-; RV32I-FPELIM-NEXT:    addi a7, zero, 7
+; RV32I-FPELIM-NEXT:    li a0, 1
+; RV32I-FPELIM-NEXT:    li a1, 2
+; RV32I-FPELIM-NEXT:    li a2, 3
+; RV32I-FPELIM-NEXT:    li a3, 4
+; RV32I-FPELIM-NEXT:    li a5, 5
+; RV32I-FPELIM-NEXT:    li a6, 6
+; RV32I-FPELIM-NEXT:    li a7, 7
 ; RV32I-FPELIM-NEXT:    sw zero, 0(sp)
-; RV32I-FPELIM-NEXT:    mv a4, zero
+; RV32I-FPELIM-NEXT:    li a4, 0
 ; RV32I-FPELIM-NEXT:    call callee_many_scalars@plt
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
@@ -166,17 +166,17 @@ define i32 @caller_many_scalars() nounwind {
 ; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
-; RV32I-WITHFP-NEXT:    addi a0, zero, 8
+; RV32I-WITHFP-NEXT:    li a0, 8
 ; RV32I-WITHFP-NEXT:    sw a0, 4(sp)
-; RV32I-WITHFP-NEXT:    addi a0, zero, 1
-; RV32I-WITHFP-NEXT:    addi a1, zero, 2
-; RV32I-WITHFP-NEXT:    addi a2, zero, 3
-; RV32I-WITHFP-NEXT:    addi a3, zero, 4
-; RV32I-WITHFP-NEXT:    addi a5, zero, 5
-; RV32I-WITHFP-NEXT:    addi a6, zero, 6
-; RV32I-WITHFP-NEXT:    addi a7, zero, 7
+; RV32I-WITHFP-NEXT:    li a0, 1
+; RV32I-WITHFP-NEXT:    li a1, 2
+; RV32I-WITHFP-NEXT:    li a2, 3
+; RV32I-WITHFP-NEXT:    li a3, 4
+; RV32I-WITHFP-NEXT:    li a5, 5
+; RV32I-WITHFP-NEXT:    li a6, 6
+; RV32I-WITHFP-NEXT:    li a7, 7
 ; RV32I-WITHFP-NEXT:    sw zero, 0(sp)
-; RV32I-WITHFP-NEXT:    mv a4, zero
+; RV32I-WITHFP-NEXT:    li a4, 0
 ; RV32I-WITHFP-NEXT:    call callee_many_scalars@plt
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -255,7 +255,7 @@ define i32 @caller_large_scalars() nounwind {
 ; RV32I-FPELIM-NEXT:    sw zero, 36(sp)
 ; RV32I-FPELIM-NEXT:    sw zero, 32(sp)
 ; RV32I-FPELIM-NEXT:    sw zero, 28(sp)
-; RV32I-FPELIM-NEXT:    addi a2, zero, 1
+; RV32I-FPELIM-NEXT:    li a2, 1
 ; RV32I-FPELIM-NEXT:    addi a0, sp, 24
 ; RV32I-FPELIM-NEXT:    mv a1, sp
 ; RV32I-FPELIM-NEXT:    sw a2, 24(sp)
@@ -278,7 +278,7 @@ define i32 @caller_large_scalars() nounwind {
 ; RV32I-WITHFP-NEXT:    sw zero, -12(s0)
 ; RV32I-WITHFP-NEXT:    sw zero, -16(s0)
 ; RV32I-WITHFP-NEXT:    sw zero, -20(s0)
-; RV32I-WITHFP-NEXT:    addi a2, zero, 1
+; RV32I-WITHFP-NEXT:    li a2, 1
 ; RV32I-WITHFP-NEXT:    addi a0, s0, -24
 ; RV32I-WITHFP-NEXT:    addi a1, s0, -48
 ; RV32I-WITHFP-NEXT:    sw a2, -24(s0)
@@ -357,7 +357,7 @@ define i32 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV32I-FPELIM-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    addi a0, sp, 16
 ; RV32I-FPELIM-NEXT:    sw a0, 4(sp)
-; RV32I-FPELIM-NEXT:    addi a0, zero, 9
+; RV32I-FPELIM-NEXT:    li a0, 9
 ; RV32I-FPELIM-NEXT:    sw a0, 0(sp)
 ; RV32I-FPELIM-NEXT:    lui a0, 524272
 ; RV32I-FPELIM-NEXT:    sw a0, 28(sp)
@@ -367,14 +367,14 @@ define i32 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV32I-FPELIM-NEXT:    sw zero, 52(sp)
 ; RV32I-FPELIM-NEXT:    sw zero, 48(sp)
 ; RV32I-FPELIM-NEXT:    sw zero, 44(sp)
-; RV32I-FPELIM-NEXT:    addi t0, zero, 8
-; RV32I-FPELIM-NEXT:    addi a0, zero, 1
-; RV32I-FPELIM-NEXT:    addi a1, zero, 2
-; RV32I-FPELIM-NEXT:    addi a2, zero, 3
-; RV32I-FPELIM-NEXT:    addi a3, zero, 4
-; RV32I-FPELIM-NEXT:    addi a4, zero, 5
-; RV32I-FPELIM-NEXT:    addi a5, zero, 6
-; RV32I-FPELIM-NEXT:    addi a6, zero, 7
+; RV32I-FPELIM-NEXT:    li t0, 8
+; RV32I-FPELIM-NEXT:    li a0, 1
+; RV32I-FPELIM-NEXT:    li a1, 2
+; RV32I-FPELIM-NEXT:    li a2, 3
+; RV32I-FPELIM-NEXT:    li a3, 4
+; RV32I-FPELIM-NEXT:    li a4, 5
+; RV32I-FPELIM-NEXT:    li a5, 6
+; RV32I-FPELIM-NEXT:    li a6, 7
 ; RV32I-FPELIM-NEXT:    addi a7, sp, 40
 ; RV32I-FPELIM-NEXT:    sw t0, 40(sp)
 ; RV32I-FPELIM-NEXT:    call callee_large_scalars_exhausted_regs@plt
@@ -390,7 +390,7 @@ define i32 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 64
 ; RV32I-WITHFP-NEXT:    addi a0, s0, -48
 ; RV32I-WITHFP-NEXT:    sw a0, 4(sp)
-; RV32I-WITHFP-NEXT:    addi a0, zero, 9
+; RV32I-WITHFP-NEXT:    li a0, 9
 ; RV32I-WITHFP-NEXT:    sw a0, 0(sp)
 ; RV32I-WITHFP-NEXT:    lui a0, 524272
 ; RV32I-WITHFP-NEXT:    sw a0, -36(s0)
@@ -400,14 +400,14 @@ define i32 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV32I-WITHFP-NEXT:    sw zero, -12(s0)
 ; RV32I-WITHFP-NEXT:    sw zero, -16(s0)
 ; RV32I-WITHFP-NEXT:    sw zero, -20(s0)
-; RV32I-WITHFP-NEXT:    addi t0, zero, 8
-; RV32I-WITHFP-NEXT:    addi a0, zero, 1
-; RV32I-WITHFP-NEXT:    addi a1, zero, 2
-; RV32I-WITHFP-NEXT:    addi a2, zero, 3
-; RV32I-WITHFP-NEXT:    addi a3, zero, 4
-; RV32I-WITHFP-NEXT:    addi a4, zero, 5
-; RV32I-WITHFP-NEXT:    addi a5, zero, 6
-; RV32I-WITHFP-NEXT:    addi a6, zero, 7
+; RV32I-WITHFP-NEXT:    li t0, 8
+; RV32I-WITHFP-NEXT:    li a0, 1
+; RV32I-WITHFP-NEXT:    li a1, 2
+; RV32I-WITHFP-NEXT:    li a2, 3
+; RV32I-WITHFP-NEXT:    li a3, 4
+; RV32I-WITHFP-NEXT:    li a4, 5
+; RV32I-WITHFP-NEXT:    li a5, 6
+; RV32I-WITHFP-NEXT:    li a6, 7
 ; RV32I-WITHFP-NEXT:    addi a7, s0, -24
 ; RV32I-WITHFP-NEXT:    sw t0, -24(s0)
 ; RV32I-WITHFP-NEXT:    call callee_large_scalars_exhausted_regs@plt
@@ -493,8 +493,8 @@ define i32 @caller_small_coerced_struct() nounwind {
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
 ; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-FPELIM-NEXT:    addi a0, zero, 1
-; RV32I-FPELIM-NEXT:    addi a1, zero, 2
+; RV32I-FPELIM-NEXT:    li a0, 1
+; RV32I-FPELIM-NEXT:    li a1, 2
 ; RV32I-FPELIM-NEXT:    call callee_small_coerced_struct@plt
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
@@ -506,8 +506,8 @@ define i32 @caller_small_coerced_struct() nounwind {
 ; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
-; RV32I-WITHFP-NEXT:    addi a0, zero, 1
-; RV32I-WITHFP-NEXT:    addi a1, zero, 2
+; RV32I-WITHFP-NEXT:    li a0, 1
+; RV32I-WITHFP-NEXT:    li a1, 2
 ; RV32I-WITHFP-NEXT:    call callee_small_coerced_struct@plt
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -555,13 +555,13 @@ define i32 @caller_large_struct() nounwind {
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -48
 ; RV32I-FPELIM-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
-; RV32I-FPELIM-NEXT:    addi a0, zero, 1
+; RV32I-FPELIM-NEXT:    li a0, 1
 ; RV32I-FPELIM-NEXT:    sw a0, 24(sp)
-; RV32I-FPELIM-NEXT:    addi a1, zero, 2
+; RV32I-FPELIM-NEXT:    li a1, 2
 ; RV32I-FPELIM-NEXT:    sw a1, 28(sp)
-; RV32I-FPELIM-NEXT:    addi a2, zero, 3
+; RV32I-FPELIM-NEXT:    li a2, 3
 ; RV32I-FPELIM-NEXT:    sw a2, 32(sp)
-; RV32I-FPELIM-NEXT:    addi a3, zero, 4
+; RV32I-FPELIM-NEXT:    li a3, 4
 ; RV32I-FPELIM-NEXT:    sw a3, 36(sp)
 ; RV32I-FPELIM-NEXT:    sw a0, 8(sp)
 ; RV32I-FPELIM-NEXT:    sw a1, 12(sp)
@@ -579,13 +579,13 @@ define i32 @caller_large_struct() nounwind {
 ; RV32I-WITHFP-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 48
-; RV32I-WITHFP-NEXT:    addi a0, zero, 1
+; RV32I-WITHFP-NEXT:    li a0, 1
 ; RV32I-WITHFP-NEXT:    sw a0, -24(s0)
-; RV32I-WITHFP-NEXT:    addi a1, zero, 2
+; RV32I-WITHFP-NEXT:    li a1, 2
 ; RV32I-WITHFP-NEXT:    sw a1, -20(s0)
-; RV32I-WITHFP-NEXT:    addi a2, zero, 3
+; RV32I-WITHFP-NEXT:    li a2, 3
 ; RV32I-WITHFP-NEXT:    sw a2, -16(s0)
-; RV32I-WITHFP-NEXT:    addi a3, zero, 4
+; RV32I-WITHFP-NEXT:    li a3, 4
 ; RV32I-WITHFP-NEXT:    sw a3, -12(s0)
 ; RV32I-WITHFP-NEXT:    sw a0, -40(s0)
 ; RV32I-WITHFP-NEXT:    sw a1, -36(s0)
@@ -669,16 +669,16 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -64
 ; RV32I-FPELIM-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
-; RV32I-FPELIM-NEXT:    addi a0, zero, 19
+; RV32I-FPELIM-NEXT:    li a0, 19
 ; RV32I-FPELIM-NEXT:    sw a0, 24(sp)
-; RV32I-FPELIM-NEXT:    addi a0, zero, 18
+; RV32I-FPELIM-NEXT:    li a0, 18
 ; RV32I-FPELIM-NEXT:    sw a0, 20(sp)
-; RV32I-FPELIM-NEXT:    addi a0, zero, 17
+; RV32I-FPELIM-NEXT:    li a0, 17
 ; RV32I-FPELIM-NEXT:    sw a0, 16(sp)
 ; RV32I-FPELIM-NEXT:    sw zero, 12(sp)
-; RV32I-FPELIM-NEXT:    addi a0, zero, 16
+; RV32I-FPELIM-NEXT:    li a0, 16
 ; RV32I-FPELIM-NEXT:    sw a0, 8(sp)
-; RV32I-FPELIM-NEXT:    addi a0, zero, 15
+; RV32I-FPELIM-NEXT:    li a0, 15
 ; RV32I-FPELIM-NEXT:    sw a0, 0(sp)
 ; RV32I-FPELIM-NEXT:    lui a0, 262153
 ; RV32I-FPELIM-NEXT:    addi a0, a0, 491
@@ -693,13 +693,13 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-FPELIM-NEXT:    addi t0, a0, 1311
 ; RV32I-FPELIM-NEXT:    lui a0, 688509
 ; RV32I-FPELIM-NEXT:    addi a5, a0, -2048
-; RV32I-FPELIM-NEXT:    addi a0, zero, 1
-; RV32I-FPELIM-NEXT:    addi a1, zero, 11
+; RV32I-FPELIM-NEXT:    li a0, 1
+; RV32I-FPELIM-NEXT:    li a1, 11
 ; RV32I-FPELIM-NEXT:    addi a2, sp, 32
-; RV32I-FPELIM-NEXT:    addi a3, zero, 12
-; RV32I-FPELIM-NEXT:    addi a4, zero, 13
-; RV32I-FPELIM-NEXT:    addi a6, zero, 4
-; RV32I-FPELIM-NEXT:    addi a7, zero, 14
+; RV32I-FPELIM-NEXT:    li a3, 12
+; RV32I-FPELIM-NEXT:    li a4, 13
+; RV32I-FPELIM-NEXT:    li a6, 4
+; RV32I-FPELIM-NEXT:    li a7, 14
 ; RV32I-FPELIM-NEXT:    sw t0, 32(sp)
 ; RV32I-FPELIM-NEXT:    call callee_aligned_stack@plt
 ; RV32I-FPELIM-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
@@ -712,16 +712,16 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-WITHFP-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 64
-; RV32I-WITHFP-NEXT:    addi a0, zero, 19
+; RV32I-WITHFP-NEXT:    li a0, 19
 ; RV32I-WITHFP-NEXT:    sw a0, 24(sp)
-; RV32I-WITHFP-NEXT:    addi a0, zero, 18
+; RV32I-WITHFP-NEXT:    li a0, 18
 ; RV32I-WITHFP-NEXT:    sw a0, 20(sp)
-; RV32I-WITHFP-NEXT:    addi a0, zero, 17
+; RV32I-WITHFP-NEXT:    li a0, 17
 ; RV32I-WITHFP-NEXT:    sw a0, 16(sp)
 ; RV32I-WITHFP-NEXT:    sw zero, 12(sp)
-; RV32I-WITHFP-NEXT:    addi a0, zero, 16
+; RV32I-WITHFP-NEXT:    li a0, 16
 ; RV32I-WITHFP-NEXT:    sw a0, 8(sp)
-; RV32I-WITHFP-NEXT:    addi a0, zero, 15
+; RV32I-WITHFP-NEXT:    li a0, 15
 ; RV32I-WITHFP-NEXT:    sw a0, 0(sp)
 ; RV32I-WITHFP-NEXT:    lui a0, 262153
 ; RV32I-WITHFP-NEXT:    addi a0, a0, 491
@@ -736,13 +736,13 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-WITHFP-NEXT:    addi t0, a0, 1311
 ; RV32I-WITHFP-NEXT:    lui a0, 688509
 ; RV32I-WITHFP-NEXT:    addi a5, a0, -2048
-; RV32I-WITHFP-NEXT:    addi a0, zero, 1
-; RV32I-WITHFP-NEXT:    addi a1, zero, 11
+; RV32I-WITHFP-NEXT:    li a0, 1
+; RV32I-WITHFP-NEXT:    li a1, 11
 ; RV32I-WITHFP-NEXT:    addi a2, s0, -32
-; RV32I-WITHFP-NEXT:    addi a3, zero, 12
-; RV32I-WITHFP-NEXT:    addi a4, zero, 13
-; RV32I-WITHFP-NEXT:    addi a6, zero, 4
-; RV32I-WITHFP-NEXT:    addi a7, zero, 14
+; RV32I-WITHFP-NEXT:    li a3, 12
+; RV32I-WITHFP-NEXT:    li a4, 13
+; RV32I-WITHFP-NEXT:    li a6, 4
+; RV32I-WITHFP-NEXT:    li a7, 14
 ; RV32I-WITHFP-NEXT:    sw t0, -32(s0)
 ; RV32I-WITHFP-NEXT:    call callee_aligned_stack@plt
 ; RV32I-WITHFP-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
@@ -763,7 +763,7 @@ define i64 @callee_small_scalar_ret() nounwind {
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    lui a0, 466866
 ; RV32I-FPELIM-NEXT:    addi a0, a0, 1677
-; RV32I-FPELIM-NEXT:    addi a1, zero, 287
+; RV32I-FPELIM-NEXT:    li a1, 287
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: callee_small_scalar_ret:
@@ -774,7 +774,7 @@ define i64 @callee_small_scalar_ret() nounwind {
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    lui a0, 466866
 ; RV32I-WITHFP-NEXT:    addi a0, a0, 1677
-; RV32I-WITHFP-NEXT:    addi a1, zero, 287
+; RV32I-WITHFP-NEXT:    li a1, 287
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
@@ -830,8 +830,8 @@ define i32 @caller_small_scalar_ret() nounwind {
 define %struct.small @callee_small_struct_ret() nounwind {
 ; RV32I-FPELIM-LABEL: callee_small_struct_ret:
 ; RV32I-FPELIM:       # %bb.0:
-; RV32I-FPELIM-NEXT:    addi a0, zero, 1
-; RV32I-FPELIM-NEXT:    mv a1, zero
+; RV32I-FPELIM-NEXT:    li a0, 1
+; RV32I-FPELIM-NEXT:    li a1, 0
 ; RV32I-FPELIM-NEXT:    ret
 ;
 ; RV32I-WITHFP-LABEL: callee_small_struct_ret:
@@ -840,8 +840,8 @@ define %struct.small @callee_small_struct_ret() nounwind {
 ; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
-; RV32I-WITHFP-NEXT:    addi a0, zero, 1
-; RV32I-WITHFP-NEXT:    mv a1, zero
+; RV32I-WITHFP-NEXT:    li a0, 1
+; RV32I-WITHFP-NEXT:    li a1, 0
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 16
@@ -942,13 +942,13 @@ define void @caller_large_scalar_ret() nounwind {
 define void @callee_large_struct_ret(%struct.large* noalias sret(%struct.large) %agg.result) nounwind {
 ; RV32I-FPELIM-LABEL: callee_large_struct_ret:
 ; RV32I-FPELIM:       # %bb.0:
-; RV32I-FPELIM-NEXT:    addi a1, zero, 1
+; RV32I-FPELIM-NEXT:    li a1, 1
 ; RV32I-FPELIM-NEXT:    sw a1, 0(a0)
-; RV32I-FPELIM-NEXT:    addi a1, zero, 2
+; RV32I-FPELIM-NEXT:    li a1, 2
 ; RV32I-FPELIM-NEXT:    sw a1, 4(a0)
-; RV32I-FPELIM-NEXT:    addi a1, zero, 3
+; RV32I-FPELIM-NEXT:    li a1, 3
 ; RV32I-FPELIM-NEXT:    sw a1, 8(a0)
-; RV32I-FPELIM-NEXT:    addi a1, zero, 4
+; RV32I-FPELIM-NEXT:    li a1, 4
 ; RV32I-FPELIM-NEXT:    sw a1, 12(a0)
 ; RV32I-FPELIM-NEXT:    ret
 ;
@@ -958,13 +958,13 @@ define void @callee_large_struct_ret(%struct.large* noalias sret(%struct.large)
 ; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
-; RV32I-WITHFP-NEXT:    addi a1, zero, 1
+; RV32I-WITHFP-NEXT:    li a1, 1
 ; RV32I-WITHFP-NEXT:    sw a1, 0(a0)
-; RV32I-WITHFP-NEXT:    addi a1, zero, 2
+; RV32I-WITHFP-NEXT:    li a1, 2
 ; RV32I-WITHFP-NEXT:    sw a1, 4(a0)
-; RV32I-WITHFP-NEXT:    addi a1, zero, 3
+; RV32I-WITHFP-NEXT:    li a1, 3
 ; RV32I-WITHFP-NEXT:    sw a1, 8(a0)
-; RV32I-WITHFP-NEXT:    addi a1, zero, 4
+; RV32I-WITHFP-NEXT:    li a1, 4
 ; RV32I-WITHFP-NEXT:    sw a1, 12(a0)
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
index 67d3bde9444b..8279f4d8abc7 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
@@ -53,7 +53,7 @@ define i32 @caller_float_in_regs() nounwind {
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
 ; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-FPELIM-NEXT:    addi a0, zero, 1
+; RV32I-FPELIM-NEXT:    li a0, 1
 ; RV32I-FPELIM-NEXT:    lui a1, 262144
 ; RV32I-FPELIM-NEXT:    call callee_float_in_regs@plt
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -66,7 +66,7 @@ define i32 @caller_float_in_regs() nounwind {
 ; RV32I-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
-; RV32I-WITHFP-NEXT:    addi a0, zero, 1
+; RV32I-WITHFP-NEXT:    li a0, 1
 ; RV32I-WITHFP-NEXT:    lui a1, 262144
 ; RV32I-WITHFP-NEXT:    call callee_float_in_regs@plt
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -108,15 +108,15 @@ define i32 @caller_float_on_stack() nounwind {
 ; RV32I-FPELIM-NEXT:    addi sp, sp, -16
 ; RV32I-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-FPELIM-NEXT:    lui a1, 264704
-; RV32I-FPELIM-NEXT:    addi a0, zero, 1
-; RV32I-FPELIM-NEXT:    addi a2, zero, 2
-; RV32I-FPELIM-NEXT:    addi a4, zero, 3
-; RV32I-FPELIM-NEXT:    addi a6, zero, 4
+; RV32I-FPELIM-NEXT:    li a0, 1
+; RV32I-FPELIM-NEXT:    li a2, 2
+; RV32I-FPELIM-NEXT:    li a4, 3
+; RV32I-FPELIM-NEXT:    li a6, 4
 ; RV32I-FPELIM-NEXT:    sw a1, 0(sp)
-; RV32I-FPELIM-NEXT:    mv a1, zero
-; RV32I-FPELIM-NEXT:    mv a3, zero
-; RV32I-FPELIM-NEXT:    mv a5, zero
-; RV32I-FPELIM-NEXT:    mv a7, zero
+; RV32I-FPELIM-NEXT:    li a1, 0
+; RV32I-FPELIM-NEXT:    li a3, 0
+; RV32I-FPELIM-NEXT:    li a5, 0
+; RV32I-FPELIM-NEXT:    li a7, 0
 ; RV32I-FPELIM-NEXT:    call callee_float_on_stack@plt
 ; RV32I-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 16
@@ -129,15 +129,15 @@ define i32 @caller_float_on_stack() nounwind {
 ; RV32I-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    lui a1, 264704
-; RV32I-WITHFP-NEXT:    addi a0, zero, 1
-; RV32I-WITHFP-NEXT:    addi a2, zero, 2
-; RV32I-WITHFP-NEXT:    addi a4, zero, 3
-; RV32I-WITHFP-NEXT:    addi a6, zero, 4
+; RV32I-WITHFP-NEXT:    li a0, 1
+; RV32I-WITHFP-NEXT:    li a2, 2
+; RV32I-WITHFP-NEXT:    li a4, 3
+; RV32I-WITHFP-NEXT:    li a6, 4
 ; RV32I-WITHFP-NEXT:    sw a1, 0(sp)
-; RV32I-WITHFP-NEXT:    mv a1, zero
-; RV32I-WITHFP-NEXT:    mv a3, zero
-; RV32I-WITHFP-NEXT:    mv a5, zero
-; RV32I-WITHFP-NEXT:    mv a7, zero
+; RV32I-WITHFP-NEXT:    li a1, 0
+; RV32I-WITHFP-NEXT:    li a3, 0
+; RV32I-WITHFP-NEXT:    li a5, 0
+; RV32I-WITHFP-NEXT:    li a7, 0
 ; RV32I-WITHFP-NEXT:    call callee_float_on_stack@plt
 ; RV32I-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
index ee08a851a70b..3a4e4a94cc7f 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
@@ -24,7 +24,7 @@ define i32 @caller_double_in_fpr() nounwind {
 ; RV32-ILP32D-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32D-NEXT:    lui a0, %hi(.LCPI1_0)
 ; RV32-ILP32D-NEXT:    fld fa0, %lo(.LCPI1_0)(a0)
-; RV32-ILP32D-NEXT:    addi a0, zero, 1
+; RV32-ILP32D-NEXT:    li a0, 1
 ; RV32-ILP32D-NEXT:    call callee_double_in_fpr@plt
 ; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
@@ -51,18 +51,18 @@ define i32 @caller_double_in_fpr_exhausted_gprs() nounwind {
 ; RV32-ILP32D:       # %bb.0:
 ; RV32-ILP32D-NEXT:    addi sp, sp, -16
 ; RV32-ILP32D-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32D-NEXT:    addi a1, zero, 5
+; RV32-ILP32D-NEXT:    li a1, 5
 ; RV32-ILP32D-NEXT:    lui a0, %hi(.LCPI3_0)
 ; RV32-ILP32D-NEXT:    fld fa0, %lo(.LCPI3_0)(a0)
-; RV32-ILP32D-NEXT:    addi a0, zero, 1
-; RV32-ILP32D-NEXT:    addi a2, zero, 2
-; RV32-ILP32D-NEXT:    addi a4, zero, 3
-; RV32-ILP32D-NEXT:    addi a6, zero, 4
+; RV32-ILP32D-NEXT:    li a0, 1
+; RV32-ILP32D-NEXT:    li a2, 2
+; RV32-ILP32D-NEXT:    li a4, 3
+; RV32-ILP32D-NEXT:    li a6, 4
 ; RV32-ILP32D-NEXT:    sw a1, 0(sp)
-; RV32-ILP32D-NEXT:    mv a1, zero
-; RV32-ILP32D-NEXT:    mv a3, zero
-; RV32-ILP32D-NEXT:    mv a5, zero
-; RV32-ILP32D-NEXT:    mv a7, zero
+; RV32-ILP32D-NEXT:    li a1, 0
+; RV32-ILP32D-NEXT:    li a3, 0
+; RV32-ILP32D-NEXT:    li a5, 0
+; RV32-ILP32D-NEXT:    li a7, 0
 ; RV32-ILP32D-NEXT:    call callee_double_in_fpr_exhausted_gprs@plt
 ; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
@@ -113,7 +113,7 @@ define i32 @caller_double_in_gpr_exhausted_fprs() nounwind {
 ; RV32-ILP32D-NEXT:    lui a0, %hi(.LCPI5_7)
 ; RV32-ILP32D-NEXT:    fld fa7, %lo(.LCPI5_7)(a0)
 ; RV32-ILP32D-NEXT:    lui a1, 262688
-; RV32-ILP32D-NEXT:    mv a0, zero
+; RV32-ILP32D-NEXT:    li a0, 0
 ; RV32-ILP32D-NEXT:    call callee_double_in_gpr_exhausted_fprs@plt
 ; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
@@ -164,15 +164,15 @@ define i32 @caller_double_in_gpr_and_stack_almost_exhausted_gprs_fprs() nounwind
 ; RV32-ILP32D-NEXT:    fld fa6, %lo(.LCPI7_6)(a0)
 ; RV32-ILP32D-NEXT:    lui a0, %hi(.LCPI7_7)
 ; RV32-ILP32D-NEXT:    fld fa7, %lo(.LCPI7_7)(a0)
-; RV32-ILP32D-NEXT:    addi a0, zero, 1
-; RV32-ILP32D-NEXT:    addi a2, zero, 3
-; RV32-ILP32D-NEXT:    addi a4, zero, 5
-; RV32-ILP32D-NEXT:    addi a6, zero, 7
+; RV32-ILP32D-NEXT:    li a0, 1
+; RV32-ILP32D-NEXT:    li a2, 3
+; RV32-ILP32D-NEXT:    li a4, 5
+; RV32-ILP32D-NEXT:    li a6, 7
 ; RV32-ILP32D-NEXT:    sw a1, 0(sp)
-; RV32-ILP32D-NEXT:    mv a1, zero
-; RV32-ILP32D-NEXT:    mv a3, zero
-; RV32-ILP32D-NEXT:    mv a5, zero
-; RV32-ILP32D-NEXT:    mv a7, zero
+; RV32-ILP32D-NEXT:    li a1, 0
+; RV32-ILP32D-NEXT:    li a3, 0
+; RV32-ILP32D-NEXT:    li a5, 0
+; RV32-ILP32D-NEXT:    li a7, 0
 ; RV32-ILP32D-NEXT:    call callee_double_in_gpr_and_stack_almost_exhausted_gprs_fprs@plt
 ; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16
@@ -221,15 +221,15 @@ define i32 @caller_double_on_stack_exhausted_gprs_fprs() nounwind {
 ; RV32-ILP32D-NEXT:    fld fa6, %lo(.LCPI9_6)(a0)
 ; RV32-ILP32D-NEXT:    lui a0, %hi(.LCPI9_7)
 ; RV32-ILP32D-NEXT:    fld fa7, %lo(.LCPI9_7)(a0)
-; RV32-ILP32D-NEXT:    addi a0, zero, 1
-; RV32-ILP32D-NEXT:    addi a2, zero, 3
-; RV32-ILP32D-NEXT:    addi a4, zero, 5
-; RV32-ILP32D-NEXT:    addi a6, zero, 7
+; RV32-ILP32D-NEXT:    li a0, 1
+; RV32-ILP32D-NEXT:    li a2, 3
+; RV32-ILP32D-NEXT:    li a4, 5
+; RV32-ILP32D-NEXT:    li a6, 7
 ; RV32-ILP32D-NEXT:    sw zero, 0(sp)
-; RV32-ILP32D-NEXT:    mv a1, zero
-; RV32-ILP32D-NEXT:    mv a3, zero
-; RV32-ILP32D-NEXT:    mv a5, zero
-; RV32-ILP32D-NEXT:    mv a7, zero
+; RV32-ILP32D-NEXT:    li a1, 0
+; RV32-ILP32D-NEXT:    li a3, 0
+; RV32-ILP32D-NEXT:    li a5, 0
+; RV32-ILP32D-NEXT:    li a7, 0
 ; RV32-ILP32D-NEXT:    call callee_double_on_stack_exhausted_gprs_fprs@plt
 ; RV32-ILP32D-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32D-NEXT:    addi sp, sp, 16

diff  --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
index 2ceda820780e..8d0ffa7547e4 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
@@ -27,7 +27,7 @@ define i32 @caller_float_in_fpr() nounwind {
 ; RV32-ILP32FD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32FD-NEXT:    lui a0, %hi(.LCPI1_0)
 ; RV32-ILP32FD-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
-; RV32-ILP32FD-NEXT:    addi a0, zero, 1
+; RV32-ILP32FD-NEXT:    li a0, 1
 ; RV32-ILP32FD-NEXT:    call callee_float_in_fpr@plt
 ; RV32-ILP32FD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32FD-NEXT:    addi sp, sp, 16
@@ -54,18 +54,18 @@ define i32 @caller_float_in_fpr_exhausted_gprs() nounwind {
 ; RV32-ILP32FD:       # %bb.0:
 ; RV32-ILP32FD-NEXT:    addi sp, sp, -16
 ; RV32-ILP32FD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32FD-NEXT:    addi a1, zero, 5
+; RV32-ILP32FD-NEXT:    li a1, 5
 ; RV32-ILP32FD-NEXT:    lui a0, %hi(.LCPI3_0)
 ; RV32-ILP32FD-NEXT:    flw fa0, %lo(.LCPI3_0)(a0)
-; RV32-ILP32FD-NEXT:    addi a0, zero, 1
-; RV32-ILP32FD-NEXT:    addi a2, zero, 2
-; RV32-ILP32FD-NEXT:    addi a4, zero, 3
-; RV32-ILP32FD-NEXT:    addi a6, zero, 4
+; RV32-ILP32FD-NEXT:    li a0, 1
+; RV32-ILP32FD-NEXT:    li a2, 2
+; RV32-ILP32FD-NEXT:    li a4, 3
+; RV32-ILP32FD-NEXT:    li a6, 4
 ; RV32-ILP32FD-NEXT:    sw a1, 0(sp)
-; RV32-ILP32FD-NEXT:    mv a1, zero
-; RV32-ILP32FD-NEXT:    mv a3, zero
-; RV32-ILP32FD-NEXT:    mv a5, zero
-; RV32-ILP32FD-NEXT:    mv a7, zero
+; RV32-ILP32FD-NEXT:    li a1, 0
+; RV32-ILP32FD-NEXT:    li a3, 0
+; RV32-ILP32FD-NEXT:    li a5, 0
+; RV32-ILP32FD-NEXT:    li a7, 0
 ; RV32-ILP32FD-NEXT:    call callee_float_in_fpr_exhausted_gprs@plt
 ; RV32-ILP32FD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32FD-NEXT:    addi sp, sp, 16
@@ -158,15 +158,15 @@ define i32 @caller_float_on_stack_exhausted_gprs_fprs() nounwind {
 ; RV32-ILP32FD-NEXT:    flw fa6, %lo(.LCPI7_6)(a0)
 ; RV32-ILP32FD-NEXT:    lui a0, %hi(.LCPI7_7)
 ; RV32-ILP32FD-NEXT:    flw fa7, %lo(.LCPI7_7)(a0)
-; RV32-ILP32FD-NEXT:    addi a0, zero, 1
-; RV32-ILP32FD-NEXT:    addi a2, zero, 3
-; RV32-ILP32FD-NEXT:    addi a4, zero, 5
-; RV32-ILP32FD-NEXT:    addi a6, zero, 7
+; RV32-ILP32FD-NEXT:    li a0, 1
+; RV32-ILP32FD-NEXT:    li a2, 3
+; RV32-ILP32FD-NEXT:    li a4, 5
+; RV32-ILP32FD-NEXT:    li a6, 7
 ; RV32-ILP32FD-NEXT:    sw a1, 0(sp)
-; RV32-ILP32FD-NEXT:    mv a1, zero
-; RV32-ILP32FD-NEXT:    mv a3, zero
-; RV32-ILP32FD-NEXT:    mv a5, zero
-; RV32-ILP32FD-NEXT:    mv a7, zero
+; RV32-ILP32FD-NEXT:    li a1, 0
+; RV32-ILP32FD-NEXT:    li a3, 0
+; RV32-ILP32FD-NEXT:    li a5, 0
+; RV32-ILP32FD-NEXT:    li a7, 0
 ; RV32-ILP32FD-NEXT:    call callee_float_on_stack_exhausted_gprs_fprs@plt
 ; RV32-ILP32FD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-ILP32FD-NEXT:    addi sp, sp, 16

diff  --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
index 0dbffeb95a3c..05f9cb41eaa0 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
@@ -33,9 +33,9 @@ define i64 @caller_double_in_regs() nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    slli a1, a0, 62
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    call callee_double_in_regs@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -47,7 +47,7 @@ define i64 @caller_double_in_regs() nounwind {
 define double @callee_double_ret() nounwind {
 ; RV64I-LABEL: callee_double_ret:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1023
+; RV64I-NEXT:    li a0, 1023
 ; RV64I-NEXT:    slli a0, a0, 52
 ; RV64I-NEXT:    ret
   ret double 1.0

diff  --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
index 84dfed0d57e0..dcd56ac6c574 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -32,9 +32,9 @@ define i64 @caller_i128_in_regs() nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a0, zero, 1
-; RV64I-NEXT:    addi a1, zero, 2
-; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    li a0, 1
+; RV64I-NEXT:    li a1, 2
+; RV64I-NEXT:    li a2, 0
 ; RV64I-NEXT:    call callee_i128_in_regs@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -83,17 +83,17 @@ define i32 @caller_many_scalars() nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -32
 ; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a0, zero, 8
+; RV64I-NEXT:    li a0, 8
 ; RV64I-NEXT:    sd a0, 8(sp)
-; RV64I-NEXT:    addi a0, zero, 1
-; RV64I-NEXT:    addi a1, zero, 2
-; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a5, zero, 5
-; RV64I-NEXT:    addi a6, zero, 6
-; RV64I-NEXT:    addi a7, zero, 7
+; RV64I-NEXT:    li a0, 1
+; RV64I-NEXT:    li a1, 2
+; RV64I-NEXT:    li a2, 3
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a5, 5
+; RV64I-NEXT:    li a6, 6
+; RV64I-NEXT:    li a7, 7
 ; RV64I-NEXT:    sd zero, 0(sp)
-; RV64I-NEXT:    mv a4, zero
+; RV64I-NEXT:    li a4, 0
 ; RV64I-NEXT:    call callee_many_scalars@plt
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 32
@@ -137,12 +137,12 @@ define i64 @caller_large_scalars() nounwind {
 ; RV64I-NEXT:    sd zero, 24(sp)
 ; RV64I-NEXT:    sd zero, 16(sp)
 ; RV64I-NEXT:    sd zero, 8(sp)
-; RV64I-NEXT:    addi a0, zero, 2
+; RV64I-NEXT:    li a0, 2
 ; RV64I-NEXT:    sd a0, 0(sp)
 ; RV64I-NEXT:    sd zero, 56(sp)
 ; RV64I-NEXT:    sd zero, 48(sp)
 ; RV64I-NEXT:    sd zero, 40(sp)
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    addi a0, sp, 32
 ; RV64I-NEXT:    mv a1, sp
 ; RV64I-NEXT:    sd a2, 32(sp)
@@ -191,24 +191,24 @@ define i64 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV64I-NEXT:    sd ra, 88(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addi a0, sp, 16
 ; RV64I-NEXT:    sd a0, 8(sp)
-; RV64I-NEXT:    addi a0, zero, 9
+; RV64I-NEXT:    li a0, 9
 ; RV64I-NEXT:    sd a0, 0(sp)
 ; RV64I-NEXT:    sd zero, 40(sp)
 ; RV64I-NEXT:    sd zero, 32(sp)
 ; RV64I-NEXT:    sd zero, 24(sp)
-; RV64I-NEXT:    addi a0, zero, 10
+; RV64I-NEXT:    li a0, 10
 ; RV64I-NEXT:    sd a0, 16(sp)
 ; RV64I-NEXT:    sd zero, 72(sp)
 ; RV64I-NEXT:    sd zero, 64(sp)
 ; RV64I-NEXT:    sd zero, 56(sp)
-; RV64I-NEXT:    addi t0, zero, 8
-; RV64I-NEXT:    addi a0, zero, 1
-; RV64I-NEXT:    addi a1, zero, 2
-; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 5
-; RV64I-NEXT:    addi a5, zero, 6
-; RV64I-NEXT:    addi a6, zero, 7
+; RV64I-NEXT:    li t0, 8
+; RV64I-NEXT:    li a0, 1
+; RV64I-NEXT:    li a1, 2
+; RV64I-NEXT:    li a2, 3
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 5
+; RV64I-NEXT:    li a5, 6
+; RV64I-NEXT:    li a6, 7
 ; RV64I-NEXT:    addi a7, sp, 48
 ; RV64I-NEXT:    sd t0, 48(sp)
 ; RV64I-NEXT:    call callee_large_scalars_exhausted_regs@plt
@@ -260,8 +260,8 @@ define i64 @caller_small_coerced_struct() nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a0, zero, 1
-; RV64I-NEXT:    addi a1, zero, 2
+; RV64I-NEXT:    li a0, 1
+; RV64I-NEXT:    li a1, 2
 ; RV64I-NEXT:    call callee_small_coerced_struct@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -294,13 +294,13 @@ define i64 @caller_large_struct() nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -80
 ; RV64I-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    sd a0, 40(sp)
-; RV64I-NEXT:    addi a1, zero, 2
+; RV64I-NEXT:    li a1, 2
 ; RV64I-NEXT:    sd a1, 48(sp)
-; RV64I-NEXT:    addi a2, zero, 3
+; RV64I-NEXT:    li a2, 3
 ; RV64I-NEXT:    sd a2, 56(sp)
-; RV64I-NEXT:    addi a3, zero, 4
+; RV64I-NEXT:    li a3, 4
 ; RV64I-NEXT:    sd a3, 64(sp)
 ; RV64I-NEXT:    sd a0, 8(sp)
 ; RV64I-NEXT:    sd a1, 16(sp)
@@ -360,25 +360,25 @@ define void @caller_aligned_stack() nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -64
 ; RV64I-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a0, zero, 12
+; RV64I-NEXT:    li a0, 12
 ; RV64I-NEXT:    sd a0, 48(sp)
-; RV64I-NEXT:    addi a0, zero, 11
+; RV64I-NEXT:    li a0, 11
 ; RV64I-NEXT:    sd a0, 40(sp)
-; RV64I-NEXT:    addi a0, zero, 10
+; RV64I-NEXT:    li a0, 10
 ; RV64I-NEXT:    sd a0, 32(sp)
 ; RV64I-NEXT:    sd zero, 24(sp)
-; RV64I-NEXT:    addi a0, zero, 9
+; RV64I-NEXT:    li a0, 9
 ; RV64I-NEXT:    sd a0, 16(sp)
-; RV64I-NEXT:    addi a6, zero, 8
-; RV64I-NEXT:    addi a0, zero, 1
-; RV64I-NEXT:    addi a1, zero, 2
-; RV64I-NEXT:    addi a2, zero, 3
-; RV64I-NEXT:    addi a3, zero, 4
-; RV64I-NEXT:    addi a4, zero, 5
-; RV64I-NEXT:    addi a5, zero, 6
-; RV64I-NEXT:    addi a7, zero, 7
+; RV64I-NEXT:    li a6, 8
+; RV64I-NEXT:    li a0, 1
+; RV64I-NEXT:    li a1, 2
+; RV64I-NEXT:    li a2, 3
+; RV64I-NEXT:    li a3, 4
+; RV64I-NEXT:    li a4, 5
+; RV64I-NEXT:    li a5, 6
+; RV64I-NEXT:    li a7, 7
 ; RV64I-NEXT:    sd a6, 0(sp)
-; RV64I-NEXT:    mv a6, zero
+; RV64I-NEXT:    li a6, 0
 ; RV64I-NEXT:    call callee_aligned_stack@plt
 ; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 64
@@ -393,8 +393,8 @@ define void @caller_aligned_stack() nounwind {
 define i128 @callee_small_scalar_ret() nounwind {
 ; RV64I-LABEL: callee_small_scalar_ret:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, -1
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a0, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    ret
   ret i128 -1
 }
@@ -423,8 +423,8 @@ define i64 @caller_small_scalar_ret() nounwind {
 define %struct.small @callee_small_struct_ret() nounwind {
 ; RV64I-LABEL: callee_small_struct_ret:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a0, 1
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    ret
   ret %struct.small { i64 1, i64* null }
 }
@@ -452,7 +452,7 @@ define i64 @caller_small_struct_ret() nounwind {
 define i256 @callee_large_scalar_ret() nounwind {
 ; RV64I-LABEL: callee_large_scalar_ret:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    sd a1, 24(a0)
 ; RV64I-NEXT:    sd a1, 16(a0)
 ; RV64I-NEXT:    sd a1, 8(a0)
@@ -483,16 +483,16 @@ define void @callee_large_struct_ret(%struct.large* noalias sret(%struct.large)
 ; RV64I-LABEL: callee_large_struct_ret:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sw zero, 4(a0)
-; RV64I-NEXT:    addi a1, zero, 1
+; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    sw a1, 0(a0)
 ; RV64I-NEXT:    sw zero, 12(a0)
-; RV64I-NEXT:    addi a1, zero, 2
+; RV64I-NEXT:    li a1, 2
 ; RV64I-NEXT:    sw a1, 8(a0)
 ; RV64I-NEXT:    sw zero, 20(a0)
-; RV64I-NEXT:    addi a1, zero, 3
+; RV64I-NEXT:    li a1, 3
 ; RV64I-NEXT:    sw a1, 16(a0)
 ; RV64I-NEXT:    sw zero, 28(a0)
-; RV64I-NEXT:    addi a1, zero, 4
+; RV64I-NEXT:    li a1, 4
 ; RV64I-NEXT:    sw a1, 24(a0)
 ; RV64I-NEXT:    ret
   %a = getelementptr inbounds %struct.large, %struct.large* %agg.result, i64 0, i32 0

diff  --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
index ca7d7b9491b2..95c993ee9495 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
@@ -55,7 +55,7 @@ define i64 @caller_float_in_regs() nounwind {
 ; RV64I-FPELIM:       # %bb.0:
 ; RV64I-FPELIM-NEXT:    addi sp, sp, -16
 ; RV64I-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-FPELIM-NEXT:    addi a0, zero, 1
+; RV64I-FPELIM-NEXT:    li a0, 1
 ; RV64I-FPELIM-NEXT:    lui a1, 262144
 ; RV64I-FPELIM-NEXT:    call callee_float_in_regs@plt
 ; RV64I-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -68,7 +68,7 @@ define i64 @caller_float_in_regs() nounwind {
 ; RV64I-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-WITHFP-NEXT:    addi s0, sp, 16
-; RV64I-WITHFP-NEXT:    addi a0, zero, 1
+; RV64I-WITHFP-NEXT:    li a0, 1
 ; RV64I-WITHFP-NEXT:    lui a1, 262144
 ; RV64I-WITHFP-NEXT:    call callee_float_in_regs@plt
 ; RV64I-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
@@ -109,15 +109,15 @@ define i64 @caller_float_on_stack() nounwind {
 ; RV64I-FPELIM-NEXT:    addi sp, sp, -16
 ; RV64I-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-FPELIM-NEXT:    lui a1, 264704
-; RV64I-FPELIM-NEXT:    addi a0, zero, 1
-; RV64I-FPELIM-NEXT:    addi a2, zero, 2
-; RV64I-FPELIM-NEXT:    addi a4, zero, 3
-; RV64I-FPELIM-NEXT:    addi a6, zero, 4
+; RV64I-FPELIM-NEXT:    li a0, 1
+; RV64I-FPELIM-NEXT:    li a2, 2
+; RV64I-FPELIM-NEXT:    li a4, 3
+; RV64I-FPELIM-NEXT:    li a6, 4
 ; RV64I-FPELIM-NEXT:    sd a1, 0(sp)
-; RV64I-FPELIM-NEXT:    mv a1, zero
-; RV64I-FPELIM-NEXT:    mv a3, zero
-; RV64I-FPELIM-NEXT:    mv a5, zero
-; RV64I-FPELIM-NEXT:    mv a7, zero
+; RV64I-FPELIM-NEXT:    li a1, 0
+; RV64I-FPELIM-NEXT:    li a3, 0
+; RV64I-FPELIM-NEXT:    li a5, 0
+; RV64I-FPELIM-NEXT:    li a7, 0
 ; RV64I-FPELIM-NEXT:    call callee_float_on_stack@plt
 ; RV64I-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-FPELIM-NEXT:    addi sp, sp, 16
@@ -130,15 +130,15 @@ define i64 @caller_float_on_stack() nounwind {
 ; RV64I-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64I-WITHFP-NEXT:    addi s0, sp, 32
 ; RV64I-WITHFP-NEXT:    lui a1, 264704
-; RV64I-WITHFP-NEXT:    addi a0, zero, 1
-; RV64I-WITHFP-NEXT:    addi a2, zero, 2
-; RV64I-WITHFP-NEXT:    addi a4, zero, 3
-; RV64I-WITHFP-NEXT:    addi a6, zero, 4
+; RV64I-WITHFP-NEXT:    li a0, 1
+; RV64I-WITHFP-NEXT:    li a2, 2
+; RV64I-WITHFP-NEXT:    li a4, 3
+; RV64I-WITHFP-NEXT:    li a6, 4
 ; RV64I-WITHFP-NEXT:    sd a1, 0(sp)
-; RV64I-WITHFP-NEXT:    mv a1, zero
-; RV64I-WITHFP-NEXT:    mv a3, zero
-; RV64I-WITHFP-NEXT:    mv a5, zero
-; RV64I-WITHFP-NEXT:    mv a7, zero
+; RV64I-WITHFP-NEXT:    li a1, 0
+; RV64I-WITHFP-NEXT:    li a3, 0
+; RV64I-WITHFP-NEXT:    li a5, 0
+; RV64I-WITHFP-NEXT:    li a7, 0
 ; RV64I-WITHFP-NEXT:    call callee_float_on_stack@plt
 ; RV64I-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64I-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload

diff  --git a/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll b/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
index 59f825477b17..bbd08ae15817 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
@@ -34,15 +34,15 @@ define float @caller_onstack_f32_noop(float %a) nounwind {
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IF-NEXT:    sw a0, 4(sp)
 ; RV32IF-NEXT:    lui a1, 264704
-; RV32IF-NEXT:    addi a0, zero, 1
-; RV32IF-NEXT:    addi a2, zero, 2
-; RV32IF-NEXT:    addi a4, zero, 3
-; RV32IF-NEXT:    addi a6, zero, 4
+; RV32IF-NEXT:    li a0, 1
+; RV32IF-NEXT:    li a2, 2
+; RV32IF-NEXT:    li a4, 3
+; RV32IF-NEXT:    li a6, 4
 ; RV32IF-NEXT:    sw a1, 0(sp)
-; RV32IF-NEXT:    mv a1, zero
-; RV32IF-NEXT:    mv a3, zero
-; RV32IF-NEXT:    mv a5, zero
-; RV32IF-NEXT:    mv a7, zero
+; RV32IF-NEXT:    li a1, 0
+; RV32IF-NEXT:    li a3, 0
+; RV32IF-NEXT:    li a5, 0
+; RV32IF-NEXT:    li a7, 0
 ; RV32IF-NEXT:    call onstack_f32_noop@plt
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
@@ -61,15 +61,15 @@ define float @caller_onstack_f32_fadd(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    fadd.s ft2, ft1, ft0
 ; RV32IF-NEXT:    fsub.s ft0, ft0, ft1
 ; RV32IF-NEXT:    fsw ft0, 4(sp)
-; RV32IF-NEXT:    addi a0, zero, 1
-; RV32IF-NEXT:    addi a2, zero, 2
-; RV32IF-NEXT:    addi a4, zero, 3
-; RV32IF-NEXT:    addi a6, zero, 4
+; RV32IF-NEXT:    li a0, 1
+; RV32IF-NEXT:    li a2, 2
+; RV32IF-NEXT:    li a4, 3
+; RV32IF-NEXT:    li a6, 4
 ; RV32IF-NEXT:    fsw ft2, 0(sp)
-; RV32IF-NEXT:    mv a1, zero
-; RV32IF-NEXT:    mv a3, zero
-; RV32IF-NEXT:    mv a5, zero
-; RV32IF-NEXT:    mv a7, zero
+; RV32IF-NEXT:    li a1, 0
+; RV32IF-NEXT:    li a3, 0
+; RV32IF-NEXT:    li a5, 0
+; RV32IF-NEXT:    li a7, 0
 ; RV32IF-NEXT:    call onstack_f32_noop@plt
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16

diff  --git a/llvm/test/CodeGen/RISCV/codemodel-lowering.ll b/llvm/test/CodeGen/RISCV/codemodel-lowering.ll
index 25d41b6bc3c5..a88f5282e080 100644
--- a/llvm/test/CodeGen/RISCV/codemodel-lowering.ll
+++ b/llvm/test/CodeGen/RISCV/codemodel-lowering.ll
@@ -33,7 +33,7 @@ define void @lower_blockaddress() nounwind {
 ; RV32I-SMALL-LABEL: lower_blockaddress:
 ; RV32I-SMALL:       # %bb.0:
 ; RV32I-SMALL-NEXT:    lui a0, %hi(addr)
-; RV32I-SMALL-NEXT:    addi a1, zero, 1
+; RV32I-SMALL-NEXT:    li a1, 1
 ; RV32I-SMALL-NEXT:    sw a1, %lo(addr)(a0)
 ; RV32I-SMALL-NEXT:    ret
 ;
@@ -42,7 +42,7 @@ define void @lower_blockaddress() nounwind {
 ; RV32I-MEDIUM-NEXT:  .LBB1_1: # Label of block must be emitted
 ; RV32I-MEDIUM-NEXT:    auipc a0, %pcrel_hi(addr)
 ; RV32I-MEDIUM-NEXT:    addi a0, a0, %pcrel_lo(.LBB1_1)
-; RV32I-MEDIUM-NEXT:    addi a1, zero, 1
+; RV32I-MEDIUM-NEXT:    li a1, 1
 ; RV32I-MEDIUM-NEXT:    sw a1, 0(a0)
 ; RV32I-MEDIUM-NEXT:    ret
   store volatile i8* blockaddress(@lower_blockaddress, %block), i8** @addr
@@ -60,7 +60,7 @@ define signext i32 @lower_blockaddress_displ(i32 signext %w) nounwind {
 ; RV32I-SMALL-NEXT:    addi sp, sp, -16
 ; RV32I-SMALL-NEXT:    lui a1, %hi(.Ltmp0)
 ; RV32I-SMALL-NEXT:    addi a1, a1, %lo(.Ltmp0)
-; RV32I-SMALL-NEXT:    addi a2, zero, 101
+; RV32I-SMALL-NEXT:    li a2, 101
 ; RV32I-SMALL-NEXT:    sw a1, 8(sp)
 ; RV32I-SMALL-NEXT:    blt a0, a2, .LBB2_3
 ; RV32I-SMALL-NEXT:  # %bb.1: # %if.then
@@ -68,11 +68,11 @@ define signext i32 @lower_blockaddress_displ(i32 signext %w) nounwind {
 ; RV32I-SMALL-NEXT:    jr a0
 ; RV32I-SMALL-NEXT:  .Ltmp0: # Block address taken
 ; RV32I-SMALL-NEXT:  .LBB2_2: # %return
-; RV32I-SMALL-NEXT:    addi a0, zero, 4
+; RV32I-SMALL-NEXT:    li a0, 4
 ; RV32I-SMALL-NEXT:    addi sp, sp, 16
 ; RV32I-SMALL-NEXT:    ret
 ; RV32I-SMALL-NEXT:  .LBB2_3: # %return.clone
-; RV32I-SMALL-NEXT:    addi a0, zero, 3
+; RV32I-SMALL-NEXT:    li a0, 3
 ; RV32I-SMALL-NEXT:    addi sp, sp, 16
 ; RV32I-SMALL-NEXT:    ret
 ;
@@ -83,7 +83,7 @@ define signext i32 @lower_blockaddress_displ(i32 signext %w) nounwind {
 ; RV32I-MEDIUM-NEXT:    # Label of block must be emitted
 ; RV32I-MEDIUM-NEXT:    auipc a1, %pcrel_hi(.Ltmp0)
 ; RV32I-MEDIUM-NEXT:    addi a1, a1, %pcrel_lo(.LBB2_4)
-; RV32I-MEDIUM-NEXT:    addi a2, zero, 101
+; RV32I-MEDIUM-NEXT:    li a2, 101
 ; RV32I-MEDIUM-NEXT:    sw a1, 8(sp)
 ; RV32I-MEDIUM-NEXT:    blt a0, a2, .LBB2_3
 ; RV32I-MEDIUM-NEXT:  # %bb.1: # %if.then
@@ -91,11 +91,11 @@ define signext i32 @lower_blockaddress_displ(i32 signext %w) nounwind {
 ; RV32I-MEDIUM-NEXT:    jr a0
 ; RV32I-MEDIUM-NEXT:  .Ltmp0: # Block address taken
 ; RV32I-MEDIUM-NEXT:  .LBB2_2: # %return
-; RV32I-MEDIUM-NEXT:    addi a0, zero, 4
+; RV32I-MEDIUM-NEXT:    li a0, 4
 ; RV32I-MEDIUM-NEXT:    addi sp, sp, 16
 ; RV32I-MEDIUM-NEXT:    ret
 ; RV32I-MEDIUM-NEXT:  .LBB2_3: # %return.clone
-; RV32I-MEDIUM-NEXT:    addi a0, zero, 3
+; RV32I-MEDIUM-NEXT:    li a0, 3
 ; RV32I-MEDIUM-NEXT:    addi sp, sp, 16
 ; RV32I-MEDIUM-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/copysign-casts.ll b/llvm/test/CodeGen/RISCV/copysign-casts.ll
index b67b4f622ebc..054c4820f534 100644
--- a/llvm/test/CodeGen/RISCV/copysign-casts.ll
+++ b/llvm/test/CodeGen/RISCV/copysign-casts.ll
@@ -38,7 +38,7 @@ define double @fold_promote_d_s(double %a, float %b) nounwind {
 ;
 ; RV64I-LABEL: fold_promote_d_s:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, -1
+; RV64I-NEXT:    li a2, -1
 ; RV64I-NEXT:    srli a2, a2, 1
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    lui a2, 524288
@@ -109,7 +109,7 @@ define double @fold_promote_d_h(double %a, half %b) nounwind {
 ;
 ; RV64I-LABEL: fold_promote_d_h:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, -1
+; RV64I-NEXT:    li a2, -1
 ; RV64I-NEXT:    srli a2, a2, 1
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    lui a2, 8
@@ -290,7 +290,7 @@ define float @fold_demote_s_d(float %a, double %b) nounwind {
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    addiw a2, a2, -1
 ; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    addi a2, zero, -1
+; RV64I-NEXT:    li a2, -1
 ; RV64I-NEXT:    slli a2, a2, 63
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    srli a1, a1, 32
@@ -354,7 +354,7 @@ define half @fold_demote_h_s(half %a, float %b) nounwind {
 ; RV64I-NEXT:    lui a2, 8
 ; RV64I-NEXT:    addiw a2, a2, -1
 ; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    slli a2, a2, 31
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    srli a1, a1, 16
@@ -448,7 +448,7 @@ define half @fold_demote_h_d(half %a, double %b) nounwind {
 ; RV64I-NEXT:    lui a2, 8
 ; RV64I-NEXT:    addiw a2, a2, -1
 ; RV64I-NEXT:    and a0, a0, a2
-; RV64I-NEXT:    addi a2, zero, -1
+; RV64I-NEXT:    li a2, -1
 ; RV64I-NEXT:    slli a2, a2, 63
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    srli a1, a1, 48
@@ -496,7 +496,7 @@ define half @fold_demote_h_d(half %a, double %b) nounwind {
 ; RV64IFD-NEXT:    lui a2, 8
 ; RV64IFD-NEXT:    addiw a2, a2, -1
 ; RV64IFD-NEXT:    and a1, a1, a2
-; RV64IFD-NEXT:    addi a2, zero, -1
+; RV64IFD-NEXT:    li a2, -1
 ; RV64IFD-NEXT:    slli a2, a2, 63
 ; RV64IFD-NEXT:    and a0, a0, a2
 ; RV64IFD-NEXT:    srli a0, a0, 48

diff  --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll
index 57c39aaaf570..4a8a1bc5ac1e 100644
--- a/llvm/test/CodeGen/RISCV/div.ll
+++ b/llvm/test/CodeGen/RISCV/div.ll
@@ -49,7 +49,7 @@ define i32 @udiv_constant(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    li a1, 5
 ; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -69,7 +69,7 @@ define i32 @udiv_constant(i32 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    li a1, 5
 ; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -118,7 +118,7 @@ define i32 @udiv_constant_lhs(i32 %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    addi a0, zero, 10
+; RV32I-NEXT:    li a0, 10
 ; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -126,7 +126,7 @@ define i32 @udiv_constant_lhs(i32 %a) nounwind {
 ;
 ; RV32IM-LABEL: udiv_constant_lhs:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    addi a1, zero, 10
+; RV32IM-NEXT:    li a1, 10
 ; RV32IM-NEXT:    divu a0, a1, a0
 ; RV32IM-NEXT:    ret
 ;
@@ -136,7 +136,7 @@ define i32 @udiv_constant_lhs(i32 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a1, a0, 32
-; RV64I-NEXT:    addi a0, zero, 10
+; RV64I-NEXT:    li a0, 10
 ; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -144,7 +144,7 @@ define i32 @udiv_constant_lhs(i32 %a) nounwind {
 ;
 ; RV64IM-LABEL: udiv_constant_lhs:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    addi a1, zero, 10
+; RV64IM-NEXT:    li a1, 10
 ; RV64IM-NEXT:    divuw a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = udiv i32 10, %a
@@ -192,8 +192,8 @@ define i64 @udiv64_constant(i64 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 5
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __udivdi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -203,8 +203,8 @@ define i64 @udiv64_constant(i64 %a) nounwind {
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -16
 ; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IM-NEXT:    addi a2, zero, 5
-; RV32IM-NEXT:    mv a3, zero
+; RV32IM-NEXT:    li a2, 5
+; RV32IM-NEXT:    li a3, 0
 ; RV32IM-NEXT:    call __udivdi3@plt
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
@@ -214,7 +214,7 @@ define i64 @udiv64_constant(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    li a1, 5
 ; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -244,8 +244,8 @@ define i64 @udiv64_constant_lhs(i64 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a3, a1
 ; RV32I-NEXT:    mv a2, a0
-; RV32I-NEXT:    addi a0, zero, 10
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a0, 10
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __udivdi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -257,8 +257,8 @@ define i64 @udiv64_constant_lhs(i64 %a) nounwind {
 ; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IM-NEXT:    mv a3, a1
 ; RV32IM-NEXT:    mv a2, a0
-; RV32IM-NEXT:    addi a0, zero, 10
-; RV32IM-NEXT:    mv a1, zero
+; RV32IM-NEXT:    li a0, 10
+; RV32IM-NEXT:    li a1, 0
 ; RV32IM-NEXT:    call __udivdi3@plt
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
@@ -269,7 +269,7 @@ define i64 @udiv64_constant_lhs(i64 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    addi a0, zero, 10
+; RV64I-NEXT:    li a0, 10
 ; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -277,7 +277,7 @@ define i64 @udiv64_constant_lhs(i64 %a) nounwind {
 ;
 ; RV64IM-LABEL: udiv64_constant_lhs:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    addi a1, zero, 10
+; RV64IM-NEXT:    li a1, 10
 ; RV64IM-NEXT:    divu a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = udiv i64 10, %a
@@ -330,7 +330,7 @@ define i8 @udiv8_constant(i8 %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    andi a0, a0, 255
-; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    li a1, 5
 ; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -339,7 +339,7 @@ define i8 @udiv8_constant(i8 %a) nounwind {
 ; RV32IM-LABEL: udiv8_constant:
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    andi a0, a0, 255
-; RV32IM-NEXT:    addi a1, zero, 205
+; RV32IM-NEXT:    li a1, 205
 ; RV32IM-NEXT:    mul a0, a0, a1
 ; RV32IM-NEXT:    srli a0, a0, 10
 ; RV32IM-NEXT:    ret
@@ -349,7 +349,7 @@ define i8 @udiv8_constant(i8 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    andi a0, a0, 255
-; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    li a1, 5
 ; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -358,7 +358,7 @@ define i8 @udiv8_constant(i8 %a) nounwind {
 ; RV64IM-LABEL: udiv8_constant:
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    andi a0, a0, 255
-; RV64IM-NEXT:    addi a1, zero, 205
+; RV64IM-NEXT:    li a1, 205
 ; RV64IM-NEXT:    mul a0, a0, a1
 ; RV64IM-NEXT:    srli a0, a0, 10
 ; RV64IM-NEXT:    ret
@@ -400,7 +400,7 @@ define i8 @udiv8_constant_lhs(i8 %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    andi a1, a0, 255
-; RV32I-NEXT:    addi a0, zero, 10
+; RV32I-NEXT:    li a0, 10
 ; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -409,7 +409,7 @@ define i8 @udiv8_constant_lhs(i8 %a) nounwind {
 ; RV32IM-LABEL: udiv8_constant_lhs:
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    andi a0, a0, 255
-; RV32IM-NEXT:    addi a1, zero, 10
+; RV32IM-NEXT:    li a1, 10
 ; RV32IM-NEXT:    divu a0, a1, a0
 ; RV32IM-NEXT:    ret
 ;
@@ -418,7 +418,7 @@ define i8 @udiv8_constant_lhs(i8 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    andi a1, a0, 255
-; RV64I-NEXT:    addi a0, zero, 10
+; RV64I-NEXT:    li a0, 10
 ; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -427,7 +427,7 @@ define i8 @udiv8_constant_lhs(i8 %a) nounwind {
 ; RV64IM-LABEL: udiv8_constant_lhs:
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    andi a0, a0, 255
-; RV64IM-NEXT:    addi a1, zero, 10
+; RV64IM-NEXT:    li a1, 10
 ; RV64IM-NEXT:    divuw a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = udiv i8 10, %a
@@ -490,7 +490,7 @@ define i16 @udiv16_constant(i16 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    li a1, 5
 ; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -514,7 +514,7 @@ define i16 @udiv16_constant(i16 %a) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    li a1, 5
 ; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -570,7 +570,7 @@ define i16 @udiv16_constant_lhs(i16 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a1, a0, a1
-; RV32I-NEXT:    addi a0, zero, 10
+; RV32I-NEXT:    li a0, 10
 ; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -581,7 +581,7 @@ define i16 @udiv16_constant_lhs(i16 %a) nounwind {
 ; RV32IM-NEXT:    lui a1, 16
 ; RV32IM-NEXT:    addi a1, a1, -1
 ; RV32IM-NEXT:    and a0, a0, a1
-; RV32IM-NEXT:    addi a1, zero, 10
+; RV32IM-NEXT:    li a1, 10
 ; RV32IM-NEXT:    divu a0, a1, a0
 ; RV32IM-NEXT:    ret
 ;
@@ -592,7 +592,7 @@ define i16 @udiv16_constant_lhs(i16 %a) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    and a1, a0, a1
-; RV64I-NEXT:    addi a0, zero, 10
+; RV64I-NEXT:    li a0, 10
 ; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -603,7 +603,7 @@ define i16 @udiv16_constant_lhs(i16 %a) nounwind {
 ; RV64IM-NEXT:    lui a1, 16
 ; RV64IM-NEXT:    addiw a1, a1, -1
 ; RV64IM-NEXT:    and a0, a0, a1
-; RV64IM-NEXT:    addi a1, zero, 10
+; RV64IM-NEXT:    li a1, 10
 ; RV64IM-NEXT:    divuw a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = udiv i16 10, %a
@@ -649,7 +649,7 @@ define i32 @sdiv_constant(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    li a1, 5
 ; RV32I-NEXT:    call __divsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -670,7 +670,7 @@ define i32 @sdiv_constant(i32 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    li a1, 5
 ; RV64I-NEXT:    call __divdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -768,7 +768,7 @@ define i32 @sdiv_constant_lhs(i32 %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    addi a0, zero, -10
+; RV32I-NEXT:    li a0, -10
 ; RV32I-NEXT:    call __divsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -776,7 +776,7 @@ define i32 @sdiv_constant_lhs(i32 %a) nounwind {
 ;
 ; RV32IM-LABEL: sdiv_constant_lhs:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    addi a1, zero, -10
+; RV32IM-NEXT:    li a1, -10
 ; RV32IM-NEXT:    div a0, a1, a0
 ; RV32IM-NEXT:    ret
 ;
@@ -785,7 +785,7 @@ define i32 @sdiv_constant_lhs(i32 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a1, a0
-; RV64I-NEXT:    addi a0, zero, -10
+; RV64I-NEXT:    li a0, -10
 ; RV64I-NEXT:    call __divdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -793,7 +793,7 @@ define i32 @sdiv_constant_lhs(i32 %a) nounwind {
 ;
 ; RV64IM-LABEL: sdiv_constant_lhs:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    addi a1, zero, -10
+; RV64IM-NEXT:    li a1, -10
 ; RV64IM-NEXT:    divw a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = sdiv i32 -10, %a
@@ -841,8 +841,8 @@ define i64 @sdiv64_constant(i64 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 5
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 5
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __divdi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -852,8 +852,8 @@ define i64 @sdiv64_constant(i64 %a) nounwind {
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -16
 ; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IM-NEXT:    addi a2, zero, 5
-; RV32IM-NEXT:    mv a3, zero
+; RV32IM-NEXT:    li a2, 5
+; RV32IM-NEXT:    li a3, 0
 ; RV32IM-NEXT:    call __divdi3@plt
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
@@ -863,7 +863,7 @@ define i64 @sdiv64_constant(i64 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    li a1, 5
 ; RV64I-NEXT:    call __divdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -895,8 +895,8 @@ define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a3, a1
 ; RV32I-NEXT:    mv a2, a0
-; RV32I-NEXT:    addi a0, zero, 10
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a0, 10
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __divdi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -908,8 +908,8 @@ define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
 ; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IM-NEXT:    mv a3, a1
 ; RV32IM-NEXT:    mv a2, a0
-; RV32IM-NEXT:    addi a0, zero, 10
-; RV32IM-NEXT:    mv a1, zero
+; RV32IM-NEXT:    li a0, 10
+; RV32IM-NEXT:    li a1, 0
 ; RV32IM-NEXT:    call __divdi3@plt
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
@@ -920,7 +920,7 @@ define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    addi a0, zero, 10
+; RV64I-NEXT:    li a0, 10
 ; RV64I-NEXT:    call __divdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -928,7 +928,7 @@ define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
 ;
 ; RV64IM-LABEL: sdiv64_constant_lhs:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    addi a1, zero, 10
+; RV64IM-NEXT:    li a1, 10
 ; RV64IM-NEXT:    div a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = sdiv i64 10, %a
@@ -1041,7 +1041,7 @@ define i8 @sdiv8_constant(i8 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
-; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    li a1, 5
 ; RV32I-NEXT:    call __divsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1051,7 +1051,7 @@ define i8 @sdiv8_constant(i8 %a) nounwind {
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    slli a0, a0, 24
 ; RV32IM-NEXT:    srai a0, a0, 24
-; RV32IM-NEXT:    addi a1, zero, 103
+; RV32IM-NEXT:    li a1, 103
 ; RV32IM-NEXT:    mul a0, a0, a1
 ; RV32IM-NEXT:    srai a1, a0, 9
 ; RV32IM-NEXT:    srli a0, a0, 15
@@ -1065,7 +1065,7 @@ define i8 @sdiv8_constant(i8 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
-; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    li a1, 5
 ; RV64I-NEXT:    call __divdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1075,7 +1075,7 @@ define i8 @sdiv8_constant(i8 %a) nounwind {
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    slli a0, a0, 56
 ; RV64IM-NEXT:    srai a0, a0, 56
-; RV64IM-NEXT:    addi a1, zero, 103
+; RV64IM-NEXT:    li a1, 103
 ; RV64IM-NEXT:    mul a0, a0, a1
 ; RV64IM-NEXT:    srai a1, a0, 9
 ; RV64IM-NEXT:    srli a0, a0, 15
@@ -1141,7 +1141,7 @@ define i8 @sdiv8_constant_lhs(i8 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a1, a0, 24
-; RV32I-NEXT:    addi a0, zero, -10
+; RV32I-NEXT:    li a0, -10
 ; RV32I-NEXT:    call __divsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1151,7 +1151,7 @@ define i8 @sdiv8_constant_lhs(i8 %a) nounwind {
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    slli a0, a0, 24
 ; RV32IM-NEXT:    srai a0, a0, 24
-; RV32IM-NEXT:    addi a1, zero, -10
+; RV32IM-NEXT:    li a1, -10
 ; RV32IM-NEXT:    div a0, a1, a0
 ; RV32IM-NEXT:    ret
 ;
@@ -1161,7 +1161,7 @@ define i8 @sdiv8_constant_lhs(i8 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a1, a0, 56
-; RV64I-NEXT:    addi a0, zero, -10
+; RV64I-NEXT:    li a0, -10
 ; RV64I-NEXT:    call __divdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1171,7 +1171,7 @@ define i8 @sdiv8_constant_lhs(i8 %a) nounwind {
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    slli a0, a0, 56
 ; RV64IM-NEXT:    srai a0, a0, 56
-; RV64IM-NEXT:    addi a1, zero, -10
+; RV64IM-NEXT:    li a1, -10
 ; RV64IM-NEXT:    divw a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = sdiv i8 -10, %a
@@ -1233,7 +1233,7 @@ define i16 @sdiv16_constant(i16 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
-; RV32I-NEXT:    addi a1, zero, 5
+; RV32I-NEXT:    li a1, 5
 ; RV32I-NEXT:    call __divsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1257,7 +1257,7 @@ define i16 @sdiv16_constant(i16 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
-; RV64I-NEXT:    addi a1, zero, 5
+; RV64I-NEXT:    li a1, 5
 ; RV64I-NEXT:    call __divdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1333,7 +1333,7 @@ define i16 @sdiv16_constant_lhs(i16 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a1, a0, 16
-; RV32I-NEXT:    addi a0, zero, -10
+; RV32I-NEXT:    li a0, -10
 ; RV32I-NEXT:    call __divsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1343,7 +1343,7 @@ define i16 @sdiv16_constant_lhs(i16 %a) nounwind {
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    slli a0, a0, 16
 ; RV32IM-NEXT:    srai a0, a0, 16
-; RV32IM-NEXT:    addi a1, zero, -10
+; RV32IM-NEXT:    li a1, -10
 ; RV32IM-NEXT:    div a0, a1, a0
 ; RV32IM-NEXT:    ret
 ;
@@ -1353,7 +1353,7 @@ define i16 @sdiv16_constant_lhs(i16 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a1, a0, 48
-; RV64I-NEXT:    addi a0, zero, -10
+; RV64I-NEXT:    li a0, -10
 ; RV64I-NEXT:    call __divdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1363,7 +1363,7 @@ define i16 @sdiv16_constant_lhs(i16 %a) nounwind {
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    slli a0, a0, 48
 ; RV64IM-NEXT:    srai a0, a0, 48
-; RV64IM-NEXT:    addi a1, zero, -10
+; RV64IM-NEXT:    li a1, -10
 ; RV64IM-NEXT:    divw a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = sdiv i16 -10, %a

diff  --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll
index d1ed1d270c75..1189011cf69c 100644
--- a/llvm/test/CodeGen/RISCV/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith.ll
@@ -278,7 +278,7 @@ define double @fsgnj_d(double %a, double %b) nounwind {
 ;
 ; RV64I-LABEL: fsgnj_d:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, -1
+; RV64I-NEXT:    li a2, -1
 ; RV64I-NEXT:    slli a3, a2, 63
 ; RV64I-NEXT:    and a1, a1, a3
 ; RV64I-NEXT:    srli a2, a2, 1
@@ -334,7 +334,7 @@ define i32 @fneg_d(double %a, double %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    call __adddf3@plt
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a1, a0, a1
 ; RV64I-NEXT:    call __eqdf2 at plt
@@ -371,7 +371,7 @@ define double @fsgnjn_d(double %a, double %b) nounwind {
 ;
 ; RV64IFD-LABEL: fsgnjn_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi a2, zero, -1
+; RV64IFD-NEXT:    li a2, -1
 ; RV64IFD-NEXT:    slli a2, a2, 63
 ; RV64IFD-NEXT:    xor a1, a1, a2
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
@@ -393,7 +393,7 @@ define double @fsgnjn_d(double %a, double %b) nounwind {
 ; RV64I-LABEL: fsgnjn_d:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    not a1, a1
-; RV64I-NEXT:    addi a2, zero, -1
+; RV64I-NEXT:    li a2, -1
 ; RV64I-NEXT:    slli a3, a2, 63
 ; RV64I-NEXT:    and a1, a1, a3
 ; RV64I-NEXT:    srli a2, a2, 1
@@ -459,7 +459,7 @@ define double @fabs_d(double %a, double %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __adddf3@plt
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    call __adddf3@plt
@@ -802,8 +802,8 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv a4, a0
 ; RV32I-NEXT:    lui a0, 524288
@@ -830,9 +830,9 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3@plt
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a2, a0, a1
 ; RV64I-NEXT:    mv a0, s1
@@ -898,15 +898,15 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv s5, a4
 ; RV32I-NEXT:    mv s2, a3
 ; RV32I-NEXT:    mv s3, a2
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    mv a0, s5
 ; RV32I-NEXT:    mv a1, s4
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv a4, a0
 ; RV32I-NEXT:    lui a0, 524288
@@ -936,13 +936,13 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s2, a1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3@plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3@plt
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a2, a1, 63
 ; RV64I-NEXT:    xor a1, s1, a2
 ; RV64I-NEXT:    xor a2, a0, a2
@@ -1014,15 +1014,15 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    mv a1, a3
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv s5, a0
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s4
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv a4, a0
 ; RV32I-NEXT:    lui a0, 524288
@@ -1052,13 +1052,13 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3@plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3@plt
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a2, a1, 63
 ; RV64I-NEXT:    xor a1, s1, a2
 ; RV64I-NEXT:    xor a2, a0, a2
@@ -1123,8 +1123,8 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv s3, a4
 ; RV32I-NEXT:    mv s0, a3
 ; RV32I-NEXT:    mv s1, a2
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a1, a1, a2
@@ -1149,9 +1149,9 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3@plt
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    mv a1, s1
@@ -1215,8 +1215,8 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    mv a1, a3
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    lui a0, 524288
@@ -1243,9 +1243,9 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3@plt
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a1, a0, a1
 ; RV64I-NEXT:    mv a0, s1
@@ -1376,8 +1376,8 @@ define double @fmsub_d_contract(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, a4
 ; RV32I-NEXT:    mv a1, a5
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv s5, a1
@@ -1409,7 +1409,7 @@ define double @fmsub_d_contract(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    mv s2, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    mv a1, zero
 ; RV64I-NEXT:    call __adddf3@plt
 ; RV64I-NEXT:    call __adddf3 at plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s1
@@ -1480,22 +1480,22 @@ define double @fnmadd_d_contract(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv s3, a4
 ; RV32I-NEXT:    mv s0, a3
 ; RV32I-NEXT:    mv s1, a2
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv s5, a1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    mv a1, s2
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv s3, a1
@@ -1529,21 +1529,21 @@ define double @fnmadd_d_contract(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a2
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3@plt
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3@plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3@plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    mv a1, s1
 ; RV64I-NEXT:    call __muldf3@plt
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    mv a1, s0
@@ -1613,15 +1613,15 @@ define double @fnmsub_d_contract(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv s3, a4
 ; RV32I-NEXT:    mv s4, a3
 ; RV32I-NEXT:    mv s5, a2
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    mv a0, s5
 ; RV32I-NEXT:    mv a1, s4
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    mv a3, a1
@@ -1652,11 +1652,11 @@ define double @fnmsub_d_contract(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a2
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3@plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __adddf3@plt
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s0

diff  --git a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
index 4876905ac9e7..62dbeaece174 100644
--- a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
@@ -32,14 +32,14 @@ define double @fneg(double %a) nounwind {
 ;
 ; RV64I-LABEL: fneg:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fneg:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi a1, zero, -1
+; RV64IFD-NEXT:    li a1, -1
 ; RV64IFD-NEXT:    slli a1, a1, 63
 ; RV64IFD-NEXT:    xor a0, a0, a1
 ; RV64IFD-NEXT:    ret
@@ -66,14 +66,14 @@ define double @fabs(double %a) nounwind {
 ;
 ; RV64I-LABEL: fabs:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    srli a1, a1, 1
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fabs:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi a1, zero, -1
+; RV64IFD-NEXT:    li a1, -1
 ; RV64IFD-NEXT:    srli a1, a1, 1
 ; RV64IFD-NEXT:    and a0, a0, a1
 ; RV64IFD-NEXT:    ret
@@ -117,7 +117,7 @@ define double @fcopysign_fneg(double %a, double %b) nounwind {
 ; RV64I-LABEL: fcopysign_fneg:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    not a1, a1
-; RV64I-NEXT:    addi a2, zero, -1
+; RV64I-NEXT:    li a2, -1
 ; RV64I-NEXT:    slli a3, a2, 63
 ; RV64I-NEXT:    and a1, a1, a3
 ; RV64I-NEXT:    srli a2, a2, 1
@@ -127,7 +127,7 @@ define double @fcopysign_fneg(double %a, double %b) nounwind {
 ;
 ; RV64IFD-LABEL: fcopysign_fneg:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi a2, zero, -1
+; RV64IFD-NEXT:    li a2, -1
 ; RV64IFD-NEXT:    slli a2, a2, 63
 ; RV64IFD-NEXT:    xor a1, a1, a2
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1

diff  --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
index a3c57e764c85..957bc3796159 100644
--- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
@@ -10,7 +10,7 @@ declare void @exit(i32)
 define void @br_fcmp_false(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_false:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi a0, zero, 1
+; RV32IFD-NEXT:    li a0, 1
 ; RV32IFD-NEXT:    bnez a0, .LBB0_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.then
 ; RV32IFD-NEXT:    ret
@@ -21,7 +21,7 @@ define void @br_fcmp_false(double %a, double %b) nounwind {
 ;
 ; RV64IFD-LABEL: br_fcmp_false:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi a0, zero, 1
+; RV64IFD-NEXT:    li a0, 1
 ; RV64IFD-NEXT:    bnez a0, .LBB0_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.then
 ; RV64IFD-NEXT:    ret
@@ -676,7 +676,7 @@ if.then:
 define void @br_fcmp_true(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: br_fcmp_true:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi a0, zero, 1
+; RV32IFD-NEXT:    li a0, 1
 ; RV32IFD-NEXT:    bnez a0, .LBB16_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
 ; RV32IFD-NEXT:    ret
@@ -687,7 +687,7 @@ define void @br_fcmp_true(double %a, double %b) nounwind {
 ;
 ; RV64IFD-LABEL: br_fcmp_true:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi a0, zero, 1
+; RV64IFD-NEXT:    li a0, 1
 ; RV64IFD-NEXT:    bnez a0, .LBB16_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/double-calling-conv.ll b/llvm/test/CodeGen/RISCV/double-calling-conv.ll
index 5731216ba53b..ed39177cb013 100644
--- a/llvm/test/CodeGen/RISCV/double-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-calling-conv.ll
@@ -81,12 +81,12 @@ define double @caller_double_split_reg_stack() nounwind {
 ; RV32IFD-NEXT:    addi a6, a0, 327
 ; RV32IFD-NEXT:    lui a0, 713032
 ; RV32IFD-NEXT:    addi a5, a0, -1311
-; RV32IFD-NEXT:    addi a0, zero, 1
-; RV32IFD-NEXT:    addi a1, zero, 2
-; RV32IFD-NEXT:    addi a3, zero, 3
+; RV32IFD-NEXT:    li a0, 1
+; RV32IFD-NEXT:    li a1, 2
+; RV32IFD-NEXT:    li a3, 3
 ; RV32IFD-NEXT:    sw a2, 0(sp)
-; RV32IFD-NEXT:    mv a2, zero
-; RV32IFD-NEXT:    mv a4, zero
+; RV32IFD-NEXT:    li a2, 0
+; RV32IFD-NEXT:    li a4, 0
 ; RV32IFD-NEXT:    mv a7, a5
 ; RV32IFD-NEXT:    call callee_double_split_reg_stack at plt
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -126,15 +126,15 @@ define double @caller_double_stack() nounwind {
 ; RV32IFD-NEXT:    lui a0, 262574
 ; RV32IFD-NEXT:    addi a0, a0, 327
 ; RV32IFD-NEXT:    sw a0, 12(sp)
-; RV32IFD-NEXT:    addi a0, zero, 1
-; RV32IFD-NEXT:    addi a2, zero, 2
-; RV32IFD-NEXT:    addi a4, zero, 3
-; RV32IFD-NEXT:    addi a6, zero, 4
+; RV32IFD-NEXT:    li a0, 1
+; RV32IFD-NEXT:    li a2, 2
+; RV32IFD-NEXT:    li a4, 3
+; RV32IFD-NEXT:    li a6, 4
 ; RV32IFD-NEXT:    sw a1, 8(sp)
-; RV32IFD-NEXT:    mv a1, zero
-; RV32IFD-NEXT:    mv a3, zero
-; RV32IFD-NEXT:    mv a5, zero
-; RV32IFD-NEXT:    mv a7, zero
+; RV32IFD-NEXT:    li a1, 0
+; RV32IFD-NEXT:    li a3, 0
+; RV32IFD-NEXT:    li a5, 0
+; RV32IFD-NEXT:    li a7, 0
 ; RV32IFD-NEXT:    call callee_double_stack at plt
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 32

diff  --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index 98a7f36aa21d..1c913c82ddd6 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -138,7 +138,7 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
 ; RV32IFD-NEXT:    feq.d a0, ft0, ft0
 ; RV32IFD-NEXT:    bnez a0, .LBB3_2
 ; RV32IFD-NEXT:  # %bb.1: # %start
-; RV32IFD-NEXT:    mv a0, zero
+; RV32IFD-NEXT:    li a0, 0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB3_2:
@@ -152,7 +152,7 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
 ; RV64IFD-NEXT:    feq.d a0, ft0, ft0
 ; RV64IFD-NEXT:    bnez a0, .LBB3_2
 ; RV64IFD-NEXT:  # %bb.1: # %start
-; RV64IFD-NEXT:    mv a0, zero
+; RV64IFD-NEXT:    li a0, 0
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB3_2:
 ; RV64IFD-NEXT:    fcvt.w.d a0, ft0, rtz
@@ -171,8 +171,8 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a3, 794112
-; RV32I-NEXT:    mv s0, zero
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li s0, 0
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __gedf2 at plt
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    mv a0, s1
@@ -224,21 +224,21 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s4, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    addi a0, zero, -497
+; RV64I-NEXT:    li a0, -497
 ; RV64I-NEXT:    slli a1, a0, 53
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __gedf2 at plt
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixdfdi at plt
-; RV64I-NEXT:    mv s1, zero
+; RV64I-NEXT:    li s1, 0
 ; RV64I-NEXT:    lui s4, 524288
 ; RV64I-NEXT:    lui s3, 524288
 ; RV64I-NEXT:    bltz s2, .LBB3_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:  .LBB3_2: # %start
-; RV64I-NEXT:    addi a0, zero, 527
+; RV64I-NEXT:    li a0, 527
 ; RV64I-NEXT:    slli a0, a0, 31
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    slli a1, a0, 22
@@ -321,7 +321,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, i32* %y) {
 ; RV32IFD-NEXT:    sw a1, 12(sp)
 ; RV32IFD-NEXT:    fld ft0, 8(sp)
 ; RV32IFD-NEXT:    fcvt.wu.d a1, ft0, rtz
-; RV32IFD-NEXT:    addi a0, zero, 1
+; RV32IFD-NEXT:    li a0, 1
 ; RV32IFD-NEXT:    beqz a1, .LBB5_2
 ; RV32IFD-NEXT:  # %bb.1:
 ; RV32IFD-NEXT:    mv a0, a1
@@ -333,7 +333,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, i32* %y) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fcvt.wu.d a1, ft0, rtz
-; RV64IFD-NEXT:    addi a0, zero, 1
+; RV64IFD-NEXT:    li a0, 1
 ; RV64IFD-NEXT:    beqz a1, .LBB5_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    mv a0, a1
@@ -348,7 +348,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, i32* %y) {
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    call __fixunsdfsi at plt
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    beqz a1, .LBB5_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
@@ -365,7 +365,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, i32* %y) {
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    call __fixunsdfsi at plt
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    beqz a1, .LBB5_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
@@ -389,7 +389,7 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
 ; RV32IFD-NEXT:    feq.d a0, ft0, ft0
 ; RV32IFD-NEXT:    bnez a0, .LBB6_2
 ; RV32IFD-NEXT:  # %bb.1: # %start
-; RV32IFD-NEXT:    mv a0, zero
+; RV32IFD-NEXT:    li a0, 0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB6_2:
@@ -403,7 +403,7 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
 ; RV64IFD-NEXT:    feq.d a0, ft0, ft0
 ; RV64IFD-NEXT:    bnez a0, .LBB6_2
 ; RV64IFD-NEXT:  # %bb.1: # %start
-; RV64IFD-NEXT:    mv a0, zero
+; RV64IFD-NEXT:    li a0, 0
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB6_2:
 ; RV64IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
@@ -427,19 +427,19 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __gedf2 at plt
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    call __fixunsdfsi at plt
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    bltz s3, .LBB6_2
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:  .LBB6_2: # %start
-; RV32I-NEXT:    addi a0, zero, -1
+; RV32I-NEXT:    li a0, -1
 ; RV32I-NEXT:    bgtz s2, .LBB6_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    mv a0, a1
@@ -460,17 +460,17 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __gedf2 at plt
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixunsdfdi at plt
-; RV64I-NEXT:    mv s1, zero
+; RV64I-NEXT:    li s1, 0
 ; RV64I-NEXT:    bltz s2, .LBB6_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:  .LBB6_2: # %start
-; RV64I-NEXT:    addi a0, zero, 1055
+; RV64I-NEXT:    li a0, 1055
 ; RV64I-NEXT:    slli a0, a0, 31
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    slli a1, a0, 21
@@ -478,7 +478,7 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
 ; RV64I-NEXT:    call __gtdf2 at plt
 ; RV64I-NEXT:    blez a0, .LBB6_4
 ; RV64I-NEXT:  # %bb.3:
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    srli s1, a0, 32
 ; RV64I-NEXT:  .LBB6_4: # %start
 ; RV64I-NEXT:    mv a0, s1
@@ -712,12 +712,12 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV32IFD-NEXT:    mv a2, a0
 ; RV32IFD-NEXT:    bnez a3, .LBB12_2
 ; RV32IFD-NEXT:  # %bb.1: # %start
-; RV32IFD-NEXT:    mv a2, zero
+; RV32IFD-NEXT:    li a2, 0
 ; RV32IFD-NEXT:  .LBB12_2: # %start
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI12_1)
 ; RV32IFD-NEXT:    fld ft0, %lo(.LCPI12_1)(a0)
 ; RV32IFD-NEXT:    flt.d a4, ft0, ft1
-; RV32IFD-NEXT:    addi a0, zero, -1
+; RV32IFD-NEXT:    li a0, -1
 ; RV32IFD-NEXT:    beqz a4, .LBB12_9
 ; RV32IFD-NEXT:  # %bb.3: # %start
 ; RV32IFD-NEXT:    feq.d a2, ft1, ft1
@@ -730,7 +730,7 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV32IFD-NEXT:  .LBB12_6: # %start
 ; RV32IFD-NEXT:    bnez a2, .LBB12_8
 ; RV32IFD-NEXT:  .LBB12_7: # %start
-; RV32IFD-NEXT:    mv a1, zero
+; RV32IFD-NEXT:    li a1, 0
 ; RV32IFD-NEXT:  .LBB12_8: # %start
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 32
@@ -740,7 +740,7 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV32IFD-NEXT:    feq.d a2, ft1, ft1
 ; RV32IFD-NEXT:    bnez a2, .LBB12_4
 ; RV32IFD-NEXT:  .LBB12_10: # %start
-; RV32IFD-NEXT:    mv a0, zero
+; RV32IFD-NEXT:    li a0, 0
 ; RV32IFD-NEXT:    lui a5, 524288
 ; RV32IFD-NEXT:    bnez a3, .LBB12_5
 ; RV32IFD-NEXT:  .LBB12_11: # %start
@@ -757,7 +757,7 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV64IFD-NEXT:    feq.d a0, ft0, ft0
 ; RV64IFD-NEXT:    bnez a0, .LBB12_2
 ; RV64IFD-NEXT:  # %bb.1: # %start
-; RV64IFD-NEXT:    mv a0, zero
+; RV64IFD-NEXT:    li a0, 0
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB12_2:
 ; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
@@ -779,16 +779,16 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a0, 278016
 ; RV32I-NEXT:    addi s3, a0, -1
-; RV32I-NEXT:    addi a2, zero, -1
+; RV32I-NEXT:    li a2, -1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a3, s3
 ; RV32I-NEXT:    call __gtdf2 at plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    lui a3, 802304
-; RV32I-NEXT:    mv s0, zero
+; RV32I-NEXT:    li s0, 0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s2
-; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    call __gedf2 at plt
 ; RV32I-NEXT:    mv s6, a0
 ; RV32I-NEXT:    mv a0, s1
@@ -800,7 +800,7 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:  .LBB12_2: # %start
-; RV32I-NEXT:    addi s6, zero, -1
+; RV32I-NEXT:    li s6, -1
 ; RV32I-NEXT:    blt s0, s4, .LBB12_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    mv s6, a1
@@ -826,7 +826,7 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV32I-NEXT:  # %bb.7: # %start
 ; RV32I-NEXT:    mv s6, s5
 ; RV32I-NEXT:  .LBB12_8: # %start
-; RV32I-NEXT:    addi a2, zero, -1
+; RV32I-NEXT:    li a2, -1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s2
 ; RV32I-NEXT:    mv a3, s3
@@ -868,15 +868,15 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s4, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    addi a0, zero, -481
+; RV64I-NEXT:    li a0, -481
 ; RV64I-NEXT:    slli a1, a0, 53
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __gedf2 at plt
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixdfdi at plt
-; RV64I-NEXT:    mv s1, zero
-; RV64I-NEXT:    addi s4, zero, -1
+; RV64I-NEXT:    li s1, 0
+; RV64I-NEXT:    li s4, -1
 ; RV64I-NEXT:    bltz s3, .LBB12_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    mv s2, a0
@@ -884,7 +884,7 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV64I-NEXT:  .LBB12_2:
 ; RV64I-NEXT:    slli s2, s4, 63
 ; RV64I-NEXT:  .LBB12_3: # %start
-; RV64I-NEXT:    addi a0, zero, 543
+; RV64I-NEXT:    li a0, 543
 ; RV64I-NEXT:    slli a0, a0, 53
 ; RV64I-NEXT:    addi a1, a0, -1
 ; RV64I-NEXT:    mv a0, s0
@@ -968,13 +968,13 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV32IFD-NEXT:    mv a3, a0
 ; RV32IFD-NEXT:    bnez a4, .LBB14_2
 ; RV32IFD-NEXT:  # %bb.1: # %start
-; RV32IFD-NEXT:    mv a3, zero
+; RV32IFD-NEXT:    li a3, 0
 ; RV32IFD-NEXT:  .LBB14_2: # %start
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI14_0)
 ; RV32IFD-NEXT:    fld ft0, %lo(.LCPI14_0)(a0)
 ; RV32IFD-NEXT:    flt.d a5, ft0, ft1
-; RV32IFD-NEXT:    addi a2, zero, -1
-; RV32IFD-NEXT:    addi a0, zero, -1
+; RV32IFD-NEXT:    li a2, -1
+; RV32IFD-NEXT:    li a0, -1
 ; RV32IFD-NEXT:    beqz a5, .LBB14_7
 ; RV32IFD-NEXT:  # %bb.3: # %start
 ; RV32IFD-NEXT:    beqz a4, .LBB14_8
@@ -991,7 +991,7 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV32IFD-NEXT:    mv a0, a3
 ; RV32IFD-NEXT:    bnez a4, .LBB14_4
 ; RV32IFD-NEXT:  .LBB14_8: # %start
-; RV32IFD-NEXT:    mv a1, zero
+; RV32IFD-NEXT:    li a1, 0
 ; RV32IFD-NEXT:    beqz a5, .LBB14_5
 ; RV32IFD-NEXT:    j .LBB14_6
 ;
@@ -1001,7 +1001,7 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV64IFD-NEXT:    feq.d a0, ft0, ft0
 ; RV64IFD-NEXT:    bnez a0, .LBB14_2
 ; RV64IFD-NEXT:  # %bb.1: # %start
-; RV64IFD-NEXT:    mv a0, zero
+; RV64IFD-NEXT:    li a0, 0
 ; RV64IFD-NEXT:    ret
 ; RV64IFD-NEXT:  .LBB14_2:
 ; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
@@ -1022,33 +1022,33 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a0, 278272
 ; RV32I-NEXT:    addi s3, a0, -1
-; RV32I-NEXT:    addi a2, zero, -1
-; RV32I-NEXT:    addi s2, zero, -1
+; RV32I-NEXT:    li a2, -1
+; RV32I-NEXT:    li s2, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a3, s3
 ; RV32I-NEXT:    call __gtdf2 at plt
 ; RV32I-NEXT:    mv s6, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __gedf2 at plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    call __fixunsdfdi at plt
 ; RV32I-NEXT:    mv s5, a1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    bltz s4, .LBB14_2
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:  .LBB14_2: # %start
-; RV32I-NEXT:    addi s4, zero, -1
+; RV32I-NEXT:    li s4, -1
 ; RV32I-NEXT:    bgtz s6, .LBB14_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    mv s4, a1
 ; RV32I-NEXT:  .LBB14_4: # %start
-; RV32I-NEXT:    addi a2, zero, -1
+; RV32I-NEXT:    li a2, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    mv a3, s3
@@ -1056,10 +1056,10 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __gedf2 at plt
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    bltz a0, .LBB14_6
 ; RV32I-NEXT:  # %bb.5: # %start
 ; RV32I-NEXT:    mv a1, s5
@@ -1089,23 +1089,23 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __gedf2 at plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixunsdfdi at plt
-; RV64I-NEXT:    mv s2, zero
+; RV64I-NEXT:    li s2, 0
 ; RV64I-NEXT:    bltz s1, .LBB14_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:  .LBB14_2: # %start
-; RV64I-NEXT:    addi a0, zero, 1087
+; RV64I-NEXT:    li a0, 1087
 ; RV64I-NEXT:    slli a0, a0, 52
 ; RV64I-NEXT:    addi a1, a0, -1
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __gtdf2 at plt
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    bgtz a1, .LBB14_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    mv a0, s2

diff  --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll
index fe2bada8741a..e1e0bba34da6 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll
@@ -11,22 +11,22 @@
 define i32 @fcmp_false(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_false:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    mv a0, zero
+; RV32IFD-NEXT:    li a0, 0
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcmp_false:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    mv a0, zero
+; RV64IFD-NEXT:    li a0, 0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcmp_false:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_false:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    ret
   %1 = fcmp false double %a, %b
   %2 = zext i1 %1 to i32
@@ -149,7 +149,7 @@ define i32 @fcmp_oge(double %a, double %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gedf2 at plt
-; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    li a1, -1
 ; RV32I-NEXT:    slt a0, a1, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -160,7 +160,7 @@ define i32 @fcmp_oge(double %a, double %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gedf2 at plt
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slt a0, a1, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -552,7 +552,7 @@ define i32 @fcmp_uge(double %a, double %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltdf2 at plt
-; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    li a1, -1
 ; RV32I-NEXT:    slt a0, a1, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -563,7 +563,7 @@ define i32 @fcmp_uge(double %a, double %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltdf2 at plt
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slt a0, a1, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -768,22 +768,22 @@ define i32 @fcmp_uno(double %a, double %b) nounwind {
 define i32 @fcmp_true(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_true:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    addi a0, zero, 1
+; RV32IFD-NEXT:    li a0, 1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fcmp_true:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi a0, zero, 1
+; RV64IFD-NEXT:    li a0, 1
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: fcmp_true:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_true:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    ret
   %1 = fcmp true double %a, %b
   %2 = zext i1 %1 to i32

diff  --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index 5a3b6e457a64..5ba90b7c5332 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -668,7 +668,7 @@ define double @fabs_f64(double %a) nounwind {
 ;
 ; RV64IFD-LABEL: fabs_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi a1, zero, -1
+; RV64IFD-NEXT:    li a1, -1
 ; RV64IFD-NEXT:    srli a1, a1, 1
 ; RV64IFD-NEXT:    and a0, a0, a1
 ; RV64IFD-NEXT:    ret
@@ -682,7 +682,7 @@ define double @fabs_f64(double %a) nounwind {
 ;
 ; RV64I-LABEL: fabs_f64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    srli a1, a1, 1
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -841,7 +841,7 @@ define double @copysign_f64(double %a, double %b) nounwind {
 ;
 ; RV64I-LABEL: copysign_f64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, -1
+; RV64I-NEXT:    li a2, -1
 ; RV64I-NEXT:    slli a3, a2, 63
 ; RV64I-NEXT:    and a1, a1, a3
 ; RV64I-NEXT:    srli a2, a2, 1

diff  --git a/llvm/test/CodeGen/RISCV/double-previous-failure.ll b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
index d3c20db19716..aab8a0cad856 100644
--- a/llvm/test/CodeGen/RISCV/double-previous-failure.ll
+++ b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
@@ -18,7 +18,7 @@ define i32 @main() nounwind {
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IFD-NEXT:    lui a1, 262144
-; RV32IFD-NEXT:    mv a0, zero
+; RV32IFD-NEXT:    li a0, 0
 ; RV32IFD-NEXT:    call test at plt
 ; RV32IFD-NEXT:    sw a0, 0(sp)
 ; RV32IFD-NEXT:    sw a1, 4(sp)
@@ -36,7 +36,7 @@ define i32 @main() nounwind {
 ; RV32IFD-NEXT:  # %bb.1: # %if.then
 ; RV32IFD-NEXT:    call abort at plt
 ; RV32IFD-NEXT:  .LBB1_2: # %if.end
-; RV32IFD-NEXT:    mv a0, zero
+; RV32IFD-NEXT:    li a0, 0
 ; RV32IFD-NEXT:    call exit at plt
 entry:
   %call = call double @test(double 2.000000e+00)

diff  --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll
index 483ab80731c4..96a36c80ea56 100644
--- a/llvm/test/CodeGen/RISCV/float-arith.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith.ll
@@ -686,7 +686,7 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    mv s0, a1
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a2, a0, a1
@@ -708,7 +708,7 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    mv s0, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a2, a0, a1
@@ -760,11 +760,11 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a2
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a1, s1, a2
@@ -788,11 +788,11 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s2, a1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    xor a1, s1, a2
@@ -849,11 +849,11 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    mv s0, a2
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a1, s1, a2
@@ -877,11 +877,11 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s0
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    xor a1, s1, a2
@@ -933,7 +933,7 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a2
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a0, a0, a1
@@ -954,7 +954,7 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a0, a0, a1
@@ -1004,7 +1004,7 @@ define float @fnmsub_s_2(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    mv s0, a2
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, a1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    xor a1, a0, a1
@@ -1026,7 +1026,7 @@ define float @fnmsub_s_2(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    mv s0, a2
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    lui a1, 524288
 ; RV64I-NEXT:    xor a1, a0, a1
@@ -1128,7 +1128,7 @@ define float @fmsub_s_contract(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, a2
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv a0, s1
@@ -1153,7 +1153,7 @@ define float @fmsub_s_contract(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    mv s2, a1
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, a2
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s1
@@ -1210,15 +1210,15 @@ define float @fnmadd_s_contract(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a2
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv a0, s3
@@ -1246,15 +1246,15 @@ define float @fnmadd_s_contract(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a2
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s3
@@ -1314,11 +1314,11 @@ define float @fnmsub_s_contract(float %a, float %b, float %c) nounwind {
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s2, a2
 ; RV32I-NEXT:    mv s1, a1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv a0, s1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    mv a1, a0
 ; RV32I-NEXT:    mv a0, s0
@@ -1342,11 +1342,11 @@ define float @fnmsub_s_contract(float %a, float %b, float %c) nounwind {
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s2, a2
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s1
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    mv a0, s0

diff  --git a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
index 6128f00c4322..73f47ffe67f9 100644
--- a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
@@ -110,7 +110,7 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
 ; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64F-NEXT:    mv s0, a0
 ; RV64F-NEXT:    call __adddf3 at plt
-; RV64F-NEXT:    addi a1, zero, -1
+; RV64F-NEXT:    li a1, -1
 ; RV64F-NEXT:    srli a1, a1, 1
 ; RV64F-NEXT:    and a1, a0, a1
 ; RV64F-NEXT:    mv a0, s0
@@ -233,7 +233,7 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
 ; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64F-NEXT:    mv s0, a0
 ; RV64F-NEXT:    call __muldf3 at plt
-; RV64F-NEXT:    addi a1, zero, -1
+; RV64F-NEXT:    li a1, -1
 ; RV64F-NEXT:    slli a1, a1, 63
 ; RV64F-NEXT:    xor a1, a0, a1
 ; RV64F-NEXT:    mv a0, s0
@@ -360,7 +360,7 @@ define double @bitcast_double_or(double %a1, double %a2) nounwind {
 ; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64F-NEXT:    mv s0, a0
 ; RV64F-NEXT:    call __muldf3 at plt
-; RV64F-NEXT:    addi a1, zero, -1
+; RV64F-NEXT:    li a1, -1
 ; RV64F-NEXT:    slli a1, a1, 63
 ; RV64F-NEXT:    or a1, a0, a1
 ; RV64F-NEXT:    mv a0, s0

diff  --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
index 5c696108cefa..535190f88544 100644
--- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
@@ -11,7 +11,7 @@ declare float @dummy(float)
 define void @br_fcmp_false(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_false:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi a0, zero, 1
+; RV32IF-NEXT:    li a0, 1
 ; RV32IF-NEXT:    bnez a0, .LBB0_2
 ; RV32IF-NEXT:  # %bb.1: # %if.then
 ; RV32IF-NEXT:    ret
@@ -22,7 +22,7 @@ define void @br_fcmp_false(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_false:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi a0, zero, 1
+; RV64IF-NEXT:    li a0, 1
 ; RV64IF-NEXT:    bnez a0, .LBB0_2
 ; RV64IF-NEXT:  # %bb.1: # %if.then
 ; RV64IF-NEXT:    ret
@@ -587,7 +587,7 @@ if.then:
 define void @br_fcmp_true(float %a, float %b) nounwind {
 ; RV32IF-LABEL: br_fcmp_true:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi a0, zero, 1
+; RV32IF-NEXT:    li a0, 1
 ; RV32IF-NEXT:    bnez a0, .LBB16_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    ret
@@ -598,7 +598,7 @@ define void @br_fcmp_true(float %a, float %b) nounwind {
 ;
 ; RV64IF-LABEL: br_fcmp_true:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi a0, zero, 1
+; RV64IF-NEXT:    li a0, 1
 ; RV64IF-NEXT:    bnez a0, .LBB16_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ret
@@ -623,7 +623,7 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
 ; RV32IF:       # %bb.0: # %entry
 ; RV32IF-NEXT:    addi sp, sp, -16
 ; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT:    mv a0, zero
+; RV32IF-NEXT:    li a0, 0
 ; RV32IF-NEXT:    call dummy at plt
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.w.x ft1, zero
@@ -631,14 +631,14 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    feq.s a0, ft0, ft1
 ; RV32IF-NEXT:    beqz a0, .LBB17_3
 ; RV32IF-NEXT:  # %bb.1: # %if.end
-; RV32IF-NEXT:    mv a0, zero
+; RV32IF-NEXT:    li a0, 0
 ; RV32IF-NEXT:    call dummy at plt
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    flw ft1, 8(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    feq.s a0, ft0, ft1
 ; RV32IF-NEXT:    beqz a0, .LBB17_3
 ; RV32IF-NEXT:  # %bb.2: # %if.end4
-; RV32IF-NEXT:    mv a0, zero
+; RV32IF-NEXT:    li a0, 0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
 ; RV32IF-NEXT:    ret
@@ -667,7 +667,7 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    feq.s a0, ft0, ft1
 ; RV64IF-NEXT:    beqz a0, .LBB17_3
 ; RV64IF-NEXT:  # %bb.2: # %if.end4
-; RV64IF-NEXT:    mv a0, zero
+; RV64IF-NEXT:    li a0, 0
 ; RV64IF-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 32

diff  --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index ac83a5cbd5d3..5bbbae704b49 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -51,7 +51,7 @@ define i32 @fcvt_w_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:    feq.s a0, ft0, ft0
 ; RV32IF-NEXT:    bnez a0, .LBB1_2
 ; RV32IF-NEXT:  # %bb.1: # %start
-; RV32IF-NEXT:    mv a0, zero
+; RV32IF-NEXT:    li a0, 0
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB1_2:
 ; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
@@ -63,7 +63,7 @@ define i32 @fcvt_w_s_sat(float %a) nounwind {
 ; RV64IF-NEXT:    feq.s a0, ft0, ft0
 ; RV64IF-NEXT:    bnez a0, .LBB1_2
 ; RV64IF-NEXT:  # %bb.1: # %start
-; RV64IF-NEXT:    mv a0, zero
+; RV64IF-NEXT:    li a0, 0
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB1_2:
 ; RV64IF-NEXT:    fcvt.w.s a0, ft0, rtz
@@ -84,7 +84,7 @@ define i32 @fcvt_w_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixsfsi at plt
-; RV32I-NEXT:    mv s1, zero
+; RV32I-NEXT:    li s1, 0
 ; RV32I-NEXT:    lui s4, 524288
 ; RV32I-NEXT:    lui s3, 524288
 ; RV32I-NEXT:    bltz s2, .LBB1_2
@@ -131,7 +131,7 @@ define i32 @fcvt_w_s_sat(float %a) nounwind {
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixsfdi at plt
-; RV64I-NEXT:    mv s1, zero
+; RV64I-NEXT:    li s1, 0
 ; RV64I-NEXT:    lui s4, 524288
 ; RV64I-NEXT:    lui s3, 524288
 ; RV64I-NEXT:    bltz s2, .LBB1_2
@@ -209,7 +209,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) {
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fcvt.wu.s a1, ft0, rtz
-; RV32IF-NEXT:    addi a0, zero, 1
+; RV32IF-NEXT:    li a0, 1
 ; RV32IF-NEXT:    beqz a1, .LBB3_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    mv a0, a1
@@ -220,7 +220,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) {
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fcvt.wu.s a1, ft0, rtz
-; RV64IF-NEXT:    addi a0, zero, 1
+; RV64IF-NEXT:    li a0, 1
 ; RV64IF-NEXT:    beqz a1, .LBB3_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    mv a0, a1
@@ -235,7 +235,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) {
 ; RV32I-NEXT:    .cfi_offset ra, -4
 ; RV32I-NEXT:    call __fixunssfsi at plt
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    beqz a1, .LBB3_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
@@ -252,7 +252,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) {
 ; RV64I-NEXT:    .cfi_offset ra, -8
 ; RV64I-NEXT:    call __fixunssfsi at plt
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    beqz a1, .LBB3_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
@@ -273,7 +273,7 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:    feq.s a0, ft0, ft0
 ; RV32IF-NEXT:    bnez a0, .LBB4_2
 ; RV32IF-NEXT:  # %bb.1: # %start
-; RV32IF-NEXT:    mv a0, zero
+; RV32IF-NEXT:    li a0, 0
 ; RV32IF-NEXT:    ret
 ; RV32IF-NEXT:  .LBB4_2:
 ; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
@@ -285,7 +285,7 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
 ; RV64IF-NEXT:    feq.s a0, ft0, ft0
 ; RV64IF-NEXT:    bnez a0, .LBB4_2
 ; RV64IF-NEXT:  # %bb.1: # %start
-; RV64IF-NEXT:    mv a0, zero
+; RV64IF-NEXT:    li a0, 0
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB4_2:
 ; RV64IF-NEXT:    fcvt.wu.s a0, ft0, rtz
@@ -299,12 +299,12 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __gesf2 at plt
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixunssfsi at plt
-; RV32I-NEXT:    mv s2, zero
+; RV32I-NEXT:    li s2, 0
 ; RV32I-NEXT:    bltz s1, .LBB4_2
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    mv s2, a0
@@ -314,7 +314,7 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __gtsf2 at plt
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    addi a0, zero, -1
+; RV32I-NEXT:    li a0, -1
 ; RV32I-NEXT:    bgtz a1, .LBB4_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    mv a0, s2
@@ -334,12 +334,12 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __gesf2 at plt
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixunssfdi at plt
-; RV64I-NEXT:    mv s1, zero
+; RV64I-NEXT:    li s1, 0
 ; RV64I-NEXT:    bltz s2, .LBB4_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    mv s1, a0
@@ -350,7 +350,7 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
 ; RV64I-NEXT:    call __gtsf2 at plt
 ; RV64I-NEXT:    blez a0, .LBB4_4
 ; RV64I-NEXT:  # %bb.3:
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    srli s1, a0, 32
 ; RV64I-NEXT:  .LBB4_4: # %start
 ; RV64I-NEXT:    mv a0, s1
@@ -647,14 +647,14 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:    mv a2, a0
 ; RV32IF-NEXT:    bnez s0, .LBB12_2
 ; RV32IF-NEXT:  # %bb.1: # %start
-; RV32IF-NEXT:    mv a2, zero
+; RV32IF-NEXT:    li a2, 0
 ; RV32IF-NEXT:  .LBB12_2: # %start
 ; RV32IF-NEXT:    lui a0, %hi(.LCPI12_1)
 ; RV32IF-NEXT:    flw ft0, %lo(.LCPI12_1)(a0)
 ; RV32IF-NEXT:    flw ft1, 4(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    flt.s a3, ft0, ft1
 ; RV32IF-NEXT:    fmv.s ft0, ft1
-; RV32IF-NEXT:    addi a0, zero, -1
+; RV32IF-NEXT:    li a0, -1
 ; RV32IF-NEXT:    beqz a3, .LBB12_9
 ; RV32IF-NEXT:  # %bb.3: # %start
 ; RV32IF-NEXT:    feq.s a2, ft0, ft0
@@ -667,7 +667,7 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:  .LBB12_6: # %start
 ; RV32IF-NEXT:    bnez a2, .LBB12_8
 ; RV32IF-NEXT:  .LBB12_7: # %start
-; RV32IF-NEXT:    mv a1, zero
+; RV32IF-NEXT:    li a1, 0
 ; RV32IF-NEXT:  .LBB12_8: # %start
 ; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -678,7 +678,7 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:    feq.s a2, ft0, ft0
 ; RV32IF-NEXT:    bnez a2, .LBB12_4
 ; RV32IF-NEXT:  .LBB12_10: # %start
-; RV32IF-NEXT:    mv a0, zero
+; RV32IF-NEXT:    li a0, 0
 ; RV32IF-NEXT:    lui a4, 524288
 ; RV32IF-NEXT:    bnez s0, .LBB12_5
 ; RV32IF-NEXT:  .LBB12_11: # %start
@@ -695,7 +695,7 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV64IF-NEXT:    feq.s a0, ft0, ft0
 ; RV64IF-NEXT:    bnez a0, .LBB12_2
 ; RV64IF-NEXT:  # %bb.1: # %start
-; RV64IF-NEXT:    mv a0, zero
+; RV64IF-NEXT:    li a0, 0
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB12_2:
 ; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
@@ -719,8 +719,8 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixsfdi at plt
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    mv s1, zero
-; RV32I-NEXT:    mv s5, zero
+; RV32I-NEXT:    li s1, 0
+; RV32I-NEXT:    li s5, 0
 ; RV32I-NEXT:    bltz s3, .LBB12_2
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    mv s5, a0
@@ -730,7 +730,7 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s4
 ; RV32I-NEXT:    call __gtsf2 at plt
-; RV32I-NEXT:    addi s6, zero, -1
+; RV32I-NEXT:    li s6, -1
 ; RV32I-NEXT:    blt s1, a0, .LBB12_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    mv s6, s5
@@ -794,8 +794,8 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixsfdi at plt
-; RV64I-NEXT:    mv s1, zero
-; RV64I-NEXT:    addi s4, zero, -1
+; RV64I-NEXT:    li s1, 0
+; RV64I-NEXT:    li s4, -1
 ; RV64I-NEXT:    bltz s3, .LBB12_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    mv s2, a0
@@ -884,14 +884,14 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:    mv a3, a0
 ; RV32IF-NEXT:    bnez s0, .LBB14_2
 ; RV32IF-NEXT:  # %bb.1: # %start
-; RV32IF-NEXT:    mv a3, zero
+; RV32IF-NEXT:    li a3, 0
 ; RV32IF-NEXT:  .LBB14_2: # %start
 ; RV32IF-NEXT:    lui a0, %hi(.LCPI14_0)
 ; RV32IF-NEXT:    flw ft0, %lo(.LCPI14_0)(a0)
 ; RV32IF-NEXT:    flw ft1, 4(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    flt.s a4, ft0, ft1
-; RV32IF-NEXT:    addi a2, zero, -1
-; RV32IF-NEXT:    addi a0, zero, -1
+; RV32IF-NEXT:    li a2, -1
+; RV32IF-NEXT:    li a0, -1
 ; RV32IF-NEXT:    beqz a4, .LBB14_7
 ; RV32IF-NEXT:  # %bb.3: # %start
 ; RV32IF-NEXT:    beqz s0, .LBB14_8
@@ -909,7 +909,7 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV32IF-NEXT:    mv a0, a3
 ; RV32IF-NEXT:    bnez s0, .LBB14_4
 ; RV32IF-NEXT:  .LBB14_8: # %start
-; RV32IF-NEXT:    mv a1, zero
+; RV32IF-NEXT:    li a1, 0
 ; RV32IF-NEXT:    beqz a4, .LBB14_5
 ; RV32IF-NEXT:    j .LBB14_6
 ;
@@ -919,7 +919,7 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV64IF-NEXT:    feq.s a0, ft0, ft0
 ; RV64IF-NEXT:    bnez a0, .LBB14_2
 ; RV64IF-NEXT:  # %bb.1: # %start
-; RV64IF-NEXT:    mv a0, zero
+; RV64IF-NEXT:    li a0, 0
 ; RV64IF-NEXT:    ret
 ; RV64IF-NEXT:  .LBB14_2:
 ; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
@@ -936,13 +936,13 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __gesf2 at plt
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixunssfdi at plt
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    mv s5, zero
+; RV32I-NEXT:    li s5, 0
 ; RV32I-NEXT:    bltz s1, .LBB14_2
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    mv s5, a0
@@ -952,16 +952,16 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    call __gtsf2 at plt
-; RV32I-NEXT:    addi s3, zero, -1
-; RV32I-NEXT:    addi s4, zero, -1
+; RV32I-NEXT:    li s3, -1
+; RV32I-NEXT:    li s4, -1
 ; RV32I-NEXT:    bgtz a0, .LBB14_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    mv s4, s5
 ; RV32I-NEXT:  .LBB14_4: # %start
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __gesf2 at plt
-; RV32I-NEXT:    mv s5, zero
+; RV32I-NEXT:    li s5, 0
 ; RV32I-NEXT:    bltz a0, .LBB14_6
 ; RV32I-NEXT:  # %bb.5: # %start
 ; RV32I-NEXT:    mv s5, s2
@@ -993,12 +993,12 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __gesf2 at plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixunssfdi at plt
-; RV64I-NEXT:    mv s2, zero
+; RV64I-NEXT:    li s2, 0
 ; RV64I-NEXT:    bltz s1, .LBB14_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    mv s2, a0
@@ -1008,7 +1008,7 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __gtsf2 at plt
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    bgtz a1, .LBB14_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    mv a0, s2

diff  --git a/llvm/test/CodeGen/RISCV/float-fcmp.ll b/llvm/test/CodeGen/RISCV/float-fcmp.ll
index 272aee752317..af5e356269ba 100644
--- a/llvm/test/CodeGen/RISCV/float-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp.ll
@@ -11,22 +11,22 @@
 define i32 @fcmp_false(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fcmp_false:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    mv a0, zero
+; RV32IF-NEXT:    li a0, 0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_false:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    mv a0, zero
+; RV64IF-NEXT:    li a0, 0
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcmp_false:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_false:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    ret
   %1 = fcmp false float %a, %b
   %2 = zext i1 %1 to i32
@@ -131,7 +131,7 @@ define i32 @fcmp_oge(float %a, float %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gesf2 at plt
-; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    li a1, -1
 ; RV32I-NEXT:    slt a0, a1, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -142,7 +142,7 @@ define i32 @fcmp_oge(float %a, float %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gesf2 at plt
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slt a0, a1, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -476,7 +476,7 @@ define i32 @fcmp_uge(float %a, float %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltsf2 at plt
-; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    li a1, -1
 ; RV32I-NEXT:    slt a0, a1, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -487,7 +487,7 @@ define i32 @fcmp_uge(float %a, float %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltsf2 at plt
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slt a0, a1, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -668,22 +668,22 @@ define i32 @fcmp_uno(float %a, float %b) nounwind {
 define i32 @fcmp_true(float %a, float %b) nounwind {
 ; RV32IF-LABEL: fcmp_true:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi a0, zero, 1
+; RV32IF-NEXT:    li a0, 1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_true:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    addi a0, zero, 1
+; RV64IF-NEXT:    li a0, 1
 ; RV64IF-NEXT:    ret
 ;
 ; RV32I-LABEL: fcmp_true:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_true:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    ret
   %1 = fcmp true float %a, %b
   %2 = zext i1 %1 to i32

diff  --git a/llvm/test/CodeGen/RISCV/flt-rounds.ll b/llvm/test/CodeGen/RISCV/flt-rounds.ll
index cb6d166de4ff..eb40c6d32c2e 100644
--- a/llvm/test/CodeGen/RISCV/flt-rounds.ll
+++ b/llvm/test/CodeGen/RISCV/flt-rounds.ll
@@ -9,12 +9,12 @@ declare i32 @llvm.flt.rounds()
 define i32 @test_flt_rounds() nounwind {
 ; RV32I-LABEL: test_flt_rounds:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: test_flt_rounds:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    ret
   %1 = call i32 @llvm.flt.rounds()
   ret i32 %1

diff  --git a/llvm/test/CodeGen/RISCV/fp-imm.ll b/llvm/test/CodeGen/RISCV/fp-imm.ll
index 61e5f4f54ca0..55c413989bfb 100644
--- a/llvm/test/CodeGen/RISCV/fp-imm.ll
+++ b/llvm/test/CodeGen/RISCV/fp-imm.ll
@@ -61,8 +61,8 @@ define float @f32_negative_zero(float *%pf) nounwind {
 define double @f64_positive_zero(double *%pd) nounwind {
 ; RV32F-LABEL: f64_positive_zero:
 ; RV32F:       # %bb.0:
-; RV32F-NEXT:    mv a0, zero
-; RV32F-NEXT:    mv a1, zero
+; RV32F-NEXT:    li a0, 0
+; RV32F-NEXT:    li a1, 0
 ; RV32F-NEXT:    ret
 ;
 ; RV32D-LABEL: f64_positive_zero:
@@ -72,7 +72,7 @@ define double @f64_positive_zero(double *%pd) nounwind {
 ;
 ; RV64F-LABEL: f64_positive_zero:
 ; RV64F:       # %bb.0:
-; RV64F-NEXT:    mv a0, zero
+; RV64F-NEXT:    li a0, 0
 ; RV64F-NEXT:    ret
 ;
 ; RV64D-LABEL: f64_positive_zero:
@@ -86,7 +86,7 @@ define double @f64_negative_zero(double *%pd) nounwind {
 ; RV32F-LABEL: f64_negative_zero:
 ; RV32F:       # %bb.0:
 ; RV32F-NEXT:    lui a1, 524288
-; RV32F-NEXT:    mv a0, zero
+; RV32F-NEXT:    li a0, 0
 ; RV32F-NEXT:    ret
 ;
 ; RV32D-LABEL: f64_negative_zero:
@@ -97,7 +97,7 @@ define double @f64_negative_zero(double *%pd) nounwind {
 ;
 ; RV64F-LABEL: f64_negative_zero:
 ; RV64F:       # %bb.0:
-; RV64F-NEXT:    addi a0, zero, -1
+; RV64F-NEXT:    li a0, -1
 ; RV64F-NEXT:    slli a0, a0, 63
 ; RV64F-NEXT:    ret
 ;

diff  --git a/llvm/test/CodeGen/RISCV/frame.ll b/llvm/test/CodeGen/RISCV/frame.ll
index 4ed4e118d8d1..3cde03b8d33e 100644
--- a/llvm/test/CodeGen/RISCV/frame.ll
+++ b/llvm/test/CodeGen/RISCV/frame.ll
@@ -18,7 +18,7 @@ define i32 @test() nounwind {
 ; RV32I-FPELIM-NEXT:    sw zero, 8(sp)
 ; RV32I-FPELIM-NEXT:    addi a0, sp, 12
 ; RV32I-FPELIM-NEXT:    call test1 at plt
-; RV32I-FPELIM-NEXT:    mv a0, zero
+; RV32I-FPELIM-NEXT:    li a0, 0
 ; RV32I-FPELIM-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-FPELIM-NEXT:    addi sp, sp, 32
 ; RV32I-FPELIM-NEXT:    ret
@@ -36,7 +36,7 @@ define i32 @test() nounwind {
 ; RV32I-WITHFP-NEXT:    sw zero, -32(s0)
 ; RV32I-WITHFP-NEXT:    addi a0, s0, -28
 ; RV32I-WITHFP-NEXT:    call test1 at plt
-; RV32I-WITHFP-NEXT:    mv a0, zero
+; RV32I-WITHFP-NEXT:    li a0, 0
 ; RV32I-WITHFP-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT:    addi sp, sp, 32

diff  --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll
index 07e994b11bfd..96dbe1dba0f5 100644
--- a/llvm/test/CodeGen/RISCV/half-arith.ll
+++ b/llvm/test/CodeGen/RISCV/half-arith.ll
@@ -1090,7 +1090,7 @@ define half @fmsub_s(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    addi s0, a0, -1
 ; RV32I-NEXT:    and a0, a2, s0
 ; RV32I-NEXT:    call __gnu_h2f_ieee at plt
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee at plt
 ; RV32I-NEXT:    and a0, a0, s0
@@ -1136,7 +1136,7 @@ define half @fmsub_s(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    addiw s0, a0, -1
 ; RV64I-NEXT:    and a0, a2, s0
 ; RV64I-NEXT:    call __gnu_h2f_ieee at plt
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee at plt
 ; RV64I-NEXT:    and a0, a0, s0
@@ -1204,13 +1204,13 @@ define half @fnmadd_s(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    addi s1, a1, -1
 ; RV32I-NEXT:    and a0, a0, s1
 ; RV32I-NEXT:    call __gnu_h2f_ieee at plt
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee at plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    and a0, s3, s1
 ; RV32I-NEXT:    call __gnu_h2f_ieee at plt
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee at plt
 ; RV32I-NEXT:    mv s0, a0
@@ -1262,13 +1262,13 @@ define half @fnmadd_s(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    addiw s1, a1, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:    call __gnu_h2f_ieee at plt
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee at plt
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    and a0, s3, s1
 ; RV64I-NEXT:    call __gnu_h2f_ieee at plt
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee at plt
 ; RV64I-NEXT:    mv s0, a0
@@ -1344,13 +1344,13 @@ define half @fnmadd_s_2(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    addi s1, a0, -1
 ; RV32I-NEXT:    and a0, a1, s1
 ; RV32I-NEXT:    call __gnu_h2f_ieee at plt
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee at plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    and a0, s3, s1
 ; RV32I-NEXT:    call __gnu_h2f_ieee at plt
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3 at plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee at plt
 ; RV32I-NEXT:    mv s0, a0
@@ -1402,13 +1402,13 @@ define half @fnmadd_s_2(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    addiw s1, a0, -1
 ; RV64I-NEXT:    and a0, a1, s1
 ; RV64I-NEXT:    call __gnu_h2f_ieee at plt
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee at plt
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    and a0, s3, s1
 ; RV64I-NEXT:    call __gnu_h2f_ieee at plt
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3 at plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee at plt
 ; RV64I-NEXT:    mv s0, a0
@@ -1482,7 +1482,7 @@ define half @fnmsub_s(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    addi s0, a1, -1
 ; RV32I-NEXT:    and a0, a0, s0
 ; RV32I-NEXT:    call __gnu_h2f_ieee at plt
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV32I-NEXT:    and a0, a0, s0
@@ -1527,7 +1527,7 @@ define half @fnmsub_s(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    addiw s0, a1, -1
 ; RV64I-NEXT:    and a0, a0, s0
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV64I-NEXT:    and a0, a0, s0
@@ -1592,7 +1592,7 @@ define half @fnmsub_s_2(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    addi s0, a0, -1
 ; RV32I-NEXT:    and a0, a1, s0
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV32I-NEXT:    and a0, a0, s0
@@ -1638,7 +1638,7 @@ define half @fnmsub_s_2(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    addiw s0, a0, -1
 ; RV64I-NEXT:    and a0, a1, s0
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV64I-NEXT:    and a0, a0, s0
@@ -1794,7 +1794,7 @@ define half @fmsub_s_contract(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    addi s0, a0, -1
 ; RV32I-NEXT:    and a0, a2, s0
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV32I-NEXT:    mv s2, a0
@@ -1838,7 +1838,7 @@ define half @fmsub_s_contract(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    addiw s0, a0, -1
 ; RV64I-NEXT:    and a0, a2, s0
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV64I-NEXT:    mv s2, a0
@@ -1907,19 +1907,19 @@ define half @fnmadd_s_contract(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    addi s1, a1, -1
 ; RV32I-NEXT:    and a0, a0, s1
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    and a0, s0, s1
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    and a0, s2, s1
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV32I-NEXT:    mv s2, a0
@@ -1970,19 +1970,19 @@ define half @fnmadd_s_contract(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    addiw s1, a1, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    and a0, s0, s1
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV64I-NEXT:    mv s4, a0
 ; RV64I-NEXT:    and a0, s2, s1
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV64I-NEXT:    mv s2, a0
@@ -2057,13 +2057,13 @@ define half @fnmsub_s_contract(half %a, half %b, half %c) nounwind {
 ; RV32I-NEXT:    addi s0, a1, -1
 ; RV32I-NEXT:    and a0, a0, s0
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    and a0, s1, s0
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __addsf3@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV32I-NEXT:    mv s1, a0
@@ -2108,13 +2108,13 @@ define half @fnmsub_s_contract(half %a, half %b, half %c) nounwind {
 ; RV64I-NEXT:    addiw s0, a1, -1
 ; RV64I-NEXT:    and a0, a0, s0
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    and a0, s1, s0
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV64I-NEXT:    mv s1, a0

diff --git a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
index d7d03578ea81..b949054b934a 100644
--- a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
@@ -11,7 +11,7 @@ declare half @dummy(half)
 define void @br_fcmp_false(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_false:
 ; RV32IZFH:       # %bb.0:
-; RV32IZFH-NEXT:    addi a0, zero, 1
+; RV32IZFH-NEXT:    li a0, 1
 ; RV32IZFH-NEXT:    bnez a0, .LBB0_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.then
 ; RV32IZFH-NEXT:    ret
@@ -22,7 +22,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
 ;
 ; RV64IZFH-LABEL: br_fcmp_false:
 ; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    addi a0, zero, 1
+; RV64IZFH-NEXT:    li a0, 1
 ; RV64IZFH-NEXT:    bnez a0, .LBB0_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.then
 ; RV64IZFH-NEXT:    ret
@@ -527,7 +527,7 @@ if.then:
 define void @br_fcmp_true(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: br_fcmp_true:
 ; RV32IZFH:       # %bb.0:
-; RV32IZFH-NEXT:    addi a0, zero, 1
+; RV32IZFH-NEXT:    li a0, 1
 ; RV32IZFH-NEXT:    bnez a0, .LBB16_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
 ; RV32IZFH-NEXT:    ret
@@ -538,7 +538,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
 ;
 ; RV64IZFH-LABEL: br_fcmp_true:
 ; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    addi a0, zero, 1
+; RV64IZFH-NEXT:    li a0, 1
 ; RV64IZFH-NEXT:    bnez a0, .LBB16_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
 ; RV64IZFH-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index 7874109d4c60..5c7cd114cff1 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -69,7 +69,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV32IZFH-NEXT:    feq.s a0, ft0, ft0
 ; RV32IZFH-NEXT:    bnez a0, .LBB1_2
 ; RV32IZFH-NEXT:  # %bb.1: # %start
-; RV32IZFH-NEXT:    mv a0, zero
+; RV32IZFH-NEXT:    li a0, 0
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB1_2:
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI1_0)
@@ -87,7 +87,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV64IZFH-NEXT:    feq.s a0, ft0, ft0
 ; RV64IZFH-NEXT:    bnez a0, .LBB1_2
 ; RV64IZFH-NEXT:  # %bb.1: # %start
-; RV64IZFH-NEXT:    mv a0, zero
+; RV64IZFH-NEXT:    li a0, 0
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB1_2:
 ; RV64IZFH-NEXT:    lui a0, %hi(.LCPI1_0)
@@ -105,7 +105,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV32IDZFH-NEXT:    feq.s a0, ft0, ft0
 ; RV32IDZFH-NEXT:    bnez a0, .LBB1_2
 ; RV32IDZFH-NEXT:  # %bb.1: # %start
-; RV32IDZFH-NEXT:    mv a0, zero
+; RV32IDZFH-NEXT:    li a0, 0
 ; RV32IDZFH-NEXT:    ret
 ; RV32IDZFH-NEXT:  .LBB1_2:
 ; RV32IDZFH-NEXT:    lui a0, %hi(.LCPI1_0)
@@ -123,7 +123,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV64IDZFH-NEXT:    feq.s a0, ft0, ft0
 ; RV64IDZFH-NEXT:    bnez a0, .LBB1_2
 ; RV64IDZFH-NEXT:  # %bb.1: # %start
-; RV64IDZFH-NEXT:    mv a0, zero
+; RV64IDZFH-NEXT:    li a0, 0
 ; RV64IDZFH-NEXT:    ret
 ; RV64IDZFH-NEXT:  .LBB1_2:
 ; RV64IDZFH-NEXT:    lui a0, %hi(.LCPI1_0)
@@ -153,7 +153,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixsfsi@plt
-; RV32I-NEXT:    mv s1, zero
+; RV32I-NEXT:    li s1, 0
 ; RV32I-NEXT:    lui s3, 1048568
 ; RV32I-NEXT:    bltz s2, .LBB1_2
 ; RV32I-NEXT:  # %bb.1: # %start
@@ -202,7 +202,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixsfdi@plt
-; RV64I-NEXT:    mv s1, zero
+; RV64I-NEXT:    li s1, 0
 ; RV64I-NEXT:    lui s3, 1048568
 ; RV64I-NEXT:    bltz s2, .LBB1_2
 ; RV64I-NEXT:  # %bb.1: # %start
@@ -295,7 +295,7 @@ define i32 @fcvt_ui_h_multiple_use(half %x, i32* %y) {
 ; RV32IZFH-LABEL: fcvt_ui_h_multiple_use:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    fcvt.wu.h a1, fa0, rtz
-; RV32IZFH-NEXT:    addi a0, zero, 1
+; RV32IZFH-NEXT:    li a0, 1
 ; RV32IZFH-NEXT:    beqz a1, .LBB3_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    mv a0, a1
@@ -305,7 +305,7 @@ define i32 @fcvt_ui_h_multiple_use(half %x, i32* %y) {
 ; RV64IZFH-LABEL: fcvt_ui_h_multiple_use:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    fcvt.wu.h a1, fa0, rtz
-; RV64IZFH-NEXT:    addi a0, zero, 1
+; RV64IZFH-NEXT:    li a0, 1
 ; RV64IZFH-NEXT:    beqz a1, .LBB3_2
 ; RV64IZFH-NEXT:  # %bb.1:
 ; RV64IZFH-NEXT:    mv a0, a1
@@ -315,7 +315,7 @@ define i32 @fcvt_ui_h_multiple_use(half %x, i32* %y) {
 ; RV32IDZFH-LABEL: fcvt_ui_h_multiple_use:
 ; RV32IDZFH:       # %bb.0:
 ; RV32IDZFH-NEXT:    fcvt.wu.h a1, fa0, rtz
-; RV32IDZFH-NEXT:    addi a0, zero, 1
+; RV32IDZFH-NEXT:    li a0, 1
 ; RV32IDZFH-NEXT:    beqz a1, .LBB3_2
 ; RV32IDZFH-NEXT:  # %bb.1:
 ; RV32IDZFH-NEXT:    mv a0, a1
@@ -325,7 +325,7 @@ define i32 @fcvt_ui_h_multiple_use(half %x, i32* %y) {
 ; RV64IDZFH-LABEL: fcvt_ui_h_multiple_use:
 ; RV64IDZFH:       # %bb.0:
 ; RV64IDZFH-NEXT:    fcvt.wu.h a1, fa0, rtz
-; RV64IDZFH-NEXT:    addi a0, zero, 1
+; RV64IDZFH-NEXT:    li a0, 1
 ; RV64IDZFH-NEXT:    beqz a1, .LBB3_2
 ; RV64IDZFH-NEXT:  # %bb.1:
 ; RV64IDZFH-NEXT:    mv a0, a1
@@ -344,7 +344,7 @@ define i32 @fcvt_ui_h_multiple_use(half %x, i32* %y) {
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call __fixunssfsi@plt
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    beqz a1, .LBB3_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
@@ -365,7 +365,7 @@ define i32 @fcvt_ui_h_multiple_use(half %x, i32* %y) {
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call __fixunssfdi@plt
 ; RV64I-NEXT:    sext.w a2, a0
-; RV64I-NEXT:    addi a1, zero, 1
+; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    beqz a2, .LBB3_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a1, a0
@@ -438,12 +438,12 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, s2
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __gesf2@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __fixunssfsi@plt
-; RV32I-NEXT:    mv s3, zero
+; RV32I-NEXT:    li s3, 0
 ; RV32I-NEXT:    bltz s0, .LBB4_2
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    mv s3, a0
@@ -478,12 +478,12 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, s2
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __gesf2@plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __fixunssfdi@plt
-; RV64I-NEXT:    mv s3, zero
+; RV64I-NEXT:    li s3, 0
 ; RV64I-NEXT:    bltz s0, .LBB4_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    mv s3, a0
@@ -566,7 +566,7 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ; RV32IZFH-NEXT:    feq.h a0, fa0, fa0
 ; RV32IZFH-NEXT:    bnez a0, .LBB6_2
 ; RV32IZFH-NEXT:  # %bb.1: # %start
-; RV32IZFH-NEXT:    mv a0, zero
+; RV32IZFH-NEXT:    li a0, 0
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB6_2:
 ; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rtz
@@ -577,7 +577,7 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ; RV64IZFH-NEXT:    feq.h a0, fa0, fa0
 ; RV64IZFH-NEXT:    bnez a0, .LBB6_2
 ; RV64IZFH-NEXT:  # %bb.1: # %start
-; RV64IZFH-NEXT:    mv a0, zero
+; RV64IZFH-NEXT:    li a0, 0
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB6_2:
 ; RV64IZFH-NEXT:    fcvt.w.h a0, fa0, rtz
@@ -588,7 +588,7 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ; RV32IDZFH-NEXT:    feq.h a0, fa0, fa0
 ; RV32IDZFH-NEXT:    bnez a0, .LBB6_2
 ; RV32IDZFH-NEXT:  # %bb.1: # %start
-; RV32IDZFH-NEXT:    mv a0, zero
+; RV32IDZFH-NEXT:    li a0, 0
 ; RV32IDZFH-NEXT:    ret
 ; RV32IDZFH-NEXT:  .LBB6_2:
 ; RV32IDZFH-NEXT:    fcvt.w.h a0, fa0, rtz
@@ -599,7 +599,7 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ; RV64IDZFH-NEXT:    feq.h a0, fa0, fa0
 ; RV64IDZFH-NEXT:    bnez a0, .LBB6_2
 ; RV64IDZFH-NEXT:  # %bb.1: # %start
-; RV64IDZFH-NEXT:    mv a0, zero
+; RV64IDZFH-NEXT:    li a0, 0
 ; RV64IDZFH-NEXT:    ret
 ; RV64IDZFH-NEXT:  .LBB6_2:
 ; RV64IDZFH-NEXT:    fcvt.w.h a0, fa0, rtz
@@ -624,7 +624,7 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    mv s2, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixsfsi@plt
-; RV32I-NEXT:    mv s1, zero
+; RV32I-NEXT:    li s1, 0
 ; RV32I-NEXT:    lui s4, 524288
 ; RV32I-NEXT:    lui s3, 524288
 ; RV32I-NEXT:    bltz s2, .LBB6_2
@@ -675,7 +675,7 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixsfdi@plt
-; RV64I-NEXT:    mv s1, zero
+; RV64I-NEXT:    li s1, 0
 ; RV64I-NEXT:    lui s4, 524288
 ; RV64I-NEXT:    lui s3, 524288
 ; RV64I-NEXT:    bltz s2, .LBB6_2
@@ -768,7 +768,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV32IZFH-NEXT:    feq.h a0, fa0, fa0
 ; RV32IZFH-NEXT:    bnez a0, .LBB8_2
 ; RV32IZFH-NEXT:  # %bb.1: # %start
-; RV32IZFH-NEXT:    mv a0, zero
+; RV32IZFH-NEXT:    li a0, 0
 ; RV32IZFH-NEXT:    ret
 ; RV32IZFH-NEXT:  .LBB8_2:
 ; RV32IZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
@@ -779,7 +779,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV64IZFH-NEXT:    feq.h a0, fa0, fa0
 ; RV64IZFH-NEXT:    bnez a0, .LBB8_2
 ; RV64IZFH-NEXT:  # %bb.1: # %start
-; RV64IZFH-NEXT:    mv a0, zero
+; RV64IZFH-NEXT:    li a0, 0
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB8_2:
 ; RV64IZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
@@ -790,7 +790,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV32IDZFH-NEXT:    feq.h a0, fa0, fa0
 ; RV32IDZFH-NEXT:    bnez a0, .LBB8_2
 ; RV32IDZFH-NEXT:  # %bb.1: # %start
-; RV32IDZFH-NEXT:    mv a0, zero
+; RV32IDZFH-NEXT:    li a0, 0
 ; RV32IDZFH-NEXT:    ret
 ; RV32IDZFH-NEXT:  .LBB8_2:
 ; RV32IDZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
@@ -801,7 +801,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV64IDZFH-NEXT:    feq.h a0, fa0, fa0
 ; RV64IDZFH-NEXT:    bnez a0, .LBB8_2
 ; RV64IDZFH-NEXT:  # %bb.1: # %start
-; RV64IDZFH-NEXT:    mv a0, zero
+; RV64IDZFH-NEXT:    li a0, 0
 ; RV64IDZFH-NEXT:    ret
 ; RV64IDZFH-NEXT:  .LBB8_2:
 ; RV64IDZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
@@ -819,12 +819,12 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __gesf2@plt
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixunssfsi@plt
-; RV32I-NEXT:    mv s2, zero
+; RV32I-NEXT:    li s2, 0
 ; RV32I-NEXT:    bltz s1, .LBB8_2
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    mv s2, a0
@@ -834,7 +834,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __gtsf2@plt
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    addi a0, zero, -1
+; RV32I-NEXT:    li a0, -1
 ; RV32I-NEXT:    bgtz a1, .LBB8_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    mv a0, s2
@@ -858,12 +858,12 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __gesf2@plt
 ; RV64I-NEXT:    mv s2, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixunssfdi@plt
-; RV64I-NEXT:    mv s1, zero
+; RV64I-NEXT:    li s1, 0
 ; RV64I-NEXT:    bltz s2, .LBB8_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    mv s1, a0
@@ -874,7 +874,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    call __gtsf2@plt
 ; RV64I-NEXT:    blez a0, .LBB8_4
 ; RV64I-NEXT:  # %bb.3:
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    srli s1, a0, 32
 ; RV64I-NEXT:  .LBB8_4: # %start
 ; RV64I-NEXT:    mv a0, s1
@@ -964,12 +964,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IZFH-NEXT:    mv a2, a0
 ; RV32IZFH-NEXT:    bnez s0, .LBB10_2
 ; RV32IZFH-NEXT:  # %bb.1: # %start
-; RV32IZFH-NEXT:    mv a2, zero
+; RV32IZFH-NEXT:    li a2, 0
 ; RV32IZFH-NEXT:  .LBB10_2: # %start
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI10_1)
 ; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI10_1)(a0)
 ; RV32IZFH-NEXT:    flt.s a3, ft0, fs0
-; RV32IZFH-NEXT:    addi a0, zero, -1
+; RV32IZFH-NEXT:    li a0, -1
 ; RV32IZFH-NEXT:    beqz a3, .LBB10_9
 ; RV32IZFH-NEXT:  # %bb.3: # %start
 ; RV32IZFH-NEXT:    feq.s a2, fs0, fs0
@@ -982,7 +982,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IZFH-NEXT:  .LBB10_6: # %start
 ; RV32IZFH-NEXT:    bnez a2, .LBB10_8
 ; RV32IZFH-NEXT:  .LBB10_7: # %start
-; RV32IZFH-NEXT:    mv a1, zero
+; RV32IZFH-NEXT:    li a1, 0
 ; RV32IZFH-NEXT:  .LBB10_8: # %start
 ; RV32IZFH-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -994,7 +994,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IZFH-NEXT:    feq.s a2, fs0, fs0
 ; RV32IZFH-NEXT:    bnez a2, .LBB10_4
 ; RV32IZFH-NEXT:  .LBB10_10: # %start
-; RV32IZFH-NEXT:    mv a0, zero
+; RV32IZFH-NEXT:    li a0, 0
 ; RV32IZFH-NEXT:    lui a4, 524288
 ; RV32IZFH-NEXT:    bnez s0, .LBB10_5
 ; RV32IZFH-NEXT:  .LBB10_11: # %start
@@ -1010,7 +1010,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV64IZFH-NEXT:    feq.h a0, fa0, fa0
 ; RV64IZFH-NEXT:    bnez a0, .LBB10_2
 ; RV64IZFH-NEXT:  # %bb.1: # %start
-; RV64IZFH-NEXT:    mv a0, zero
+; RV64IZFH-NEXT:    li a0, 0
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB10_2:
 ; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rtz
@@ -1031,12 +1031,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IDZFH-NEXT:    mv a2, a0
 ; RV32IDZFH-NEXT:    bnez s0, .LBB10_2
 ; RV32IDZFH-NEXT:  # %bb.1: # %start
-; RV32IDZFH-NEXT:    mv a2, zero
+; RV32IDZFH-NEXT:    li a2, 0
 ; RV32IDZFH-NEXT:  .LBB10_2: # %start
 ; RV32IDZFH-NEXT:    lui a0, %hi(.LCPI10_1)
 ; RV32IDZFH-NEXT:    flw ft0, %lo(.LCPI10_1)(a0)
 ; RV32IDZFH-NEXT:    flt.s a3, ft0, fs0
-; RV32IDZFH-NEXT:    addi a0, zero, -1
+; RV32IDZFH-NEXT:    li a0, -1
 ; RV32IDZFH-NEXT:    beqz a3, .LBB10_9
 ; RV32IDZFH-NEXT:  # %bb.3: # %start
 ; RV32IDZFH-NEXT:    feq.s a2, fs0, fs0
@@ -1049,7 +1049,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IDZFH-NEXT:  .LBB10_6: # %start
 ; RV32IDZFH-NEXT:    bnez a2, .LBB10_8
 ; RV32IDZFH-NEXT:  .LBB10_7: # %start
-; RV32IDZFH-NEXT:    mv a1, zero
+; RV32IDZFH-NEXT:    li a1, 0
 ; RV32IDZFH-NEXT:  .LBB10_8: # %start
 ; RV32IDZFH-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
 ; RV32IDZFH-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
@@ -1061,7 +1061,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32IDZFH-NEXT:    feq.s a2, fs0, fs0
 ; RV32IDZFH-NEXT:    bnez a2, .LBB10_4
 ; RV32IDZFH-NEXT:  .LBB10_10: # %start
-; RV32IDZFH-NEXT:    mv a0, zero
+; RV32IDZFH-NEXT:    li a0, 0
 ; RV32IDZFH-NEXT:    lui a4, 524288
 ; RV32IDZFH-NEXT:    bnez s0, .LBB10_5
 ; RV32IDZFH-NEXT:  .LBB10_11: # %start
@@ -1077,7 +1077,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV64IDZFH-NEXT:    feq.h a0, fa0, fa0
 ; RV64IDZFH-NEXT:    bnez a0, .LBB10_2
 ; RV64IDZFH-NEXT:  # %bb.1: # %start
-; RV64IDZFH-NEXT:    mv a0, zero
+; RV64IDZFH-NEXT:    li a0, 0
 ; RV64IDZFH-NEXT:    ret
 ; RV64IDZFH-NEXT:  .LBB10_2:
 ; RV64IDZFH-NEXT:    fcvt.l.h a0, fa0, rtz
@@ -1105,8 +1105,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixsfdi@plt
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    mv s1, zero
-; RV32I-NEXT:    mv s5, zero
+; RV32I-NEXT:    li s1, 0
+; RV32I-NEXT:    li s5, 0
 ; RV32I-NEXT:    bltz s3, .LBB10_2
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    mv s5, a0
@@ -1116,7 +1116,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s4
 ; RV32I-NEXT:    call __gtsf2@plt
-; RV32I-NEXT:    addi s6, zero, -1
+; RV32I-NEXT:    li s6, -1
 ; RV32I-NEXT:    blt s1, a0, .LBB10_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    mv s6, s5
@@ -1184,8 +1184,8 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    mv s3, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixsfdi@plt
-; RV64I-NEXT:    mv s1, zero
-; RV64I-NEXT:    addi s4, zero, -1
+; RV64I-NEXT:    li s1, 0
+; RV64I-NEXT:    li s4, -1
 ; RV64I-NEXT:    bltz s3, .LBB10_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    mv s2, a0
@@ -1296,13 +1296,13 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32IZFH-NEXT:    mv a3, a0
 ; RV32IZFH-NEXT:    bnez s0, .LBB12_2
 ; RV32IZFH-NEXT:  # %bb.1: # %start
-; RV32IZFH-NEXT:    mv a3, zero
+; RV32IZFH-NEXT:    li a3, 0
 ; RV32IZFH-NEXT:  .LBB12_2: # %start
 ; RV32IZFH-NEXT:    lui a0, %hi(.LCPI12_0)
 ; RV32IZFH-NEXT:    flw ft0, %lo(.LCPI12_0)(a0)
 ; RV32IZFH-NEXT:    flt.s a4, ft0, fs0
-; RV32IZFH-NEXT:    addi a2, zero, -1
-; RV32IZFH-NEXT:    addi a0, zero, -1
+; RV32IZFH-NEXT:    li a2, -1
+; RV32IZFH-NEXT:    li a0, -1
 ; RV32IZFH-NEXT:    beqz a4, .LBB12_7
 ; RV32IZFH-NEXT:  # %bb.3: # %start
 ; RV32IZFH-NEXT:    beqz s0, .LBB12_8
@@ -1321,7 +1321,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32IZFH-NEXT:    mv a0, a3
 ; RV32IZFH-NEXT:    bnez s0, .LBB12_4
 ; RV32IZFH-NEXT:  .LBB12_8: # %start
-; RV32IZFH-NEXT:    mv a1, zero
+; RV32IZFH-NEXT:    li a1, 0
 ; RV32IZFH-NEXT:    beqz a4, .LBB12_5
 ; RV32IZFH-NEXT:    j .LBB12_6
 ;
@@ -1330,7 +1330,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV64IZFH-NEXT:    feq.h a0, fa0, fa0
 ; RV64IZFH-NEXT:    bnez a0, .LBB12_2
 ; RV64IZFH-NEXT:  # %bb.1: # %start
-; RV64IZFH-NEXT:    mv a0, zero
+; RV64IZFH-NEXT:    li a0, 0
 ; RV64IZFH-NEXT:    ret
 ; RV64IZFH-NEXT:  .LBB12_2:
 ; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rtz
@@ -1350,13 +1350,13 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32IDZFH-NEXT:    mv a3, a0
 ; RV32IDZFH-NEXT:    bnez s0, .LBB12_2
 ; RV32IDZFH-NEXT:  # %bb.1: # %start
-; RV32IDZFH-NEXT:    mv a3, zero
+; RV32IDZFH-NEXT:    li a3, 0
 ; RV32IDZFH-NEXT:  .LBB12_2: # %start
 ; RV32IDZFH-NEXT:    lui a0, %hi(.LCPI12_0)
 ; RV32IDZFH-NEXT:    flw ft0, %lo(.LCPI12_0)(a0)
 ; RV32IDZFH-NEXT:    flt.s a4, ft0, fs0
-; RV32IDZFH-NEXT:    addi a2, zero, -1
-; RV32IDZFH-NEXT:    addi a0, zero, -1
+; RV32IDZFH-NEXT:    li a2, -1
+; RV32IDZFH-NEXT:    li a0, -1
 ; RV32IDZFH-NEXT:    beqz a4, .LBB12_7
 ; RV32IDZFH-NEXT:  # %bb.3: # %start
 ; RV32IDZFH-NEXT:    beqz s0, .LBB12_8
@@ -1375,7 +1375,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32IDZFH-NEXT:    mv a0, a3
 ; RV32IDZFH-NEXT:    bnez s0, .LBB12_4
 ; RV32IDZFH-NEXT:  .LBB12_8: # %start
-; RV32IDZFH-NEXT:    mv a1, zero
+; RV32IDZFH-NEXT:    li a1, 0
 ; RV32IDZFH-NEXT:    beqz a4, .LBB12_5
 ; RV32IDZFH-NEXT:    j .LBB12_6
 ;
@@ -1384,7 +1384,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV64IDZFH-NEXT:    feq.h a0, fa0, fa0
 ; RV64IDZFH-NEXT:    bnez a0, .LBB12_2
 ; RV64IDZFH-NEXT:  # %bb.1: # %start
-; RV64IDZFH-NEXT:    mv a0, zero
+; RV64IDZFH-NEXT:    li a0, 0
 ; RV64IDZFH-NEXT:    ret
 ; RV64IDZFH-NEXT:  .LBB12_2:
 ; RV64IDZFH-NEXT:    fcvt.lu.h a0, fa0, rtz
@@ -1405,13 +1405,13 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __gesf2@plt
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixunssfdi@plt
 ; RV32I-NEXT:    mv s2, a1
-; RV32I-NEXT:    mv s5, zero
+; RV32I-NEXT:    li s5, 0
 ; RV32I-NEXT:    bltz s1, .LBB12_2
 ; RV32I-NEXT:  # %bb.1: # %start
 ; RV32I-NEXT:    mv s5, a0
@@ -1421,16 +1421,16 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    call __gtsf2@plt
-; RV32I-NEXT:    addi s3, zero, -1
-; RV32I-NEXT:    addi s4, zero, -1
+; RV32I-NEXT:    li s3, -1
+; RV32I-NEXT:    li s4, -1
 ; RV32I-NEXT:    bgtz a0, .LBB12_4
 ; RV32I-NEXT:  # %bb.3: # %start
 ; RV32I-NEXT:    mv s4, s5
 ; RV32I-NEXT:  .LBB12_4: # %start
 ; RV32I-NEXT:    mv a0, s0
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __gesf2@plt
-; RV32I-NEXT:    mv s5, zero
+; RV32I-NEXT:    li s5, 0
 ; RV32I-NEXT:    bltz a0, .LBB12_6
 ; RV32I-NEXT:  # %bb.5: # %start
 ; RV32I-NEXT:    mv s5, s2
@@ -1466,12 +1466,12 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __gesf2@plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixunssfdi@plt
-; RV64I-NEXT:    mv s2, zero
+; RV64I-NEXT:    li s2, 0
 ; RV64I-NEXT:    bltz s1, .LBB12_2
 ; RV64I-NEXT:  # %bb.1: # %start
 ; RV64I-NEXT:    mv s2, a0
@@ -1481,7 +1481,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __gtsf2@plt
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    bgtz a1, .LBB12_4
 ; RV64I-NEXT:  # %bb.3: # %start
 ; RV64I-NEXT:    mv a0, s2

diff --git a/llvm/test/CodeGen/RISCV/half-fcmp.ll b/llvm/test/CodeGen/RISCV/half-fcmp.ll
index 8fb5c06cd258..c2f3f472ff96 100644
--- a/llvm/test/CodeGen/RISCV/half-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-fcmp.ll
@@ -11,22 +11,22 @@
 define i32 @fcmp_false(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: fcmp_false:
 ; RV32IZFH:       # %bb.0:
-; RV32IZFH-NEXT:    mv a0, zero
+; RV32IZFH-NEXT:    li a0, 0
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: fcmp_false:
 ; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    mv a0, zero
+; RV64IZFH-NEXT:    li a0, 0
 ; RV64IZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fcmp_false:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_false:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    ret
   %1 = fcmp false half %a, %b
   %2 = zext i1 %1 to i32
@@ -502,22 +502,22 @@ define i32 @fcmp_uno(half %a, half %b) nounwind {
 define i32 @fcmp_true(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: fcmp_true:
 ; RV32IZFH:       # %bb.0:
-; RV32IZFH-NEXT:    addi a0, zero, 1
+; RV32IZFH-NEXT:    li a0, 1
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: fcmp_true:
 ; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    addi a0, zero, 1
+; RV64IZFH-NEXT:    li a0, 1
 ; RV64IZFH-NEXT:    ret
 ;
 ; RV32I-LABEL: fcmp_true:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fcmp_true:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    ret
   %1 = fcmp true half %a, %b
   %2 = zext i1 %1 to i32

diff --git a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
index 6521ea26d705..85df696eb6a5 100644
--- a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
+++ b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
@@ -13,9 +13,9 @@ define dso_local void @multiple_stores() local_unnamed_addr nounwind {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lui a0, %hi(s)
 ; CHECK-NEXT:    addi a0, a0, %lo(s)
-; CHECK-NEXT:    addi a1, zero, 10
+; CHECK-NEXT:    li a1, 10
 ; CHECK-NEXT:    sw a1, 160(a0)
-; CHECK-NEXT:    addi a1, zero, 20
+; CHECK-NEXT:    li a1, 20
 ; CHECK-NEXT:    sw a1, 164(a0)
 ; CHECK-NEXT:    ret
 entry:
@@ -32,7 +32,7 @@ define dso_local void @control_flow_with_mem_access() local_unnamed_addr nounwin
 ; CHECK-NEXT:    lw a1, 164(a0)
 ; CHECK-NEXT:    blez a1, .LBB1_2
 ; CHECK-NEXT:  # %bb.1: # %if.then
-; CHECK-NEXT:    addi a1, zero, 10
+; CHECK-NEXT:    li a1, 10
 ; CHECK-NEXT:    sw a1, 160(a0)
 ; CHECK-NEXT:  .LBB1_2: # %if.end
 ; CHECK-NEXT:    ret
@@ -149,10 +149,10 @@ define dso_local i32 @load_half() nounwind {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    lui a0, %hi(foo+8)
 ; RV32-NEXT:    lhu a0, %lo(foo+8)(a0)
-; RV32-NEXT:    addi a1, zero, 140
+; RV32-NEXT:    li a1, 140
 ; RV32-NEXT:    bne a0, a1, .LBB7_2
 ; RV32-NEXT:  # %bb.1: # %if.end
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -165,10 +165,10 @@ define dso_local i32 @load_half() nounwind {
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    lui a0, %hi(foo+8)
 ; RV64-NEXT:    lhu a0, %lo(foo+8)(a0)
-; RV64-NEXT:    addi a1, zero, 140
+; RV64-NEXT:    li a1, 140
 ; RV64-NEXT:    bne a0, a1, .LBB7_2
 ; RV64-NEXT:  # %bb.1: # %if.end
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
@@ -193,7 +193,7 @@ define dso_local void @one_store() local_unnamed_addr nounwind {
 ; CHECK-LABEL: one_store:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lui a0, %hi(s+160)
-; CHECK-NEXT:    addi a1, zero, 10
+; CHECK-NEXT:    li a1, 10
 ; CHECK-NEXT:    sw a1, %lo(s+160)(a0)
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/i32-icmp.ll b/llvm/test/CodeGen/RISCV/i32-icmp.ll
index 34557696cc57..6a11d24902d6 100644
--- a/llvm/test/CodeGen/RISCV/i32-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/i32-icmp.ll
@@ -41,7 +41,7 @@ define i32 @icmp_eq_constant_2048(i32 %a) nounwind {
 define i32 @icmp_eq_constant_neg_2048(i32 %a) nounwind {
 ; RV32I-LABEL: icmp_eq_constant_neg_2048:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, -2048
+; RV32I-NEXT:    li a1, -2048
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    seqz a0, a0
 ; RV32I-NEXT:    ret
@@ -107,7 +107,7 @@ define i32 @icmp_ne_constant_2048(i32 %a) nounwind {
 define i32 @icmp_ne_constant_neg_2048(i32 %a) nounwind {
 ; RV32I-LABEL: icmp_ne_constant_neg_2048:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, -2048
+; RV32I-NEXT:    li a1, -2048
 ; RV32I-NEXT:    xor a0, a0, a1
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/imm.ll b/llvm/test/CodeGen/RISCV/imm.ll
index b39050291494..b7d7df6ced1f 100644
--- a/llvm/test/CodeGen/RISCV/imm.ll
+++ b/llvm/test/CodeGen/RISCV/imm.ll
@@ -17,22 +17,22 @@
 define signext i32 @zero() nounwind {
 ; RV32I-LABEL: zero:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zero:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IZBA-LABEL: zero:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    mv a0, zero
+; RV64IZBA-NEXT:    li a0, 0
 ; RV64IZBA-NEXT:    ret
 ;
 ; RV64IZBS-LABEL: zero:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    mv a0, zero
+; RV64IZBS-NEXT:    li a0, 0
 ; RV64IZBS-NEXT:    ret
   ret i32 0
 }
@@ -40,22 +40,22 @@ define signext i32 @zero() nounwind {
 define signext i32 @pos_small() nounwind {
 ; RV32I-LABEL: pos_small:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 2047
+; RV32I-NEXT:    li a0, 2047
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: pos_small:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 2047
+; RV64I-NEXT:    li a0, 2047
 ; RV64I-NEXT:    ret
 ;
 ; RV64IZBA-LABEL: pos_small:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a0, zero, 2047
+; RV64IZBA-NEXT:    li a0, 2047
 ; RV64IZBA-NEXT:    ret
 ;
 ; RV64IZBS-LABEL: pos_small:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    addi a0, zero, 2047
+; RV64IZBS-NEXT:    li a0, 2047
 ; RV64IZBS-NEXT:    ret
   ret i32 2047
 }
@@ -63,22 +63,22 @@ define signext i32 @pos_small() nounwind {
 define signext i32 @neg_small() nounwind {
 ; RV32I-LABEL: neg_small:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, -2048
+; RV32I-NEXT:    li a0, -2048
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: neg_small:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, -2048
+; RV64I-NEXT:    li a0, -2048
 ; RV64I-NEXT:    ret
 ;
 ; RV64IZBA-LABEL: neg_small:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a0, zero, -2048
+; RV64IZBA-NEXT:    li a0, -2048
 ; RV64IZBA-NEXT:    ret
 ;
 ; RV64IZBS-LABEL: neg_small:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    addi a0, zero, -2048
+; RV64IZBS-NEXT:    li a0, -2048
 ; RV64IZBS-NEXT:    ret
   ret i32 -2048
 }
@@ -274,24 +274,24 @@ define i64 @imm64_1() nounwind {
 ; RV32I-LABEL: imm64_1:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm64_1:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    slli a0, a0, 31
 ; RV64I-NEXT:    ret
 ;
 ; RV64IZBA-LABEL: imm64_1:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a0, zero, 1
+; RV64IZBA-NEXT:    li a0, 1
 ; RV64IZBA-NEXT:    slli a0, a0, 31
 ; RV64IZBA-NEXT:    ret
 ;
 ; RV64IZBS-LABEL: imm64_1:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    addi a0, zero, 1
+; RV64IZBS-NEXT:    li a0, 1
 ; RV64IZBS-NEXT:    slli a0, a0, 31
 ; RV64IZBS-NEXT:    ret
   ret i64 2147483648 ; 0x8000_0000
@@ -300,25 +300,25 @@ define i64 @imm64_1() nounwind {
 define i64 @imm64_2() nounwind {
 ; RV32I-LABEL: imm64_2:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, -1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a0, -1
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm64_2:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
 ;
 ; RV64IZBA-LABEL: imm64_2:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a0, zero, -1
+; RV64IZBA-NEXT:    li a0, -1
 ; RV64IZBA-NEXT:    srli a0, a0, 32
 ; RV64IZBA-NEXT:    ret
 ;
 ; RV64IZBS-LABEL: imm64_2:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    addi a0, zero, -1
+; RV64IZBS-NEXT:    li a0, -1
 ; RV64IZBS-NEXT:    srli a0, a0, 32
 ; RV64IZBS-NEXT:    ret
   ret i64 4294967295 ; 0xFFFF_FFFF
@@ -327,25 +327,25 @@ define i64 @imm64_2() nounwind {
 define i64 @imm64_3() nounwind {
 ; RV32I-LABEL: imm64_3:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 1
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a1, 1
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm64_3:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    ret
 ;
 ; RV64IZBA-LABEL: imm64_3:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a0, zero, 1
+; RV64IZBA-NEXT:    li a0, 1
 ; RV64IZBA-NEXT:    slli a0, a0, 32
 ; RV64IZBA-NEXT:    ret
 ;
 ; RV64IZBS-LABEL: imm64_3:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    addi a0, zero, 1
+; RV64IZBS-NEXT:    li a0, 1
 ; RV64IZBS-NEXT:    slli a0, a0, 32
 ; RV64IZBS-NEXT:    ret
   ret i64 4294967296 ; 0x1_0000_0000
@@ -355,24 +355,24 @@ define i64 @imm64_4() nounwind {
 ; RV32I-LABEL: imm64_4:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm64_4:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    slli a0, a0, 63
 ; RV64I-NEXT:    ret
 ;
 ; RV64IZBA-LABEL: imm64_4:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a0, zero, -1
+; RV64IZBA-NEXT:    li a0, -1
 ; RV64IZBA-NEXT:    slli a0, a0, 63
 ; RV64IZBA-NEXT:    ret
 ;
 ; RV64IZBS-LABEL: imm64_4:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    addi a0, zero, -1
+; RV64IZBS-NEXT:    li a0, -1
 ; RV64IZBS-NEXT:    slli a0, a0, 63
 ; RV64IZBS-NEXT:    ret
   ret i64 9223372036854775808 ; 0x8000_0000_0000_0000
@@ -382,24 +382,24 @@ define i64 @imm64_5() nounwind {
 ; RV32I-LABEL: imm64_5:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm64_5:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    slli a0, a0, 63
 ; RV64I-NEXT:    ret
 ;
 ; RV64IZBA-LABEL: imm64_5:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a0, zero, -1
+; RV64IZBA-NEXT:    li a0, -1
 ; RV64IZBA-NEXT:    slli a0, a0, 63
 ; RV64IZBA-NEXT:    ret
 ;
 ; RV64IZBS-LABEL: imm64_5:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    addi a0, zero, -1
+; RV64IZBS-NEXT:    li a0, -1
 ; RV64IZBS-NEXT:    slli a0, a0, 63
 ; RV64IZBS-NEXT:    ret
   ret i64 -9223372036854775808 ; 0x8000_0000_0000_0000
@@ -410,7 +410,7 @@ define i64 @imm64_6() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 74565
 ; RV32I-NEXT:    addi a1, a0, 1656
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm64_6:
@@ -446,7 +446,7 @@ define i64 @imm64_7() nounwind {
 ;
 ; RV64I-LABEL: imm64_7:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 7
+; RV64I-NEXT:    li a0, 7
 ; RV64I-NEXT:    slli a0, a0, 36
 ; RV64I-NEXT:    addi a0, a0, 11
 ; RV64I-NEXT:    slli a0, a0, 24
@@ -455,7 +455,7 @@ define i64 @imm64_7() nounwind {
 ;
 ; RV64IZBA-LABEL: imm64_7:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a0, zero, 7
+; RV64IZBA-NEXT:    li a0, 7
 ; RV64IZBA-NEXT:    slli a0, a0, 36
 ; RV64IZBA-NEXT:    addi a0, a0, 11
 ; RV64IZBA-NEXT:    slli a0, a0, 24
@@ -464,7 +464,7 @@ define i64 @imm64_7() nounwind {
 ;
 ; RV64IZBS-LABEL: imm64_7:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    addi a0, zero, 7
+; RV64IZBS-NEXT:    li a0, 7
 ; RV64IZBS-NEXT:    slli a0, a0, 36
 ; RV64IZBS-NEXT:    addi a0, a0, 11
 ; RV64IZBS-NEXT:    slli a0, a0, 24
@@ -525,23 +525,23 @@ define i64 @imm64_8() nounwind {
 define i64 @imm64_9() nounwind {
 ; RV32I-LABEL: imm64_9:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, -1
-; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    li a0, -1
+; RV32I-NEXT:    li a1, -1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm64_9:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    ret
 ;
 ; RV64IZBA-LABEL: imm64_9:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a0, zero, -1
+; RV64IZBA-NEXT:    li a0, -1
 ; RV64IZBA-NEXT:    ret
 ;
 ; RV64IZBS-LABEL: imm64_9:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    addi a0, zero, -1
+; RV64IZBS-NEXT:    li a0, -1
 ; RV64IZBS-NEXT:    ret
   ret i64 -1
 }
@@ -553,7 +553,7 @@ define i64 @imm_left_shifted_lui_1() nounwind {
 ; RV32I-LABEL: imm_left_shifted_lui_1:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 524290
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_left_shifted_lui_1:
@@ -580,7 +580,7 @@ define i64 @imm_left_shifted_lui_2() nounwind {
 ; RV32I-LABEL: imm_left_shifted_lui_2:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 4
-; RV32I-NEXT:    addi a1, zero, 1
+; RV32I-NEXT:    li a1, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_left_shifted_lui_2:
@@ -608,7 +608,7 @@ define i64 @imm_left_shifted_lui_3() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a1, a0, 1
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_left_shifted_lui_3:
@@ -668,7 +668,7 @@ define i64 @imm_right_shifted_lui_2() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 1048575
 ; RV32I-NEXT:    addi a0, a0, 1
-; RV32I-NEXT:    addi a1, zero, 255
+; RV32I-NEXT:    li a1, 255
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_right_shifted_lui_2:
@@ -700,7 +700,7 @@ define i64 @imm_right_shifted_lui_2() nounwind {
 define i64 @imm_decoupled_lui_addi() nounwind {
 ; RV32I-LABEL: imm_decoupled_lui_addi:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, -3
+; RV32I-NEXT:    li a0, -3
 ; RV32I-NEXT:    lui a1, 1
 ; RV32I-NEXT:    ret
 ;
@@ -739,7 +739,7 @@ define i64 @imm_end_xori_1() nounwind {
 ;
 ; RV64I-LABEL: imm_end_xori_1:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    slli a0, a0, 36
 ; RV64I-NEXT:    addi a0, a0, 1
 ; RV64I-NEXT:    slli a0, a0, 25
@@ -748,7 +748,7 @@ define i64 @imm_end_xori_1() nounwind {
 ;
 ; RV64IZBA-LABEL: imm_end_xori_1:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a0, zero, -1
+; RV64IZBA-NEXT:    li a0, -1
 ; RV64IZBA-NEXT:    slli a0, a0, 36
 ; RV64IZBA-NEXT:    addi a0, a0, 1
 ; RV64IZBA-NEXT:    slli a0, a0, 25
@@ -757,7 +757,7 @@ define i64 @imm_end_xori_1() nounwind {
 ;
 ; RV64IZBS-LABEL: imm_end_xori_1:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    addi a0, zero, -1
+; RV64IZBS-NEXT:    li a0, -1
 ; RV64IZBS-NEXT:    slli a0, a0, 36
 ; RV64IZBS-NEXT:    addi a0, a0, 1
 ; RV64IZBS-NEXT:    slli a0, a0, 25
@@ -779,7 +779,7 @@ define i64 @imm_end_2addi_1() nounwind {
 ;
 ; RV64I-LABEL: imm_end_2addi_1:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, -2047
+; RV64I-NEXT:    li a0, -2047
 ; RV64I-NEXT:    slli a0, a0, 27
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    slli a0, a0, 12
@@ -788,7 +788,7 @@ define i64 @imm_end_2addi_1() nounwind {
 ;
 ; RV64IZBA-LABEL: imm_end_2addi_1:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a0, zero, -2047
+; RV64IZBA-NEXT:    li a0, -2047
 ; RV64IZBA-NEXT:    slli a0, a0, 27
 ; RV64IZBA-NEXT:    addi a0, a0, -1
 ; RV64IZBA-NEXT:    slli a0, a0, 12
@@ -797,7 +797,7 @@ define i64 @imm_end_2addi_1() nounwind {
 ;
 ; RV64IZBS-LABEL: imm_end_2addi_1:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    addi a0, zero, -2047
+; RV64IZBS-NEXT:    li a0, -2047
 ; RV64IZBS-NEXT:    slli a0, a0, 27
 ; RV64IZBS-NEXT:    addi a0, a0, -1
 ; RV64IZBS-NEXT:    slli a0, a0, 12
@@ -819,7 +819,7 @@ define i64 @imm_2reg_1() nounwind {
 ;
 ; RV64I-LABEL: imm_2reg_1:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    slli a0, a0, 35
 ; RV64I-NEXT:    addi a0, a0, 9
 ; RV64I-NEXT:    slli a0, a0, 13
@@ -830,7 +830,7 @@ define i64 @imm_2reg_1() nounwind {
 ;
 ; RV64IZBA-LABEL: imm_2reg_1:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a0, zero, -1
+; RV64IZBA-NEXT:    li a0, -1
 ; RV64IZBA-NEXT:    slli a0, a0, 35
 ; RV64IZBA-NEXT:    addi a0, a0, 9
 ; RV64IZBA-NEXT:    slli a0, a0, 13
@@ -855,25 +855,25 @@ define i64 @imm_2reg_1() nounwind {
 define void @imm_store_i16_neg1(i16* %p) nounwind {
 ; RV32I-LABEL: imm_store_i16_neg1:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    li a1, -1
 ; RV32I-NEXT:    sh a1, 0(a0)
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_store_i16_neg1:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    sh a1, 0(a0)
 ; RV64I-NEXT:    ret
 ;
 ; RV64IZBA-LABEL: imm_store_i16_neg1:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a1, zero, -1
+; RV64IZBA-NEXT:    li a1, -1
 ; RV64IZBA-NEXT:    sh a1, 0(a0)
 ; RV64IZBA-NEXT:    ret
 ;
 ; RV64IZBS-LABEL: imm_store_i16_neg1:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    addi a1, zero, -1
+; RV64IZBS-NEXT:    li a1, -1
 ; RV64IZBS-NEXT:    sh a1, 0(a0)
 ; RV64IZBS-NEXT:    ret
   store i16 -1, i16* %p
@@ -884,25 +884,25 @@ define void @imm_store_i16_neg1(i16* %p) nounwind {
 define void @imm_store_i32_neg1(i32* %p) nounwind {
 ; RV32I-LABEL: imm_store_i32_neg1:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    li a1, -1
 ; RV32I-NEXT:    sw a1, 0(a0)
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_store_i32_neg1:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    sw a1, 0(a0)
 ; RV64I-NEXT:    ret
 ;
 ; RV64IZBA-LABEL: imm_store_i32_neg1:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a1, zero, -1
+; RV64IZBA-NEXT:    li a1, -1
 ; RV64IZBA-NEXT:    sw a1, 0(a0)
 ; RV64IZBA-NEXT:    ret
 ;
 ; RV64IZBS-LABEL: imm_store_i32_neg1:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    addi a1, zero, -1
+; RV64IZBS-NEXT:    li a1, -1
 ; RV64IZBS-NEXT:    sw a1, 0(a0)
 ; RV64IZBS-NEXT:    ret
   store i32 -1, i32* %p
@@ -914,7 +914,7 @@ define i64 @imm_5372288229() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 263018
 ; RV32I-NEXT:    addi a0, a0, -795
-; RV32I-NEXT:    addi a1, zero, 1
+; RV32I-NEXT:    li a1, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_5372288229:
@@ -946,7 +946,7 @@ define i64 @imm_neg_5372288229() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 785558
 ; RV32I-NEXT:    addi a0, a0, 795
-; RV32I-NEXT:    addi a1, zero, -2
+; RV32I-NEXT:    li a1, -2
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_neg_5372288229:
@@ -978,7 +978,7 @@ define i64 @imm_8953813715() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 88838
 ; RV32I-NEXT:    addi a0, a0, -1325
-; RV32I-NEXT:    addi a1, zero, 2
+; RV32I-NEXT:    li a1, 2
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_8953813715:
@@ -1010,7 +1010,7 @@ define i64 @imm_neg_8953813715() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 959738
 ; RV32I-NEXT:    addi a0, a0, 1325
-; RV32I-NEXT:    addi a1, zero, -3
+; RV32I-NEXT:    li a1, -3
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_neg_8953813715:
@@ -1042,7 +1042,7 @@ define i64 @imm_16116864687() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 789053
 ; RV32I-NEXT:    addi a0, a0, 1711
-; RV32I-NEXT:    addi a1, zero, 3
+; RV32I-NEXT:    li a1, 3
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_16116864687:
@@ -1075,7 +1075,7 @@ define i64 @imm_neg_16116864687() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 259523
 ; RV32I-NEXT:    addi a0, a0, -1711
-; RV32I-NEXT:    addi a1, zero, -4
+; RV32I-NEXT:    li a1, -4
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_neg_16116864687:
@@ -1108,7 +1108,7 @@ define i64 @imm_2344336315() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 572348
 ; RV32I-NEXT:    addi a0, a0, -1093
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_2344336315:
@@ -1181,7 +1181,7 @@ define i64 @imm_neg_9223372034778874949() {
 ;
 ; RV64I-LABEL: imm_neg_9223372034778874949:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    slli a0, a0, 37
 ; RV64I-NEXT:    addi a0, a0, 31
 ; RV64I-NEXT:    slli a0, a0, 12
@@ -1192,7 +1192,7 @@ define i64 @imm_neg_9223372034778874949() {
 ;
 ; RV64IZBA-LABEL: imm_neg_9223372034778874949:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a0, zero, -1
+; RV64IZBA-NEXT:    li a0, -1
 ; RV64IZBA-NEXT:    slli a0, a0, 37
 ; RV64IZBA-NEXT:    addi a0, a0, 31
 ; RV64IZBA-NEXT:    slli a0, a0, 12
@@ -1255,7 +1255,7 @@ define i64 @imm_neg_2219066437() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 506812
 ; RV32I-NEXT:    addi a0, a0, -1093
-; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    li a1, -1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_neg_2219066437:
@@ -1397,7 +1397,7 @@ define i64 @imm_2863311530() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 699051
 ; RV32I-NEXT:    addi a0, a0, -1366
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_2863311530:
@@ -1429,7 +1429,7 @@ define i64 @imm_neg_2863311530() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 349525
 ; RV32I-NEXT:    addi a0, a0, 1366
-; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    li a1, -1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_neg_2863311530:
@@ -1461,26 +1461,26 @@ define i64 @imm_2147486378() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 524288
 ; RV32I-NEXT:    addi a0, a0, 1365
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_2147486378:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    slli a0, a0, 31
 ; RV64I-NEXT:    addi a0, a0, 1365
 ; RV64I-NEXT:    ret
 ;
 ; RV64IZBA-LABEL: imm_2147486378:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a0, zero, 1
+; RV64IZBA-NEXT:    li a0, 1
 ; RV64IZBA-NEXT:    slli a0, a0, 31
 ; RV64IZBA-NEXT:    addi a0, a0, 1365
 ; RV64IZBA-NEXT:    ret
 ;
 ; RV64IZBS-LABEL: imm_2147486378:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    addi a0, zero, 1365
+; RV64IZBS-NEXT:    li a0, 1365
 ; RV64IZBS-NEXT:    bseti a0, a0, 31
 ; RV64IZBS-NEXT:    ret
   ret i64 2147485013
@@ -1491,26 +1491,26 @@ define i64 @imm_neg_2147485013() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 524288
 ; RV32I-NEXT:    addi a0, a0, -1365
-; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    li a1, -1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_neg_2147485013:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    slli a0, a0, 31
 ; RV64I-NEXT:    addi a0, a0, -1365
 ; RV64I-NEXT:    ret
 ;
 ; RV64IZBA-LABEL: imm_neg_2147485013:
 ; RV64IZBA:       # %bb.0:
-; RV64IZBA-NEXT:    addi a0, zero, -1
+; RV64IZBA-NEXT:    li a0, -1
 ; RV64IZBA-NEXT:    slli a0, a0, 31
 ; RV64IZBA-NEXT:    addi a0, a0, -1365
 ; RV64IZBA-NEXT:    ret
 ;
 ; RV64IZBS-LABEL: imm_neg_2147485013:
 ; RV64IZBS:       # %bb.0:
-; RV64IZBS-NEXT:    addi a0, zero, -1365
+; RV64IZBS-NEXT:    li a0, -1365
 ; RV64IZBS-NEXT:    bclri a0, a0, 31
 ; RV64IZBS-NEXT:    ret
   ret i64 -2147485013
@@ -1554,7 +1554,7 @@ define i64 @imm_50394234880() {
 ; RV32I-LABEL: imm_50394234880:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 768944
-; RV32I-NEXT:    addi a1, zero, 11
+; RV32I-NEXT:    li a1, 11
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_50394234880:
@@ -1699,7 +1699,7 @@ define i64 @imm_7158272001() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 699049
 ; RV32I-NEXT:    addi a0, a0, 1
-; RV32I-NEXT:    addi a1, zero, 1
+; RV32I-NEXT:    li a1, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_7158272001:
@@ -1732,7 +1732,7 @@ define i64 @imm_12884889601() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 1048573
 ; RV32I-NEXT:    addi a0, a0, 1
-; RV32I-NEXT:    addi a1, zero, 2
+; RV32I-NEXT:    li a1, 2
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_12884889601:
@@ -1765,7 +1765,7 @@ define i64 @imm_neg_3435982847() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 209713
 ; RV32I-NEXT:    addi a0, a0, 1
-; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    li a1, -1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_neg_3435982847:
@@ -1797,7 +1797,7 @@ define i64 @imm_neg_5726842879() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 698997
 ; RV32I-NEXT:    addi a0, a0, 1
-; RV32I-NEXT:    addi a1, zero, -2
+; RV32I-NEXT:    li a1, -2
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_neg_5726842879:
@@ -1829,7 +1829,7 @@ define i64 @imm_neg_10307948543() {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a0, 629139
 ; RV32I-NEXT:    addi a0, a0, 1
-; RV32I-NEXT:    addi a1, zero, -3
+; RV32I-NEXT:    li a1, -3
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: imm_neg_10307948543:

diff --git a/llvm/test/CodeGen/RISCV/indirectbr.ll b/llvm/test/CodeGen/RISCV/indirectbr.ll
index 6edd0dd003b9..14039b62a109 100644
--- a/llvm/test/CodeGen/RISCV/indirectbr.ll
+++ b/llvm/test/CodeGen/RISCV/indirectbr.ll
@@ -7,7 +7,7 @@ define i32 @indirectbr(i8* %target) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    jr a0
 ; RV32I-NEXT:  .LBB0_1: # %test_label
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
   indirectbr i8* %target, [label %test_label]
 test_label:
@@ -21,7 +21,7 @@ define i32 @indirectbr_with_offset(i8* %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    jr 1380(a0)
 ; RV32I-NEXT:  .LBB1_1: # %test_label
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
   %target = getelementptr inbounds i8, i8* %a, i32 1380
   indirectbr i8* %target, [label %test_label]

diff --git a/llvm/test/CodeGen/RISCV/jumptable.ll b/llvm/test/CodeGen/RISCV/jumptable.ll
index f3c65462cd6b..10a7938c652a 100644
--- a/llvm/test/CodeGen/RISCV/jumptable.ll
+++ b/llvm/test/CodeGen/RISCV/jumptable.ll
@@ -11,31 +11,31 @@
 define void @below_threshold(i32 %in, i32* %out) nounwind {
 ; RV32I-SMALL-LABEL: below_threshold:
 ; RV32I-SMALL:       # %bb.0: # %entry
-; RV32I-SMALL-NEXT:    addi a2, zero, 2
+; RV32I-SMALL-NEXT:    li a2, 2
 ; RV32I-SMALL-NEXT:    blt a2, a0, .LBB0_4
 ; RV32I-SMALL-NEXT:  # %bb.1: # %entry
-; RV32I-SMALL-NEXT:    addi a2, zero, 1
+; RV32I-SMALL-NEXT:    li a2, 1
 ; RV32I-SMALL-NEXT:    beq a0, a2, .LBB0_7
 ; RV32I-SMALL-NEXT:  # %bb.2: # %entry
-; RV32I-SMALL-NEXT:    addi a2, zero, 2
+; RV32I-SMALL-NEXT:    li a2, 2
 ; RV32I-SMALL-NEXT:    bne a0, a2, .LBB0_10
 ; RV32I-SMALL-NEXT:  # %bb.3: # %bb2
-; RV32I-SMALL-NEXT:    addi a0, zero, 3
+; RV32I-SMALL-NEXT:    li a0, 3
 ; RV32I-SMALL-NEXT:    j .LBB0_9
 ; RV32I-SMALL-NEXT:  .LBB0_4: # %entry
-; RV32I-SMALL-NEXT:    addi a2, zero, 3
+; RV32I-SMALL-NEXT:    li a2, 3
 ; RV32I-SMALL-NEXT:    beq a0, a2, .LBB0_8
 ; RV32I-SMALL-NEXT:  # %bb.5: # %entry
-; RV32I-SMALL-NEXT:    addi a2, zero, 4
+; RV32I-SMALL-NEXT:    li a2, 4
 ; RV32I-SMALL-NEXT:    bne a0, a2, .LBB0_10
 ; RV32I-SMALL-NEXT:  # %bb.6: # %bb4
-; RV32I-SMALL-NEXT:    addi a0, zero, 1
+; RV32I-SMALL-NEXT:    li a0, 1
 ; RV32I-SMALL-NEXT:    j .LBB0_9
 ; RV32I-SMALL-NEXT:  .LBB0_7: # %bb1
-; RV32I-SMALL-NEXT:    addi a0, zero, 4
+; RV32I-SMALL-NEXT:    li a0, 4
 ; RV32I-SMALL-NEXT:    j .LBB0_9
 ; RV32I-SMALL-NEXT:  .LBB0_8: # %bb3
-; RV32I-SMALL-NEXT:    addi a0, zero, 2
+; RV32I-SMALL-NEXT:    li a0, 2
 ; RV32I-SMALL-NEXT:  .LBB0_9: # %exit
 ; RV32I-SMALL-NEXT:    sw a0, 0(a1)
 ; RV32I-SMALL-NEXT:  .LBB0_10: # %exit
@@ -43,31 +43,31 @@ define void @below_threshold(i32 %in, i32* %out) nounwind {
 ;
 ; RV32I-MEDIUM-LABEL: below_threshold:
 ; RV32I-MEDIUM:       # %bb.0: # %entry
-; RV32I-MEDIUM-NEXT:    addi a2, zero, 2
+; RV32I-MEDIUM-NEXT:    li a2, 2
 ; RV32I-MEDIUM-NEXT:    blt a2, a0, .LBB0_4
 ; RV32I-MEDIUM-NEXT:  # %bb.1: # %entry
-; RV32I-MEDIUM-NEXT:    addi a2, zero, 1
+; RV32I-MEDIUM-NEXT:    li a2, 1
 ; RV32I-MEDIUM-NEXT:    beq a0, a2, .LBB0_7
 ; RV32I-MEDIUM-NEXT:  # %bb.2: # %entry
-; RV32I-MEDIUM-NEXT:    addi a2, zero, 2
+; RV32I-MEDIUM-NEXT:    li a2, 2
 ; RV32I-MEDIUM-NEXT:    bne a0, a2, .LBB0_10
 ; RV32I-MEDIUM-NEXT:  # %bb.3: # %bb2
-; RV32I-MEDIUM-NEXT:    addi a0, zero, 3
+; RV32I-MEDIUM-NEXT:    li a0, 3
 ; RV32I-MEDIUM-NEXT:    j .LBB0_9
 ; RV32I-MEDIUM-NEXT:  .LBB0_4: # %entry
-; RV32I-MEDIUM-NEXT:    addi a2, zero, 3
+; RV32I-MEDIUM-NEXT:    li a2, 3
 ; RV32I-MEDIUM-NEXT:    beq a0, a2, .LBB0_8
 ; RV32I-MEDIUM-NEXT:  # %bb.5: # %entry
-; RV32I-MEDIUM-NEXT:    addi a2, zero, 4
+; RV32I-MEDIUM-NEXT:    li a2, 4
 ; RV32I-MEDIUM-NEXT:    bne a0, a2, .LBB0_10
 ; RV32I-MEDIUM-NEXT:  # %bb.6: # %bb4
-; RV32I-MEDIUM-NEXT:    addi a0, zero, 1
+; RV32I-MEDIUM-NEXT:    li a0, 1
 ; RV32I-MEDIUM-NEXT:    j .LBB0_9
 ; RV32I-MEDIUM-NEXT:  .LBB0_7: # %bb1
-; RV32I-MEDIUM-NEXT:    addi a0, zero, 4
+; RV32I-MEDIUM-NEXT:    li a0, 4
 ; RV32I-MEDIUM-NEXT:    j .LBB0_9
 ; RV32I-MEDIUM-NEXT:  .LBB0_8: # %bb3
-; RV32I-MEDIUM-NEXT:    addi a0, zero, 2
+; RV32I-MEDIUM-NEXT:    li a0, 2
 ; RV32I-MEDIUM-NEXT:  .LBB0_9: # %exit
 ; RV32I-MEDIUM-NEXT:    sw a0, 0(a1)
 ; RV32I-MEDIUM-NEXT:  .LBB0_10: # %exit
@@ -76,31 +76,31 @@ define void @below_threshold(i32 %in, i32* %out) nounwind {
 ; RV64I-SMALL-LABEL: below_threshold:
 ; RV64I-SMALL:       # %bb.0: # %entry
 ; RV64I-SMALL-NEXT:    sext.w a0, a0
-; RV64I-SMALL-NEXT:    addi a2, zero, 2
+; RV64I-SMALL-NEXT:    li a2, 2
 ; RV64I-SMALL-NEXT:    blt a2, a0, .LBB0_4
 ; RV64I-SMALL-NEXT:  # %bb.1: # %entry
-; RV64I-SMALL-NEXT:    addi a2, zero, 1
+; RV64I-SMALL-NEXT:    li a2, 1
 ; RV64I-SMALL-NEXT:    beq a0, a2, .LBB0_7
 ; RV64I-SMALL-NEXT:  # %bb.2: # %entry
-; RV64I-SMALL-NEXT:    addi a2, zero, 2
+; RV64I-SMALL-NEXT:    li a2, 2
 ; RV64I-SMALL-NEXT:    bne a0, a2, .LBB0_10
 ; RV64I-SMALL-NEXT:  # %bb.3: # %bb2
-; RV64I-SMALL-NEXT:    addi a0, zero, 3
+; RV64I-SMALL-NEXT:    li a0, 3
 ; RV64I-SMALL-NEXT:    j .LBB0_9
 ; RV64I-SMALL-NEXT:  .LBB0_4: # %entry
-; RV64I-SMALL-NEXT:    addi a2, zero, 3
+; RV64I-SMALL-NEXT:    li a2, 3
 ; RV64I-SMALL-NEXT:    beq a0, a2, .LBB0_8
 ; RV64I-SMALL-NEXT:  # %bb.5: # %entry
-; RV64I-SMALL-NEXT:    addi a2, zero, 4
+; RV64I-SMALL-NEXT:    li a2, 4
 ; RV64I-SMALL-NEXT:    bne a0, a2, .LBB0_10
 ; RV64I-SMALL-NEXT:  # %bb.6: # %bb4
-; RV64I-SMALL-NEXT:    addi a0, zero, 1
+; RV64I-SMALL-NEXT:    li a0, 1
 ; RV64I-SMALL-NEXT:    j .LBB0_9
 ; RV64I-SMALL-NEXT:  .LBB0_7: # %bb1
-; RV64I-SMALL-NEXT:    addi a0, zero, 4
+; RV64I-SMALL-NEXT:    li a0, 4
 ; RV64I-SMALL-NEXT:    j .LBB0_9
 ; RV64I-SMALL-NEXT:  .LBB0_8: # %bb3
-; RV64I-SMALL-NEXT:    addi a0, zero, 2
+; RV64I-SMALL-NEXT:    li a0, 2
 ; RV64I-SMALL-NEXT:  .LBB0_9: # %exit
 ; RV64I-SMALL-NEXT:    sw a0, 0(a1)
 ; RV64I-SMALL-NEXT:  .LBB0_10: # %exit
@@ -109,31 +109,31 @@ define void @below_threshold(i32 %in, i32* %out) nounwind {
 ; RV64I-MEDIUM-LABEL: below_threshold:
 ; RV64I-MEDIUM:       # %bb.0: # %entry
 ; RV64I-MEDIUM-NEXT:    sext.w a0, a0
-; RV64I-MEDIUM-NEXT:    addi a2, zero, 2
+; RV64I-MEDIUM-NEXT:    li a2, 2
 ; RV64I-MEDIUM-NEXT:    blt a2, a0, .LBB0_4
 ; RV64I-MEDIUM-NEXT:  # %bb.1: # %entry
-; RV64I-MEDIUM-NEXT:    addi a2, zero, 1
+; RV64I-MEDIUM-NEXT:    li a2, 1
 ; RV64I-MEDIUM-NEXT:    beq a0, a2, .LBB0_7
 ; RV64I-MEDIUM-NEXT:  # %bb.2: # %entry
-; RV64I-MEDIUM-NEXT:    addi a2, zero, 2
+; RV64I-MEDIUM-NEXT:    li a2, 2
 ; RV64I-MEDIUM-NEXT:    bne a0, a2, .LBB0_10
 ; RV64I-MEDIUM-NEXT:  # %bb.3: # %bb2
-; RV64I-MEDIUM-NEXT:    addi a0, zero, 3
+; RV64I-MEDIUM-NEXT:    li a0, 3
 ; RV64I-MEDIUM-NEXT:    j .LBB0_9
 ; RV64I-MEDIUM-NEXT:  .LBB0_4: # %entry
-; RV64I-MEDIUM-NEXT:    addi a2, zero, 3
+; RV64I-MEDIUM-NEXT:    li a2, 3
 ; RV64I-MEDIUM-NEXT:    beq a0, a2, .LBB0_8
 ; RV64I-MEDIUM-NEXT:  # %bb.5: # %entry
-; RV64I-MEDIUM-NEXT:    addi a2, zero, 4
+; RV64I-MEDIUM-NEXT:    li a2, 4
 ; RV64I-MEDIUM-NEXT:    bne a0, a2, .LBB0_10
 ; RV64I-MEDIUM-NEXT:  # %bb.6: # %bb4
-; RV64I-MEDIUM-NEXT:    addi a0, zero, 1
+; RV64I-MEDIUM-NEXT:    li a0, 1
 ; RV64I-MEDIUM-NEXT:    j .LBB0_9
 ; RV64I-MEDIUM-NEXT:  .LBB0_7: # %bb1
-; RV64I-MEDIUM-NEXT:    addi a0, zero, 4
+; RV64I-MEDIUM-NEXT:    li a0, 4
 ; RV64I-MEDIUM-NEXT:    j .LBB0_9
 ; RV64I-MEDIUM-NEXT:  .LBB0_8: # %bb3
-; RV64I-MEDIUM-NEXT:    addi a0, zero, 2
+; RV64I-MEDIUM-NEXT:    li a0, 2
 ; RV64I-MEDIUM-NEXT:  .LBB0_9: # %exit
 ; RV64I-MEDIUM-NEXT:    sw a0, 0(a1)
 ; RV64I-MEDIUM-NEXT:  .LBB0_10: # %exit
@@ -165,7 +165,7 @@ define void @above_threshold(i32 %in, i32* %out) nounwind {
 ; RV32I-SMALL-LABEL: above_threshold:
 ; RV32I-SMALL:       # %bb.0: # %entry
 ; RV32I-SMALL-NEXT:    addi a0, a0, -1
-; RV32I-SMALL-NEXT:    addi a2, zero, 5
+; RV32I-SMALL-NEXT:    li a2, 5
 ; RV32I-SMALL-NEXT:    bltu a2, a0, .LBB1_9
 ; RV32I-SMALL-NEXT:  # %bb.1: # %entry
 ; RV32I-SMALL-NEXT:    slli a0, a0, 2
@@ -175,22 +175,22 @@ define void @above_threshold(i32 %in, i32* %out) nounwind {
 ; RV32I-SMALL-NEXT:    lw a0, 0(a0)
 ; RV32I-SMALL-NEXT:    jr a0
 ; RV32I-SMALL-NEXT:  .LBB1_2: # %bb1
-; RV32I-SMALL-NEXT:    addi a0, zero, 4
+; RV32I-SMALL-NEXT:    li a0, 4
 ; RV32I-SMALL-NEXT:    j .LBB1_8
 ; RV32I-SMALL-NEXT:  .LBB1_3: # %bb2
-; RV32I-SMALL-NEXT:    addi a0, zero, 3
+; RV32I-SMALL-NEXT:    li a0, 3
 ; RV32I-SMALL-NEXT:    j .LBB1_8
 ; RV32I-SMALL-NEXT:  .LBB1_4: # %bb3
-; RV32I-SMALL-NEXT:    addi a0, zero, 2
+; RV32I-SMALL-NEXT:    li a0, 2
 ; RV32I-SMALL-NEXT:    j .LBB1_8
 ; RV32I-SMALL-NEXT:  .LBB1_5: # %bb4
-; RV32I-SMALL-NEXT:    addi a0, zero, 1
+; RV32I-SMALL-NEXT:    li a0, 1
 ; RV32I-SMALL-NEXT:    j .LBB1_8
 ; RV32I-SMALL-NEXT:  .LBB1_6: # %bb5
-; RV32I-SMALL-NEXT:    addi a0, zero, 100
+; RV32I-SMALL-NEXT:    li a0, 100
 ; RV32I-SMALL-NEXT:    j .LBB1_8
 ; RV32I-SMALL-NEXT:  .LBB1_7: # %bb6
-; RV32I-SMALL-NEXT:    addi a0, zero, 200
+; RV32I-SMALL-NEXT:    li a0, 200
 ; RV32I-SMALL-NEXT:  .LBB1_8: # %exit
 ; RV32I-SMALL-NEXT:    sw a0, 0(a1)
 ; RV32I-SMALL-NEXT:  .LBB1_9: # %exit
@@ -199,7 +199,7 @@ define void @above_threshold(i32 %in, i32* %out) nounwind {
 ; RV32I-MEDIUM-LABEL: above_threshold:
 ; RV32I-MEDIUM:       # %bb.0: # %entry
 ; RV32I-MEDIUM-NEXT:    addi a0, a0, -1
-; RV32I-MEDIUM-NEXT:    addi a2, zero, 5
+; RV32I-MEDIUM-NEXT:    li a2, 5
 ; RV32I-MEDIUM-NEXT:    bltu a2, a0, .LBB1_9
 ; RV32I-MEDIUM-NEXT:  # %bb.1: # %entry
 ; RV32I-MEDIUM-NEXT:    slli a0, a0, 2
@@ -211,22 +211,22 @@ define void @above_threshold(i32 %in, i32* %out) nounwind {
 ; RV32I-MEDIUM-NEXT:    lw a0, 0(a0)
 ; RV32I-MEDIUM-NEXT:    jr a0
 ; RV32I-MEDIUM-NEXT:  .LBB1_2: # %bb1
-; RV32I-MEDIUM-NEXT:    addi a0, zero, 4
+; RV32I-MEDIUM-NEXT:    li a0, 4
 ; RV32I-MEDIUM-NEXT:    j .LBB1_8
 ; RV32I-MEDIUM-NEXT:  .LBB1_3: # %bb2
-; RV32I-MEDIUM-NEXT:    addi a0, zero, 3
+; RV32I-MEDIUM-NEXT:    li a0, 3
 ; RV32I-MEDIUM-NEXT:    j .LBB1_8
 ; RV32I-MEDIUM-NEXT:  .LBB1_4: # %bb3
-; RV32I-MEDIUM-NEXT:    addi a0, zero, 2
+; RV32I-MEDIUM-NEXT:    li a0, 2
 ; RV32I-MEDIUM-NEXT:    j .LBB1_8
 ; RV32I-MEDIUM-NEXT:  .LBB1_5: # %bb4
-; RV32I-MEDIUM-NEXT:    addi a0, zero, 1
+; RV32I-MEDIUM-NEXT:    li a0, 1
 ; RV32I-MEDIUM-NEXT:    j .LBB1_8
 ; RV32I-MEDIUM-NEXT:  .LBB1_6: # %bb5
-; RV32I-MEDIUM-NEXT:    addi a0, zero, 100
+; RV32I-MEDIUM-NEXT:    li a0, 100
 ; RV32I-MEDIUM-NEXT:    j .LBB1_8
 ; RV32I-MEDIUM-NEXT:  .LBB1_7: # %bb6
-; RV32I-MEDIUM-NEXT:    addi a0, zero, 200
+; RV32I-MEDIUM-NEXT:    li a0, 200
 ; RV32I-MEDIUM-NEXT:  .LBB1_8: # %exit
 ; RV32I-MEDIUM-NEXT:    sw a0, 0(a1)
 ; RV32I-MEDIUM-NEXT:  .LBB1_9: # %exit
@@ -236,7 +236,7 @@ define void @above_threshold(i32 %in, i32* %out) nounwind {
 ; RV64I-SMALL:       # %bb.0: # %entry
 ; RV64I-SMALL-NEXT:    sext.w a0, a0
 ; RV64I-SMALL-NEXT:    addi a0, a0, -1
-; RV64I-SMALL-NEXT:    addi a2, zero, 5
+; RV64I-SMALL-NEXT:    li a2, 5
 ; RV64I-SMALL-NEXT:    bltu a2, a0, .LBB1_9
 ; RV64I-SMALL-NEXT:  # %bb.1: # %entry
 ; RV64I-SMALL-NEXT:    slli a0, a0, 3
@@ -246,22 +246,22 @@ define void @above_threshold(i32 %in, i32* %out) nounwind {
 ; RV64I-SMALL-NEXT:    ld a0, 0(a0)
 ; RV64I-SMALL-NEXT:    jr a0
 ; RV64I-SMALL-NEXT:  .LBB1_2: # %bb1
-; RV64I-SMALL-NEXT:    addi a0, zero, 4
+; RV64I-SMALL-NEXT:    li a0, 4
 ; RV64I-SMALL-NEXT:    j .LBB1_8
 ; RV64I-SMALL-NEXT:  .LBB1_3: # %bb2
-; RV64I-SMALL-NEXT:    addi a0, zero, 3
+; RV64I-SMALL-NEXT:    li a0, 3
 ; RV64I-SMALL-NEXT:    j .LBB1_8
 ; RV64I-SMALL-NEXT:  .LBB1_4: # %bb3
-; RV64I-SMALL-NEXT:    addi a0, zero, 2
+; RV64I-SMALL-NEXT:    li a0, 2
 ; RV64I-SMALL-NEXT:    j .LBB1_8
 ; RV64I-SMALL-NEXT:  .LBB1_5: # %bb4
-; RV64I-SMALL-NEXT:    addi a0, zero, 1
+; RV64I-SMALL-NEXT:    li a0, 1
 ; RV64I-SMALL-NEXT:    j .LBB1_8
 ; RV64I-SMALL-NEXT:  .LBB1_6: # %bb5
-; RV64I-SMALL-NEXT:    addi a0, zero, 100
+; RV64I-SMALL-NEXT:    li a0, 100
 ; RV64I-SMALL-NEXT:    j .LBB1_8
 ; RV64I-SMALL-NEXT:  .LBB1_7: # %bb6
-; RV64I-SMALL-NEXT:    addi a0, zero, 200
+; RV64I-SMALL-NEXT:    li a0, 200
 ; RV64I-SMALL-NEXT:  .LBB1_8: # %exit
 ; RV64I-SMALL-NEXT:    sw a0, 0(a1)
 ; RV64I-SMALL-NEXT:  .LBB1_9: # %exit
@@ -271,7 +271,7 @@ define void @above_threshold(i32 %in, i32* %out) nounwind {
 ; RV64I-MEDIUM:       # %bb.0: # %entry
 ; RV64I-MEDIUM-NEXT:    sext.w a0, a0
 ; RV64I-MEDIUM-NEXT:    addi a0, a0, -1
-; RV64I-MEDIUM-NEXT:    addi a2, zero, 5
+; RV64I-MEDIUM-NEXT:    li a2, 5
 ; RV64I-MEDIUM-NEXT:    bltu a2, a0, .LBB1_9
 ; RV64I-MEDIUM-NEXT:  # %bb.1: # %entry
 ; RV64I-MEDIUM-NEXT:    slli a0, a0, 3
@@ -283,22 +283,22 @@ define void @above_threshold(i32 %in, i32* %out) nounwind {
 ; RV64I-MEDIUM-NEXT:    ld a0, 0(a0)
 ; RV64I-MEDIUM-NEXT:    jr a0
 ; RV64I-MEDIUM-NEXT:  .LBB1_2: # %bb1
-; RV64I-MEDIUM-NEXT:    addi a0, zero, 4
+; RV64I-MEDIUM-NEXT:    li a0, 4
 ; RV64I-MEDIUM-NEXT:    j .LBB1_8
 ; RV64I-MEDIUM-NEXT:  .LBB1_3: # %bb2
-; RV64I-MEDIUM-NEXT:    addi a0, zero, 3
+; RV64I-MEDIUM-NEXT:    li a0, 3
 ; RV64I-MEDIUM-NEXT:    j .LBB1_8
 ; RV64I-MEDIUM-NEXT:  .LBB1_4: # %bb3
-; RV64I-MEDIUM-NEXT:    addi a0, zero, 2
+; RV64I-MEDIUM-NEXT:    li a0, 2
 ; RV64I-MEDIUM-NEXT:    j .LBB1_8
 ; RV64I-MEDIUM-NEXT:  .LBB1_5: # %bb4
-; RV64I-MEDIUM-NEXT:    addi a0, zero, 1
+; RV64I-MEDIUM-NEXT:    li a0, 1
 ; RV64I-MEDIUM-NEXT:    j .LBB1_8
 ; RV64I-MEDIUM-NEXT:  .LBB1_6: # %bb5
-; RV64I-MEDIUM-NEXT:    addi a0, zero, 100
+; RV64I-MEDIUM-NEXT:    li a0, 100
 ; RV64I-MEDIUM-NEXT:    j .LBB1_8
 ; RV64I-MEDIUM-NEXT:  .LBB1_7: # %bb6
-; RV64I-MEDIUM-NEXT:    addi a0, zero, 200
+; RV64I-MEDIUM-NEXT:    li a0, 200
 ; RV64I-MEDIUM-NEXT:  .LBB1_8: # %exit
 ; RV64I-MEDIUM-NEXT:    sw a0, 0(a1)
 ; RV64I-MEDIUM-NEXT:  .LBB1_9: # %exit

diff --git a/llvm/test/CodeGen/RISCV/legalize-fneg.ll b/llvm/test/CodeGen/RISCV/legalize-fneg.ll
index 80f0a988a6a6..b35884f8f87c 100644
--- a/llvm/test/CodeGen/RISCV/legalize-fneg.ll
+++ b/llvm/test/CodeGen/RISCV/legalize-fneg.ll
@@ -16,7 +16,7 @@ define void @test1(float* %a, float* %b) nounwind {
 ; RV64-LABEL: test1:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    lw a1, 0(a1)
-; RV64-NEXT:    addi a2, zero, 1
+; RV64-NEXT:    li a2, 1
 ; RV64-NEXT:    slli a2, a2, 31
 ; RV64-NEXT:    xor a1, a1, a2
 ; RV64-NEXT:    sw a1, 0(a0)
@@ -42,7 +42,7 @@ define void @test2(double* %a, double* %b) nounwind {
 ; RV64-LABEL: test2:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    ld a1, 0(a1)
-; RV64-NEXT:    addi a2, zero, -1
+; RV64-NEXT:    li a2, -1
 ; RV64-NEXT:    slli a2, a2, 63
 ; RV64-NEXT:    xor a1, a1, a2
 ; RV64-NEXT:    sd a1, 0(a0)
@@ -73,7 +73,7 @@ define void @test3(fp128* %a, fp128* %b) nounwind {
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    ld a2, 8(a1)
 ; RV64-NEXT:    ld a1, 0(a1)
-; RV64-NEXT:    addi a3, zero, -1
+; RV64-NEXT:    li a3, -1
 ; RV64-NEXT:    slli a3, a3, 63
 ; RV64-NEXT:    xor a2, a2, a3
 ; RV64-NEXT:    sd a1, 0(a0)

diff --git a/llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll b/llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll
index dbe9eb9f050a..000130264cb8 100644
--- a/llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll
+++ b/llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll
@@ -11,7 +11,7 @@
 define i32 @main() nounwind {
 ; RV32I-LABEL: main:
 ; RV32I:       # %bb.0: # %entry
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    lui a1, %hi(b)
 ; RV32I-NEXT:    addi a1, a1, %lo(b)
 ; RV32I-NEXT:    lui a2, %hi(a)
@@ -27,7 +27,7 @@ define i32 @main() nounwind {
 ; RV32I-NEXT:    addi a2, a2, 4
 ; RV32I-NEXT:    bne a0, a3, .LBB0_1
 ; RV32I-NEXT:  # %bb.2: # %for.end
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 entry:
   br label %for.body

diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index 7a6cf4a78ede..43914996a43f 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -181,7 +181,7 @@ define i64 @mul64_constant(i64 %a) nounwind {
 ;
 ; RV32IM-LABEL: mul64_constant:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    addi a2, zero, 5
+; RV32IM-NEXT:    li a2, 5
 ; RV32IM-NEXT:    mulhu a2, a0, a2
 ; RV32IM-NEXT:    slli a3, a1, 2
 ; RV32IM-NEXT:    add a1, a3, a1
@@ -267,7 +267,7 @@ define i32 @mulhs_positive_constant(i32 %a) nounwind {
 ;
 ; RV32IM-LABEL: mulhs_positive_constant:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    addi a1, zero, 5
+; RV32IM-NEXT:    li a1, 5
 ; RV32IM-NEXT:    mulh a0, a0, a1
 ; RV32IM-NEXT:    ret
 ;
@@ -312,7 +312,7 @@ define i32 @mulhs_negative_constant(i32 %a) nounwind {
 ;
 ; RV32IM-LABEL: mulhs_negative_constant:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    addi a1, zero, -5
+; RV32IM-NEXT:    li a1, -5
 ; RV32IM-NEXT:    mulh a0, a0, a1
 ; RV32IM-NEXT:    ret
 ;
@@ -346,8 +346,8 @@ define zeroext i32 @mulhu(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, a1
-; RV32I-NEXT:    mv a1, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __muldi3@plt
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -389,7 +389,7 @@ define i32 @mulhsu(i32 %a, i32 %b) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a2, a1
 ; RV32I-NEXT:    srai a3, a1, 31
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __muldi3@plt
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -442,7 +442,7 @@ define i32 @mulhu_constant(i32 %a) nounwind {
 ;
 ; RV32IM-LABEL: mulhu_constant:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    addi a1, zero, 5
+; RV32IM-NEXT:    li a1, 5
 ; RV32IM-NEXT:    mulhu a0, a0, a1
 ; RV32IM-NEXT:    ret
 ;
@@ -542,7 +542,7 @@ define i64 @muli64_p65(i64 %a) nounwind {
 ;
 ; RV32IM-LABEL: muli64_p65:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    addi a2, zero, 65
+; RV32IM-NEXT:    li a2, 65
 ; RV32IM-NEXT:    mulhu a2, a0, a2
 ; RV32IM-NEXT:    slli a3, a1, 6
 ; RV32IM-NEXT:    add a1, a3, a1
@@ -581,7 +581,7 @@ define i64 @muli64_p63(i64 %a) nounwind {
 ;
 ; RV32IM-LABEL: muli64_p63:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    addi a2, zero, 63
+; RV32IM-NEXT:    li a2, 63
 ; RV32IM-NEXT:    mulhu a2, a0, a2
 ; RV32IM-NEXT:    slli a3, a1, 6
 ; RV32IM-NEXT:    sub a1, a3, a1
@@ -682,7 +682,7 @@ define i64 @muli64_m63(i64 %a) nounwind {
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    slli a2, a1, 6
 ; RV32IM-NEXT:    sub a1, a1, a2
-; RV32IM-NEXT:    addi a2, zero, -63
+; RV32IM-NEXT:    li a2, -63
 ; RV32IM-NEXT:    mulhu a2, a0, a2
 ; RV32IM-NEXT:    sub a2, a2, a0
 ; RV32IM-NEXT:    add a1, a2, a1
@@ -726,7 +726,7 @@ define i64 @muli64_m65(i64 %a) nounwind {
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    slli a2, a1, 6
 ; RV32IM-NEXT:    add a1, a2, a1
-; RV32IM-NEXT:    addi a2, zero, -65
+; RV32IM-NEXT:    li a2, -65
 ; RV32IM-NEXT:    mulhu a2, a0, a2
 ; RV32IM-NEXT:    sub a2, a2, a0
 ; RV32IM-NEXT:    sub a1, a2, a1
@@ -757,7 +757,7 @@ define i32 @muli32_p384(i32 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 384
+; RV32I-NEXT:    li a1, 384
 ; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -765,7 +765,7 @@ define i32 @muli32_p384(i32 %a) nounwind {
 ;
 ; RV32IM-LABEL: muli32_p384:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    addi a1, zero, 384
+; RV32IM-NEXT:    li a1, 384
 ; RV32IM-NEXT:    mul a0, a0, a1
 ; RV32IM-NEXT:    ret
 ;
@@ -773,7 +773,7 @@ define i32 @muli32_p384(i32 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, zero, 384
+; RV64I-NEXT:    li a1, 384
 ; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -781,7 +781,7 @@ define i32 @muli32_p384(i32 %a) nounwind {
 ;
 ; RV64IM-LABEL: muli32_p384:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    addi a1, zero, 384
+; RV64IM-NEXT:    li a1, 384
 ; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = mul i32 %a, 384
@@ -1055,7 +1055,7 @@ define i64 @muli64_m4352(i64 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    lui a2, 1048575
 ; RV32I-NEXT:    addi a2, a2, -256
-; RV32I-NEXT:    addi a3, zero, -1
+; RV32I-NEXT:    li a3, -1
 ; RV32I-NEXT:    call __muldi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1208,7 +1208,7 @@ define i128 @muli128_m3840(i128 %a) nounwind {
 ; RV32IM-NEXT:    sub t0, a2, a4
 ; RV32IM-NEXT:    neg t4, a4
 ; RV32IM-NEXT:    sltu t1, t0, t4
-; RV32IM-NEXT:    addi t2, zero, -1
+; RV32IM-NEXT:    li t2, -1
 ; RV32IM-NEXT:    mulhu t3, a4, t2
 ; RV32IM-NEXT:    add a2, t3, t1
 ; RV32IM-NEXT:    add t1, t5, a2
@@ -1323,7 +1323,7 @@ define i128 @muli128_m63(i128 %a) nounwind {
 ; RV32IM-NEXT:    lw a3, 0(a1)
 ; RV32IM-NEXT:    lw a4, 4(a1)
 ; RV32IM-NEXT:    lw t5, 8(a1)
-; RV32IM-NEXT:    addi a6, zero, -63
+; RV32IM-NEXT:    li a6, -63
 ; RV32IM-NEXT:    mulhu a5, a3, a6
 ; RV32IM-NEXT:    slli a2, a4, 6
 ; RV32IM-NEXT:    sub a2, a2, a4
@@ -1335,7 +1335,7 @@ define i128 @muli128_m63(i128 %a) nounwind {
 ; RV32IM-NEXT:    sub t0, a5, a3
 ; RV32IM-NEXT:    neg t1, a3
 ; RV32IM-NEXT:    sltu a5, t0, t1
-; RV32IM-NEXT:    addi t2, zero, -1
+; RV32IM-NEXT:    li t2, -1
 ; RV32IM-NEXT:    mulhu t3, a3, t2
 ; RV32IM-NEXT:    add a5, t3, a5
 ; RV32IM-NEXT:    add a5, t4, a5
@@ -1391,7 +1391,7 @@ define i128 @muli128_m63(i128 %a) nounwind {
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    slli a2, a1, 6
 ; RV64IM-NEXT:    sub a1, a1, a2
-; RV64IM-NEXT:    addi a2, zero, -63
+; RV64IM-NEXT:    li a2, -63
 ; RV64IM-NEXT:    mulhu a2, a0, a2
 ; RV64IM-NEXT:    sub a2, a2, a0
 ; RV64IM-NEXT:    add a1, a2, a1
@@ -1422,39 +1422,39 @@ define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    mv s2, a1
 ; RV32I-NEXT:    mv s3, a0
 ; RV32I-NEXT:    srai s4, a3, 31
-; RV32I-NEXT:    mv a1, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __muldi3@plt
 ; RV32I-NEXT:    mv s1, a1
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    mv a2, s5
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __muldi3@plt
 ; RV32I-NEXT:    add s1, a0, s1
 ; RV32I-NEXT:    sltu a0, s1, a0
 ; RV32I-NEXT:    add s7, a1, a0
 ; RV32I-NEXT:    mv a0, s3
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __muldi3@plt
 ; RV32I-NEXT:    add a2, a0, s1
 ; RV32I-NEXT:    sltu a0, a2, a0
 ; RV32I-NEXT:    add a0, a1, a0
 ; RV32I-NEXT:    add s8, s7, a0
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    mv a2, s0
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __muldi3@plt
 ; RV32I-NEXT:    mv s9, a0
 ; RV32I-NEXT:    mv s6, a1
 ; RV32I-NEXT:    add s1, a0, s8
 ; RV32I-NEXT:    mv a0, s5
 ; RV32I-NEXT:    mv a1, s0
-; RV32I-NEXT:    mv a2, zero
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 0
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __muldi3@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    mv s5, a1
@@ -1530,7 +1530,7 @@ define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a2, a1
 ; RV64I-NEXT:    srai a3, a1, 63
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __multi3@plt
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/pr51206.ll b/llvm/test/CodeGen/RISCV/pr51206.ll
index fc9ed6b62608..019853b5e8b0 100644
--- a/llvm/test/CodeGen/RISCV/pr51206.ll
+++ b/llvm/test/CodeGen/RISCV/pr51206.ll
@@ -29,13 +29,13 @@ define signext i32 @wobble() nounwind {
 ; CHECK-NEXT:    mul a1, a1, a2
 ; CHECK-NEXT:    srli a1, a1, 18
 ; CHECK-NEXT:    lui a2, %hi(global.3)
-; CHECK-NEXT:    addi a3, zero, 5
+; CHECK-NEXT:    li a3, 5
 ; CHECK-NEXT:    sw a1, %lo(global.3)(a2)
 ; CHECK-NEXT:    bltu a0, a3, .LBB0_2
 ; CHECK-NEXT:  # %bb.1: # %bb10
 ; CHECK-NEXT:    call quux@plt
 ; CHECK-NEXT:  .LBB0_2: # %bb12
-; CHECK-NEXT:    mv a0, zero
+; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rem.ll b/llvm/test/CodeGen/RISCV/rem.ll
index cb7098cea611..09cb681b1f07 100644
--- a/llvm/test/CodeGen/RISCV/rem.ll
+++ b/llvm/test/CodeGen/RISCV/rem.ll
@@ -50,7 +50,7 @@ define i32 @urem_constant_lhs(i32 %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    addi a0, zero, 10
+; RV32I-NEXT:    li a0, 10
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -58,7 +58,7 @@ define i32 @urem_constant_lhs(i32 %a) nounwind {
 ;
 ; RV32IM-LABEL: urem_constant_lhs:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    addi a1, zero, 10
+; RV32IM-NEXT:    li a1, 10
 ; RV32IM-NEXT:    remu a0, a1, a0
 ; RV32IM-NEXT:    ret
 ;
@@ -68,7 +68,7 @@ define i32 @urem_constant_lhs(i32 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a1, a0, 32
-; RV64I-NEXT:    addi a0, zero, 10
+; RV64I-NEXT:    li a0, 10
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -76,7 +76,7 @@ define i32 @urem_constant_lhs(i32 %a) nounwind {
 ;
 ; RV64IM-LABEL: urem_constant_lhs:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    addi a1, zero, 10
+; RV64IM-NEXT:    li a1, 10
 ; RV64IM-NEXT:    remuw a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = urem i32 10, %a
@@ -207,7 +207,7 @@ define i32 @srem_constant_lhs(i32 %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    addi a0, zero, -10
+; RV32I-NEXT:    li a0, -10
 ; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -215,7 +215,7 @@ define i32 @srem_constant_lhs(i32 %a) nounwind {
 ;
 ; RV32IM-LABEL: srem_constant_lhs:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    addi a1, zero, -10
+; RV32IM-NEXT:    li a1, -10
 ; RV32IM-NEXT:    rem a0, a1, a0
 ; RV32IM-NEXT:    ret
 ;
@@ -224,7 +224,7 @@ define i32 @srem_constant_lhs(i32 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a1, a0
-; RV64I-NEXT:    addi a0, zero, -10
+; RV64I-NEXT:    li a0, -10
 ; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -232,7 +232,7 @@ define i32 @srem_constant_lhs(i32 %a) nounwind {
 ;
 ; RV64IM-LABEL: srem_constant_lhs:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    addi a1, zero, -10
+; RV64IM-NEXT:    li a1, -10
 ; RV64IM-NEXT:    remw a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = srem i32 -10, %a
@@ -282,8 +282,8 @@ define i64 @urem64_constant_lhs(i64 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a3, a1
 ; RV32I-NEXT:    mv a2, a0
-; RV32I-NEXT:    addi a0, zero, 10
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a0, 10
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __umoddi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -295,8 +295,8 @@ define i64 @urem64_constant_lhs(i64 %a) nounwind {
 ; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IM-NEXT:    mv a3, a1
 ; RV32IM-NEXT:    mv a2, a0
-; RV32IM-NEXT:    addi a0, zero, 10
-; RV32IM-NEXT:    mv a1, zero
+; RV32IM-NEXT:    li a0, 10
+; RV32IM-NEXT:    li a1, 0
 ; RV32IM-NEXT:    call __umoddi3@plt
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
@@ -307,7 +307,7 @@ define i64 @urem64_constant_lhs(i64 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    addi a0, zero, 10
+; RV64I-NEXT:    li a0, 10
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -315,7 +315,7 @@ define i64 @urem64_constant_lhs(i64 %a) nounwind {
 ;
 ; RV64IM-LABEL: urem64_constant_lhs:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    addi a1, zero, 10
+; RV64IM-NEXT:    li a1, 10
 ; RV64IM-NEXT:    remu a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = urem i64 10, %a
@@ -365,8 +365,8 @@ define i64 @srem64_constant_lhs(i64 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv a3, a1
 ; RV32I-NEXT:    mv a2, a0
-; RV32I-NEXT:    addi a0, zero, -10
-; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    li a0, -10
+; RV32I-NEXT:    li a1, -1
 ; RV32I-NEXT:    call __moddi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -378,8 +378,8 @@ define i64 @srem64_constant_lhs(i64 %a) nounwind {
 ; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IM-NEXT:    mv a3, a1
 ; RV32IM-NEXT:    mv a2, a0
-; RV32IM-NEXT:    addi a0, zero, -10
-; RV32IM-NEXT:    addi a1, zero, -1
+; RV32IM-NEXT:    li a0, -10
+; RV32IM-NEXT:    li a1, -1
 ; RV32IM-NEXT:    call __moddi3@plt
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
@@ -390,7 +390,7 @@ define i64 @srem64_constant_lhs(i64 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    addi a0, zero, -10
+; RV64I-NEXT:    li a0, -10
 ; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -398,7 +398,7 @@ define i64 @srem64_constant_lhs(i64 %a) nounwind {
 ;
 ; RV64IM-LABEL: srem64_constant_lhs:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    addi a1, zero, -10
+; RV64IM-NEXT:    li a1, -10
 ; RV64IM-NEXT:    rem a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = srem i64 -10, %a
@@ -451,7 +451,7 @@ define i8 @urem8_constant_lhs(i8 %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    andi a1, a0, 255
-; RV32I-NEXT:    addi a0, zero, 10
+; RV32I-NEXT:    li a0, 10
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -460,7 +460,7 @@ define i8 @urem8_constant_lhs(i8 %a) nounwind {
 ; RV32IM-LABEL: urem8_constant_lhs:
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    andi a0, a0, 255
-; RV32IM-NEXT:    addi a1, zero, 10
+; RV32IM-NEXT:    li a1, 10
 ; RV32IM-NEXT:    remu a0, a1, a0
 ; RV32IM-NEXT:    ret
 ;
@@ -469,7 +469,7 @@ define i8 @urem8_constant_lhs(i8 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    andi a1, a0, 255
-; RV64I-NEXT:    addi a0, zero, 10
+; RV64I-NEXT:    li a0, 10
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -478,7 +478,7 @@ define i8 @urem8_constant_lhs(i8 %a) nounwind {
 ; RV64IM-LABEL: urem8_constant_lhs:
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    andi a0, a0, 255
-; RV64IM-NEXT:    addi a1, zero, 10
+; RV64IM-NEXT:    li a1, 10
 ; RV64IM-NEXT:    remuw a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = urem i8 10, %a
@@ -541,7 +541,7 @@ define i8 @srem8_constant_lhs(i8 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a1, a0, 24
-; RV32I-NEXT:    addi a0, zero, -10
+; RV32I-NEXT:    li a0, -10
 ; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -551,7 +551,7 @@ define i8 @srem8_constant_lhs(i8 %a) nounwind {
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    slli a0, a0, 24
 ; RV32IM-NEXT:    srai a0, a0, 24
-; RV32IM-NEXT:    addi a1, zero, -10
+; RV32IM-NEXT:    li a1, -10
 ; RV32IM-NEXT:    rem a0, a1, a0
 ; RV32IM-NEXT:    ret
 ;
@@ -561,7 +561,7 @@ define i8 @srem8_constant_lhs(i8 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a1, a0, 56
-; RV64I-NEXT:    addi a0, zero, -10
+; RV64I-NEXT:    li a0, -10
 ; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -571,7 +571,7 @@ define i8 @srem8_constant_lhs(i8 %a) nounwind {
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    slli a0, a0, 56
 ; RV64IM-NEXT:    srai a0, a0, 56
-; RV64IM-NEXT:    addi a1, zero, -10
+; RV64IM-NEXT:    li a1, -10
 ; RV64IM-NEXT:    remw a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = srem i8 -10, %a
@@ -635,7 +635,7 @@ define i16 @urem16_constant_lhs(i16 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a1, a0, a1
-; RV32I-NEXT:    addi a0, zero, 10
+; RV32I-NEXT:    li a0, 10
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -646,7 +646,7 @@ define i16 @urem16_constant_lhs(i16 %a) nounwind {
 ; RV32IM-NEXT:    lui a1, 16
 ; RV32IM-NEXT:    addi a1, a1, -1
 ; RV32IM-NEXT:    and a0, a0, a1
-; RV32IM-NEXT:    addi a1, zero, 10
+; RV32IM-NEXT:    li a1, 10
 ; RV32IM-NEXT:    remu a0, a1, a0
 ; RV32IM-NEXT:    ret
 ;
@@ -657,7 +657,7 @@ define i16 @urem16_constant_lhs(i16 %a) nounwind {
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    and a1, a0, a1
-; RV64I-NEXT:    addi a0, zero, 10
+; RV64I-NEXT:    li a0, 10
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -668,7 +668,7 @@ define i16 @urem16_constant_lhs(i16 %a) nounwind {
 ; RV64IM-NEXT:    lui a1, 16
 ; RV64IM-NEXT:    addiw a1, a1, -1
 ; RV64IM-NEXT:    and a0, a0, a1
-; RV64IM-NEXT:    addi a1, zero, 10
+; RV64IM-NEXT:    li a1, 10
 ; RV64IM-NEXT:    remuw a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = urem i16 10, %a
@@ -730,7 +730,7 @@ define i16 @srem16_constant_lhs(i16 %a) nounwind {
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a1, a0, 16
-; RV32I-NEXT:    addi a0, zero, -10
+; RV32I-NEXT:    li a0, -10
 ; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -740,7 +740,7 @@ define i16 @srem16_constant_lhs(i16 %a) nounwind {
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    slli a0, a0, 16
 ; RV32IM-NEXT:    srai a0, a0, 16
-; RV32IM-NEXT:    addi a1, zero, -10
+; RV32IM-NEXT:    li a1, -10
 ; RV32IM-NEXT:    rem a0, a1, a0
 ; RV32IM-NEXT:    ret
 ;
@@ -750,7 +750,7 @@ define i16 @srem16_constant_lhs(i16 %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a1, a0, 48
-; RV64I-NEXT:    addi a0, zero, -10
+; RV64I-NEXT:    li a0, -10
 ; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -760,7 +760,7 @@ define i16 @srem16_constant_lhs(i16 %a) nounwind {
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    slli a0, a0, 48
 ; RV64IM-NEXT:    srai a0, a0, 48
-; RV64IM-NEXT:    addi a1, zero, -10
+; RV64IM-NEXT:    li a1, -10
 ; RV64IM-NEXT:    remw a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = srem i16 -10, %a

diff --git a/llvm/test/CodeGen/RISCV/remat.ll b/llvm/test/CodeGen/RISCV/remat.ll
index e80707c34029..a70e29a61b71 100644
--- a/llvm/test/CodeGen/RISCV/remat.ll
+++ b/llvm/test/CodeGen/RISCV/remat.ll
@@ -69,7 +69,7 @@ define i32 @test() nounwind {
 ; RV32I-NEXT:    lw a2, %lo(c)(s10)
 ; RV32I-NEXT:    lw a3, %lo(d)(s1)
 ; RV32I-NEXT:    lw a4, %lo(e)(s0)
-; RV32I-NEXT:    addi a5, zero, 32
+; RV32I-NEXT:    li a5, 32
 ; RV32I-NEXT:    call foo@plt
 ; RV32I-NEXT:  .LBB0_5: # %if.end
 ; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
@@ -82,7 +82,7 @@ define i32 @test() nounwind {
 ; RV32I-NEXT:    lw a2, %lo(d)(s1)
 ; RV32I-NEXT:    lw a3, %lo(e)(s0)
 ; RV32I-NEXT:    lw a4, %lo(f)(s7)
-; RV32I-NEXT:    addi a5, zero, 64
+; RV32I-NEXT:    li a5, 64
 ; RV32I-NEXT:    call foo@plt
 ; RV32I-NEXT:  .LBB0_7: # %if.end5
 ; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
@@ -95,7 +95,7 @@ define i32 @test() nounwind {
 ; RV32I-NEXT:    lw a2, %lo(e)(s0)
 ; RV32I-NEXT:    lw a3, %lo(f)(s7)
 ; RV32I-NEXT:    lw a4, %lo(g)(s8)
-; RV32I-NEXT:    addi a5, zero, 32
+; RV32I-NEXT:    li a5, 32
 ; RV32I-NEXT:    call foo@plt
 ; RV32I-NEXT:  .LBB0_9: # %if.end9
 ; RV32I-NEXT:    # in Loop: Header=BB0_3 Depth=1
@@ -108,11 +108,11 @@ define i32 @test() nounwind {
 ; RV32I-NEXT:    lw a2, %lo(f)(s7)
 ; RV32I-NEXT:    lw a3, %lo(g)(s8)
 ; RV32I-NEXT:    lw a4, %lo(h)(s9)
-; RV32I-NEXT:    addi a5, zero, 32
+; RV32I-NEXT:    li a5, 32
 ; RV32I-NEXT:    call foo@plt
 ; RV32I-NEXT:    j .LBB0_2
 ; RV32I-NEXT:  .LBB0_11: # %for.end
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s10, 16(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s9, 20(sp) # 4-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
index d3f08804a3d4..ab472e9f4431 100644
--- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll
+++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
@@ -8,7 +8,7 @@
 define i32 @rotl(i32 %x, i32 %y) nounwind {
 ; RV32I-LABEL: rotl:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a2, zero, 32
+; RV32I-NEXT:    li a2, 32
 ; RV32I-NEXT:    sub a2, a2, a1
 ; RV32I-NEXT:    sll a1, a0, a1
 ; RV32I-NEXT:    srl a0, a0, a2
@@ -24,7 +24,7 @@ define i32 @rotl(i32 %x, i32 %y) nounwind {
 define i32 @rotr(i32 %x, i32 %y) nounwind {
 ; RV32I-LABEL: rotr:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a2, zero, 32
+; RV32I-NEXT:    li a2, 32
 ; RV32I-NEXT:    sub a2, a2, a1
 ; RV32I-NEXT:    srl a1, a0, a1
 ; RV32I-NEXT:    sll a0, a0, a2

diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll
index b631cd9f9452..f00e096cc829 100644
--- a/llvm/test/CodeGen/RISCV/rv32zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -63,7 +63,7 @@ define i64 @sh3add(i64 %0, i64* %1) {
 define i32 @addmul6(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul6:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a2, zero, 6
+; RV32I-NEXT:    li a2, 6
 ; RV32I-NEXT:    mul a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
@@ -81,7 +81,7 @@ define i32 @addmul6(i32 %a, i32 %b) {
 define i32 @addmul10(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul10:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a2, zero, 10
+; RV32I-NEXT:    li a2, 10
 ; RV32I-NEXT:    mul a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
@@ -99,7 +99,7 @@ define i32 @addmul10(i32 %a, i32 %b) {
 define i32 @addmul12(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul12:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a2, zero, 12
+; RV32I-NEXT:    li a2, 12
 ; RV32I-NEXT:    mul a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
@@ -117,7 +117,7 @@ define i32 @addmul12(i32 %a, i32 %b) {
 define i32 @addmul18(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul18:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a2, zero, 18
+; RV32I-NEXT:    li a2, 18
 ; RV32I-NEXT:    mul a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
@@ -135,7 +135,7 @@ define i32 @addmul18(i32 %a, i32 %b) {
 define i32 @addmul20(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul20:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a2, zero, 20
+; RV32I-NEXT:    li a2, 20
 ; RV32I-NEXT:    mul a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
@@ -153,7 +153,7 @@ define i32 @addmul20(i32 %a, i32 %b) {
 define i32 @addmul24(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul24:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a2, zero, 24
+; RV32I-NEXT:    li a2, 24
 ; RV32I-NEXT:    mul a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
@@ -171,7 +171,7 @@ define i32 @addmul24(i32 %a, i32 %b) {
 define i32 @addmul36(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul36:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a2, zero, 36
+; RV32I-NEXT:    li a2, 36
 ; RV32I-NEXT:    mul a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
@@ -189,7 +189,7 @@ define i32 @addmul36(i32 %a, i32 %b) {
 define i32 @addmul40(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul40:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a2, zero, 40
+; RV32I-NEXT:    li a2, 40
 ; RV32I-NEXT:    mul a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
@@ -207,7 +207,7 @@ define i32 @addmul40(i32 %a, i32 %b) {
 define i32 @addmul72(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul72:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a2, zero, 72
+; RV32I-NEXT:    li a2, 72
 ; RV32I-NEXT:    mul a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
@@ -225,7 +225,7 @@ define i32 @addmul72(i32 %a, i32 %b) {
 define i32 @mul96(i32 %a) {
 ; RV32I-LABEL: mul96:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 96
+; RV32I-NEXT:    li a1, 96
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -241,7 +241,7 @@ define i32 @mul96(i32 %a) {
 define i32 @mul160(i32 %a) {
 ; RV32I-LABEL: mul160:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 160
+; RV32I-NEXT:    li a1, 160
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -257,7 +257,7 @@ define i32 @mul160(i32 %a) {
 define i32 @mul288(i32 %a) {
 ; RV32I-LABEL: mul288:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 288
+; RV32I-NEXT:    li a1, 288
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -273,13 +273,13 @@ define i32 @mul288(i32 %a) {
 define i32 @mul258(i32 %a) {
 ; RV32I-LABEL: mul258:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 258
+; RV32I-NEXT:    li a1, 258
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: mul258:
 ; RV32ZBA:       # %bb.0:
-; RV32ZBA-NEXT:    addi a1, zero, 258
+; RV32ZBA-NEXT:    li a1, 258
 ; RV32ZBA-NEXT:    mul a0, a0, a1
 ; RV32ZBA-NEXT:    ret
   %c = mul i32 %a, 258
@@ -289,13 +289,13 @@ define i32 @mul258(i32 %a) {
 define i32 @mul260(i32 %a) {
 ; RV32I-LABEL: mul260:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 260
+; RV32I-NEXT:    li a1, 260
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: mul260:
 ; RV32ZBA:       # %bb.0:
-; RV32ZBA-NEXT:    addi a1, zero, 260
+; RV32ZBA-NEXT:    li a1, 260
 ; RV32ZBA-NEXT:    mul a0, a0, a1
 ; RV32ZBA-NEXT:    ret
   %c = mul i32 %a, 260
@@ -305,13 +305,13 @@ define i32 @mul260(i32 %a) {
 define i32 @mul264(i32 %a) {
 ; RV32I-LABEL: mul264:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 264
+; RV32I-NEXT:    li a1, 264
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: mul264:
 ; RV32ZBA:       # %bb.0:
-; RV32ZBA-NEXT:    addi a1, zero, 264
+; RV32ZBA-NEXT:    li a1, 264
 ; RV32ZBA-NEXT:    mul a0, a0, a1
 ; RV32ZBA-NEXT:    ret
   %c = mul i32 %a, 264
@@ -321,7 +321,7 @@ define i32 @mul264(i32 %a) {
 define i32 @mul11(i32 %a) {
 ; RV32I-LABEL: mul11:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 11
+; RV32I-NEXT:    li a1, 11
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -337,7 +337,7 @@ define i32 @mul11(i32 %a) {
 define i32 @mul19(i32 %a) {
 ; RV32I-LABEL: mul19:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 19
+; RV32I-NEXT:    li a1, 19
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -353,7 +353,7 @@ define i32 @mul19(i32 %a) {
 define i32 @mul13(i32 %a) {
 ; RV32I-LABEL: mul13:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 13
+; RV32I-NEXT:    li a1, 13
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -369,7 +369,7 @@ define i32 @mul13(i32 %a) {
 define i32 @mul21(i32 %a) {
 ; RV32I-LABEL: mul21:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 21
+; RV32I-NEXT:    li a1, 21
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -385,7 +385,7 @@ define i32 @mul21(i32 %a) {
 define i32 @mul37(i32 %a) {
 ; RV32I-LABEL: mul37:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 37
+; RV32I-NEXT:    li a1, 37
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -401,7 +401,7 @@ define i32 @mul37(i32 %a) {
 define i32 @mul25(i32 %a) {
 ; RV32I-LABEL: mul25:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 25
+; RV32I-NEXT:    li a1, 25
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -417,7 +417,7 @@ define i32 @mul25(i32 %a) {
 define i32 @mul41(i32 %a) {
 ; RV32I-LABEL: mul41:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 41
+; RV32I-NEXT:    li a1, 41
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -433,7 +433,7 @@ define i32 @mul41(i32 %a) {
 define i32 @mul73(i32 %a) {
 ; RV32I-LABEL: mul73:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 73
+; RV32I-NEXT:    li a1, 73
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -449,7 +449,7 @@ define i32 @mul73(i32 %a) {
 define i32 @mul27(i32 %a) {
 ; RV32I-LABEL: mul27:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 27
+; RV32I-NEXT:    li a1, 27
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -465,7 +465,7 @@ define i32 @mul27(i32 %a) {
 define i32 @mul45(i32 %a) {
 ; RV32I-LABEL: mul45:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 45
+; RV32I-NEXT:    li a1, 45
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -481,7 +481,7 @@ define i32 @mul45(i32 %a) {
 define i32 @mul81(i32 %a) {
 ; RV32I-LABEL: mul81:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 81
+; RV32I-NEXT:    li a1, 81
 ; RV32I-NEXT:    mul a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -555,7 +555,7 @@ define i32 @add4104(i32 %a) {
 ;
 ; RV32ZBA-LABEL: add4104:
 ; RV32ZBA:       # %bb.0:
-; RV32ZBA-NEXT:    addi a1, zero, 1026
+; RV32ZBA-NEXT:    li a1, 1026
 ; RV32ZBA-NEXT:    sh2add a0, a1, a0
 ; RV32ZBA-NEXT:    ret
   %c = add i32 %a, 4104
@@ -572,7 +572,7 @@ define i32 @add8208(i32 %a) {
 ;
 ; RV32ZBA-LABEL: add8208:
 ; RV32ZBA:       # %bb.0:
-; RV32ZBA-NEXT:    addi a1, zero, 1026
+; RV32ZBA-NEXT:    li a1, 1026
 ; RV32ZBA-NEXT:    sh3add a0, a1, a0
 ; RV32ZBA-NEXT:    ret
   %c = add i32 %a, 8208

diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index 89e6409f452b..356c4abe1765 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -48,7 +48,7 @@ define i32 @ctlz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB0_2:
-; RV32I-NEXT:    addi a0, zero, 32
+; RV32I-NEXT:    li a0, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: ctlz_i32:
@@ -138,7 +138,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:  .LBB1_2:
 ; RV32I-NEXT:    srli a0, s2, 24
 ; RV32I-NEXT:  .LBB1_3:
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
@@ -156,11 +156,11 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV32ZBB-NEXT:  # %bb.1:
 ; RV32ZBB-NEXT:    clz a0, a0
 ; RV32ZBB-NEXT:    addi a0, a0, 32
-; RV32ZBB-NEXT:    mv a1, zero
+; RV32ZBB-NEXT:    li a1, 0
 ; RV32ZBB-NEXT:    ret
 ; RV32ZBB-NEXT:  .LBB1_2:
 ; RV32ZBB-NEXT:    clz a0, a1
-; RV32ZBB-NEXT:    mv a1, zero
+; RV32ZBB-NEXT:    li a1, 0
 ; RV32ZBB-NEXT:    ret
   %1 = call i64 @llvm.ctlz.i64(i64 %a, i1 false)
   ret i64 %1
@@ -202,7 +202,7 @@ define i32 @cttz_i32(i32 %a) nounwind {
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB2_2:
-; RV32I-NEXT:    addi a0, zero, 32
+; RV32I-NEXT:    li a0, 32
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: cttz_i32:
@@ -276,7 +276,7 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV32I-NEXT:  .LBB3_2:
 ; RV32I-NEXT:    srli a0, s2, 24
 ; RV32I-NEXT:  .LBB3_3:
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
@@ -294,11 +294,11 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV32ZBB-NEXT:  # %bb.1:
 ; RV32ZBB-NEXT:    ctz a0, a1
 ; RV32ZBB-NEXT:    addi a0, a0, 32
-; RV32ZBB-NEXT:    mv a1, zero
+; RV32ZBB-NEXT:    li a1, 0
 ; RV32ZBB-NEXT:    ret
 ; RV32ZBB-NEXT:  .LBB3_2:
 ; RV32ZBB-NEXT:    ctz a0, a0
-; RV32ZBB-NEXT:    mv a1, zero
+; RV32ZBB-NEXT:    li a1, 0
 ; RV32ZBB-NEXT:    ret
   %1 = call i64 @llvm.cttz.i64(i64 %a, i1 false)
   ret i64 %1
@@ -392,7 +392,7 @@ define i64 @ctpop_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    call __mulsi3@plt
 ; RV32I-NEXT:    srli a0, a0, 24
 ; RV32I-NEXT:    add a0, a0, s5
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
@@ -408,7 +408,7 @@ define i64 @ctpop_i64(i64 %a) nounwind {
 ; RV32ZBB-NEXT:    cpop a1, a1
 ; RV32ZBB-NEXT:    cpop a0, a0
 ; RV32ZBB-NEXT:    add a0, a0, a1
-; RV32ZBB-NEXT:    mv a1, zero
+; RV32ZBB-NEXT:    li a1, 0
 ; RV32ZBB-NEXT:    ret
   %1 = call i64 @llvm.ctpop.i64(i64 %a)
   ret i64 %1
@@ -795,13 +795,13 @@ define i64 @zexth_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: zexth_i64:
 ; RV32ZBB:       # %bb.0:
 ; RV32ZBB-NEXT:    zext.h a0, a0
-; RV32ZBB-NEXT:    mv a1, zero
+; RV32ZBB-NEXT:    li a1, 0
 ; RV32ZBB-NEXT:    ret
   %and = and i64 %a, 65535
   ret i64 %and

diff --git a/llvm/test/CodeGen/RISCV/rv32zbp.ll b/llvm/test/CodeGen/RISCV/rv32zbp.ll
index 24e28ba72014..f67ef05006fa 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbp.ll
@@ -2944,13 +2944,13 @@ define i64 @packh_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    slli a1, a2, 24
 ; RV32I-NEXT:    srli a1, a1, 16
 ; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBP-LABEL: packh_i64:
 ; RV32ZBP:       # %bb.0:
 ; RV32ZBP-NEXT:    packh a0, a0, a2
-; RV32ZBP-NEXT:    mv a1, zero
+; RV32ZBP-NEXT:    li a1, 0
 ; RV32ZBP-NEXT:    ret
   %and = and i64 %a, 255
   %and1 = shl i64 %b, 8
@@ -2981,13 +2981,13 @@ define i64 @zexth_i64(i64 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBP-LABEL: zexth_i64:
 ; RV32ZBP:       # %bb.0:
 ; RV32ZBP-NEXT:    zext.h a0, a0
-; RV32ZBP-NEXT:    mv a1, zero
+; RV32ZBP-NEXT:    li a1, 0
 ; RV32ZBP-NEXT:    ret
   %and = and i64 %a, 65535
   ret i64 %and

diff --git a/llvm/test/CodeGen/RISCV/rv32zbs.ll b/llvm/test/CodeGen/RISCV/rv32zbs.ll
index 7f2fde933a5d..9730a160f7cf 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbs.ll
@@ -7,7 +7,7 @@
 define i32 @sbclr_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: sbclr_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a2, zero, 1
+; RV32I-NEXT:    li a2, 1
 ; RV32I-NEXT:    sll a1, a2, a1
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    and a0, a1, a0
@@ -27,7 +27,7 @@ define i32 @sbclr_i32(i32 %a, i32 %b) nounwind {
 define i32 @sbclr_i32_no_mask(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: sbclr_i32_no_mask:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a2, zero, 1
+; RV32I-NEXT:    li a2, 1
 ; RV32I-NEXT:    sll a1, a2, a1
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    and a0, a1, a0
@@ -48,7 +48,7 @@ define i64 @sbclr_i64(i64 %a, i64 %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a3, a2, 63
 ; RV32I-NEXT:    addi a4, a3, -32
-; RV32I-NEXT:    addi a3, zero, 1
+; RV32I-NEXT:    li a3, 1
 ; RV32I-NEXT:    bltz a4, .LBB2_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sll a2, a3, a4
@@ -82,7 +82,7 @@ define i64 @sbclr_i64(i64 %a, i64 %b) nounwind {
 define i32 @sbset_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: sbset_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a2, zero, 1
+; RV32I-NEXT:    li a2, 1
 ; RV32I-NEXT:    sll a1, a2, a1
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    ret
@@ -100,7 +100,7 @@ define i32 @sbset_i32(i32 %a, i32 %b) nounwind {
 define i32 @sbset_i32_no_mask(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: sbset_i32_no_mask:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a2, zero, 1
+; RV32I-NEXT:    li a2, 1
 ; RV32I-NEXT:    sll a1, a2, a1
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    ret
@@ -118,7 +118,7 @@ define i32 @sbset_i32_no_mask(i32 %a, i32 %b) nounwind {
 define signext i32 @sbset_i32_zero(i32 signext %a) nounwind {
 ; RV32I-LABEL: sbset_i32_zero:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 1
+; RV32I-NEXT:    li a1, 1
 ; RV32I-NEXT:    sll a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
@@ -138,7 +138,7 @@ define signext i32 @sbset_i32_zero(i32 signext %a) nounwind {
 define i64 @sbset_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: sbset_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a3, zero, 1
+; RV32I-NEXT:    li a3, 1
 ; RV32I-NEXT:    sll a2, a3, a2
 ; RV32I-NEXT:    srai a3, a2, 31
 ; RV32I-NEXT:    or a0, a2, a0
@@ -164,14 +164,14 @@ define signext i64 @sbset_i64_zero(i64 signext %a) nounwind {
 ; RV32I-LABEL: sbset_i64_zero:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a1, a0, -32
-; RV32I-NEXT:    addi a2, zero, 1
+; RV32I-NEXT:    li a2, 1
 ; RV32I-NEXT:    bltz a1, .LBB7_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    sll a1, a2, a1
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB7_2:
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    sll a0, a2, a0
 ; RV32I-NEXT:    ret
 ;
@@ -180,11 +180,11 @@ define signext i64 @sbset_i64_zero(i64 signext %a) nounwind {
 ; RV32ZBS-NEXT:    addi a1, a0, -32
 ; RV32ZBS-NEXT:    bltz a1, .LBB7_2
 ; RV32ZBS-NEXT:  # %bb.1:
-; RV32ZBS-NEXT:    mv a0, zero
+; RV32ZBS-NEXT:    li a0, 0
 ; RV32ZBS-NEXT:    bset a1, zero, a1
 ; RV32ZBS-NEXT:    ret
 ; RV32ZBS-NEXT:  .LBB7_2:
-; RV32ZBS-NEXT:    mv a1, zero
+; RV32ZBS-NEXT:    li a1, 0
 ; RV32ZBS-NEXT:    bset a0, zero, a0
 ; RV32ZBS-NEXT:    ret
   %shl = shl i64 1, %a
@@ -194,7 +194,7 @@ define signext i64 @sbset_i64_zero(i64 signext %a) nounwind {
 define i32 @sbinv_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: sbinv_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a2, zero, 1
+; RV32I-NEXT:    li a2, 1
 ; RV32I-NEXT:    sll a1, a2, a1
 ; RV32I-NEXT:    xor a0, a1, a0
 ; RV32I-NEXT:    ret
@@ -217,7 +217,7 @@ define i32 @sbinv_i32(i32 %a, i32 %b) nounwind {
 define i64 @sbinv_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: sbinv_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a3, zero, 1
+; RV32I-NEXT:    li a3, 1
 ; RV32I-NEXT:    sll a2, a3, a2
 ; RV32I-NEXT:    srai a3, a2, 31
 ; RV32I-NEXT:    xor a0, a2, a0
@@ -288,14 +288,14 @@ define i64 @sbext_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    j .LBB12_3
 ; RV32I-NEXT:  .LBB12_2:
 ; RV32I-NEXT:    srl a0, a0, a2
-; RV32I-NEXT:    addi a2, zero, 31
+; RV32I-NEXT:    li a2, 31
 ; RV32I-NEXT:    sub a2, a2, a3
 ; RV32I-NEXT:    slli a1, a1, 1
 ; RV32I-NEXT:    sll a1, a1, a2
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:  .LBB12_3:
 ; RV32I-NEXT:    andi a0, a0, 1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBS-LABEL: sbext_i64:
@@ -308,14 +308,14 @@ define i64 @sbext_i64(i64 %a, i64 %b) nounwind {
 ; RV32ZBS-NEXT:    j .LBB12_3
 ; RV32ZBS-NEXT:  .LBB12_2:
 ; RV32ZBS-NEXT:    srl a0, a0, a2
-; RV32ZBS-NEXT:    addi a2, zero, 31
+; RV32ZBS-NEXT:    li a2, 31
 ; RV32ZBS-NEXT:    sub a2, a2, a3
 ; RV32ZBS-NEXT:    slli a1, a1, 1
 ; RV32ZBS-NEXT:    sll a1, a1, a2
 ; RV32ZBS-NEXT:    or a0, a0, a1
 ; RV32ZBS-NEXT:  .LBB12_3:
 ; RV32ZBS-NEXT:    andi a0, a0, 1
-; RV32ZBS-NEXT:    mv a1, zero
+; RV32ZBS-NEXT:    li a1, 0
 ; RV32ZBS-NEXT:    ret
   %conv = and i64 %b, 63
   %shr = lshr i64 %a, %conv
@@ -344,13 +344,13 @@ define i64 @sbexti_i64(i64 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srli a0, a0, 5
 ; RV32I-NEXT:    andi a0, a0, 1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBS-LABEL: sbexti_i64:
 ; RV32ZBS:       # %bb.0:
 ; RV32ZBS-NEXT:    bexti a0, a0, 5
-; RV32ZBS-NEXT:    mv a1, zero
+; RV32ZBS-NEXT:    li a1, 0
 ; RV32ZBS-NEXT:    ret
   %shr = lshr i64 %a, 5
   %and = and i64 %shr, 1

diff --git a/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll b/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll
index 40db46ad1cae..9b9823489dc1 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll
@@ -20,7 +20,7 @@ define signext i32 @addw(i32 signext %s, i32 signext %n, i32 signext %k) nounwin
 ; CHECK-NEXT:    addw a0, a0, a1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB0_2:
-; CHECK-NEXT:    mv a0, zero
+; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:    ret
 entry:
   %cmp6 = icmp slt i32 %s, %n
@@ -65,7 +65,7 @@ define signext i32 @subw(i32 signext %s, i32 signext %n, i32 signext %k) nounwin
 ; CHECK-NEXT:    subw a0, a0, a1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB1_2:
-; CHECK-NEXT:    mv a0, zero
+; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:    ret
 entry:
   %cmp6 = icmp slt i32 %s, %n

diff --git a/llvm/test/CodeGen/RISCV/rv64m-w-insts-legalization.ll b/llvm/test/CodeGen/RISCV/rv64m-w-insts-legalization.ll
index 8af41d260844..f69909e76d4c 100644
--- a/llvm/test/CodeGen/RISCV/rv64m-w-insts-legalization.ll
+++ b/llvm/test/CodeGen/RISCV/rv64m-w-insts-legalization.ll
@@ -4,10 +4,10 @@
 define signext i32 @mulw(i32 signext %s, i32 signext %n, i32 signext %k) nounwind {
 ; CHECK-LABEL: mulw:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a2, zero, 1
+; CHECK-NEXT:    li a2, 1
 ; CHECK-NEXT:    bge a0, a1, .LBB0_3
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
-; CHECK-NEXT:    addi a2, zero, 1
+; CHECK-NEXT:    li a2, 1
 ; CHECK-NEXT:  .LBB0_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    mulw a2, a0, a2

diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index bee999a76179..20edc0ad6391 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -318,7 +318,7 @@ define i64 @sh2add_extra_sext(i32 %x, i32 %y, i32 %z) {
 define i64 @addmul6(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul6:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 6
+; RV64I-NEXT:    li a2, 6
 ; RV64I-NEXT:    mul a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -336,7 +336,7 @@ define i64 @addmul6(i64 %a, i64 %b) {
 define i64 @addmul10(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul10:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 10
+; RV64I-NEXT:    li a2, 10
 ; RV64I-NEXT:    mul a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -354,7 +354,7 @@ define i64 @addmul10(i64 %a, i64 %b) {
 define i64 @addmul12(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul12:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 12
+; RV64I-NEXT:    li a2, 12
 ; RV64I-NEXT:    mul a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -372,7 +372,7 @@ define i64 @addmul12(i64 %a, i64 %b) {
 define i64 @addmul18(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul18:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 18
+; RV64I-NEXT:    li a2, 18
 ; RV64I-NEXT:    mul a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -390,7 +390,7 @@ define i64 @addmul18(i64 %a, i64 %b) {
 define i64 @addmul20(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul20:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 20
+; RV64I-NEXT:    li a2, 20
 ; RV64I-NEXT:    mul a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -408,7 +408,7 @@ define i64 @addmul20(i64 %a, i64 %b) {
 define i64 @addmul24(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul24:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 24
+; RV64I-NEXT:    li a2, 24
 ; RV64I-NEXT:    mul a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -426,7 +426,7 @@ define i64 @addmul24(i64 %a, i64 %b) {
 define i64 @addmul36(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul36:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 36
+; RV64I-NEXT:    li a2, 36
 ; RV64I-NEXT:    mul a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -444,7 +444,7 @@ define i64 @addmul36(i64 %a, i64 %b) {
 define i64 @addmul40(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul40:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 40
+; RV64I-NEXT:    li a2, 40
 ; RV64I-NEXT:    mul a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -462,7 +462,7 @@ define i64 @addmul40(i64 %a, i64 %b) {
 define i64 @addmul72(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul72:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 72
+; RV64I-NEXT:    li a2, 72
 ; RV64I-NEXT:    mul a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -480,7 +480,7 @@ define i64 @addmul72(i64 %a, i64 %b) {
 define i64 @mul96(i64 %a) {
 ; RV64I-LABEL: mul96:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 96
+; RV64I-NEXT:    li a1, 96
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -496,7 +496,7 @@ define i64 @mul96(i64 %a) {
 define i64 @mul160(i64 %a) {
 ; RV64I-LABEL: mul160:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 160
+; RV64I-NEXT:    li a1, 160
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -512,7 +512,7 @@ define i64 @mul160(i64 %a) {
 define i64 @mul288(i64 %a) {
 ; RV64I-LABEL: mul288:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 288
+; RV64I-NEXT:    li a1, 288
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -654,13 +654,13 @@ define i64 @adduw_imm(i32 signext %0) nounwind {
 define i64 @mul258(i64 %a) {
 ; RV64I-LABEL: mul258:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 258
+; RV64I-NEXT:    li a1, 258
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: mul258:
 ; RV64ZBA:       # %bb.0:
-; RV64ZBA-NEXT:    addi a1, zero, 258
+; RV64ZBA-NEXT:    li a1, 258
 ; RV64ZBA-NEXT:    mul a0, a0, a1
 ; RV64ZBA-NEXT:    ret
   %c = mul i64 %a, 258
@@ -670,13 +670,13 @@ define i64 @mul258(i64 %a) {
 define i64 @mul260(i64 %a) {
 ; RV64I-LABEL: mul260:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 260
+; RV64I-NEXT:    li a1, 260
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: mul260:
 ; RV64ZBA:       # %bb.0:
-; RV64ZBA-NEXT:    addi a1, zero, 260
+; RV64ZBA-NEXT:    li a1, 260
 ; RV64ZBA-NEXT:    mul a0, a0, a1
 ; RV64ZBA-NEXT:    ret
   %c = mul i64 %a, 260
@@ -686,13 +686,13 @@ define i64 @mul260(i64 %a) {
 define i64 @mul264(i64 %a) {
 ; RV64I-LABEL: mul264:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 264
+; RV64I-NEXT:    li a1, 264
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: mul264:
 ; RV64ZBA:       # %bb.0:
-; RV64ZBA-NEXT:    addi a1, zero, 264
+; RV64ZBA-NEXT:    li a1, 264
 ; RV64ZBA-NEXT:    mul a0, a0, a1
 ; RV64ZBA-NEXT:    ret
   %c = mul i64 %a, 264
@@ -702,14 +702,14 @@ define i64 @mul264(i64 %a) {
 define i64 @imm_zextw() nounwind {
 ; RV64I-LABEL: imm_zextw:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    addi a0, a0, -2
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: imm_zextw:
 ; RV64ZBA:       # %bb.0:
-; RV64ZBA-NEXT:    addi a0, zero, -2
+; RV64ZBA-NEXT:    li a0, -2
 ; RV64ZBA-NEXT:    zext.w a0, a0
 ; RV64ZBA-NEXT:    ret
   ret i64 4294967294 ; -2 in 32 bits.
@@ -736,7 +736,7 @@ define i64 @imm_zextw2() nounwind {
 define i64 @mul11(i64 %a) {
 ; RV64I-LABEL: mul11:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 11
+; RV64I-NEXT:    li a1, 11
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -752,7 +752,7 @@ define i64 @mul11(i64 %a) {
 define i64 @mul19(i64 %a) {
 ; RV64I-LABEL: mul19:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 19
+; RV64I-NEXT:    li a1, 19
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -768,7 +768,7 @@ define i64 @mul19(i64 %a) {
 define i64 @mul13(i64 %a) {
 ; RV64I-LABEL: mul13:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 13
+; RV64I-NEXT:    li a1, 13
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -784,7 +784,7 @@ define i64 @mul13(i64 %a) {
 define i64 @mul21(i64 %a) {
 ; RV64I-LABEL: mul21:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 21
+; RV64I-NEXT:    li a1, 21
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -800,7 +800,7 @@ define i64 @mul21(i64 %a) {
 define i64 @mul37(i64 %a) {
 ; RV64I-LABEL: mul37:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 37
+; RV64I-NEXT:    li a1, 37
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -816,7 +816,7 @@ define i64 @mul37(i64 %a) {
 define i64 @mul25(i64 %a) {
 ; RV64I-LABEL: mul25:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 25
+; RV64I-NEXT:    li a1, 25
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -832,7 +832,7 @@ define i64 @mul25(i64 %a) {
 define i64 @mul41(i64 %a) {
 ; RV64I-LABEL: mul41:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 41
+; RV64I-NEXT:    li a1, 41
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -848,7 +848,7 @@ define i64 @mul41(i64 %a) {
 define i64 @mul73(i64 %a) {
 ; RV64I-LABEL: mul73:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 73
+; RV64I-NEXT:    li a1, 73
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -864,7 +864,7 @@ define i64 @mul73(i64 %a) {
 define i64 @mul27(i64 %a) {
 ; RV64I-LABEL: mul27:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 27
+; RV64I-NEXT:    li a1, 27
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -880,7 +880,7 @@ define i64 @mul27(i64 %a) {
 define i64 @mul45(i64 %a) {
 ; RV64I-LABEL: mul45:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 45
+; RV64I-NEXT:    li a1, 45
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -896,7 +896,7 @@ define i64 @mul45(i64 %a) {
 define i64 @mul81(i64 %a) {
 ; RV64I-LABEL: mul81:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 81
+; RV64I-NEXT:    li a1, 81
 ; RV64I-NEXT:    mul a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -963,7 +963,7 @@ define i64 @mul4104(i64 %a) {
 define signext i32 @mulw192(i32 signext %a) {
 ; RV64I-LABEL: mulw192:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 192
+; RV64I-NEXT:    li a1, 192
 ; RV64I-NEXT:    mulw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -979,7 +979,7 @@ define signext i32 @mulw192(i32 signext %a) {
 define signext i32 @mulw320(i32 signext %a) {
 ; RV64I-LABEL: mulw320:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 320
+; RV64I-NEXT:    li a1, 320
 ; RV64I-NEXT:    mulw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -995,7 +995,7 @@ define signext i32 @mulw320(i32 signext %a) {
 define signext i32 @mulw576(i32 signext %a) {
 ; RV64I-LABEL: mulw576:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 576
+; RV64I-NEXT:    li a1, 576
 ; RV64I-NEXT:    mulw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -1018,7 +1018,7 @@ define i64 @add4104(i64 %a) {
 ;
 ; RV64ZBA-LABEL: add4104:
 ; RV64ZBA:       # %bb.0:
-; RV64ZBA-NEXT:    addi a1, zero, 1026
+; RV64ZBA-NEXT:    li a1, 1026
 ; RV64ZBA-NEXT:    sh2add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
   %c = add i64 %a, 4104
@@ -1035,7 +1035,7 @@ define i64 @add8208(i64 %a) {
 ;
 ; RV64ZBA-LABEL: add8208:
 ; RV64ZBA:       # %bb.0:
-; RV64ZBA-NEXT:    addi a1, zero, 1026
+; RV64ZBA-NEXT:    li a1, 1026
 ; RV64ZBA-NEXT:    sh3add a0, a1, a0
 ; RV64ZBA-NEXT:    ret
   %c = add i64 %a, 8208

diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
index ef7b58f2d1b8..cdb8c0241020 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
@@ -186,7 +186,7 @@ define void @rol_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
 define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind {
 ; RV64I-LABEL: rol_i32_neg_constant_rhs:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -2
+; RV64I-NEXT:    li a1, -2
 ; RV64I-NEXT:    sllw a2, a1, a0
 ; RV64I-NEXT:    negw a0, a0
 ; RV64I-NEXT:    srlw a0, a1, a0
@@ -195,13 +195,13 @@ define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind {
 ;
 ; RV64ZBB-LABEL: rol_i32_neg_constant_rhs:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a1, zero, -2
+; RV64ZBB-NEXT:    li a1, -2
 ; RV64ZBB-NEXT:    rolw a0, a1, a0
 ; RV64ZBB-NEXT:    ret
 ;
 ; RV64ZBP-LABEL: rol_i32_neg_constant_rhs:
 ; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    addi a1, zero, -2
+; RV64ZBP-NEXT:    li a1, -2
 ; RV64ZBP-NEXT:    rolw a0, a1, a0
 ; RV64ZBP-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 -2, i32 -2, i32 %a)
@@ -286,7 +286,7 @@ define void @ror_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
 define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind {
 ; RV64I-LABEL: ror_i32_neg_constant_rhs:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -2
+; RV64I-NEXT:    li a1, -2
 ; RV64I-NEXT:    srlw a2, a1, a0
 ; RV64I-NEXT:    negw a0, a0
 ; RV64I-NEXT:    sllw a0, a1, a0
@@ -295,13 +295,13 @@ define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind {
 ;
 ; RV64ZBB-LABEL: ror_i32_neg_constant_rhs:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a1, zero, -2
+; RV64ZBB-NEXT:    li a1, -2
 ; RV64ZBB-NEXT:    rorw a0, a1, a0
 ; RV64ZBB-NEXT:    ret
 ;
 ; RV64ZBP-LABEL: ror_i32_neg_constant_rhs:
 ; RV64ZBP:       # %bb.0:
-; RV64ZBP-NEXT:    addi a1, zero, -2
+; RV64ZBP-NEXT:    li a1, -2
 ; RV64ZBP-NEXT:    rorw a0, a1, a0
 ; RV64ZBP-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 -2, i32 -2, i32 %a)

diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index d10f54f67d6f..a5766e71d887 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -48,7 +48,7 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB0_2:
-; RV64I-NEXT:    addi a0, zero, 32
+; RV64I-NEXT:    li a0, 32
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: ctlz_i32:
@@ -101,16 +101,16 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    j .LBB1_3
 ; RV64I-NEXT:  .LBB1_2:
-; RV64I-NEXT:    addi a0, zero, 32
+; RV64I-NEXT:    li a0, 32
 ; RV64I-NEXT:  .LBB1_3: # %cond.end
-; RV64I-NEXT:    addi a1, zero, 31
+; RV64I-NEXT:    li a1, 31
 ; RV64I-NEXT:    sub a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: log2_i32:
 ; RV64ZBB:       # %bb.0:
 ; RV64ZBB-NEXT:    clzw a0, a0
-; RV64ZBB-NEXT:    addi a1, zero, 31
+; RV64ZBB-NEXT:    li a1, 31
 ; RV64ZBB-NEXT:    sub a0, a1, a0
 ; RV64ZBB-NEXT:    ret
   %1 = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
@@ -125,8 +125,8 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    addiw a0, a0, -1
-; RV64I-NEXT:    addi s0, zero, 32
-; RV64I-NEXT:    addi a1, zero, 32
+; RV64I-NEXT:    li s0, 32
+; RV64I-NEXT:    li a1, 32
 ; RV64I-NEXT:    beqz a0, .LBB2_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    srliw a1, a0, 1
@@ -171,7 +171,7 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
 ; RV64ZBB:       # %bb.0:
 ; RV64ZBB-NEXT:    addiw a0, a0, -1
 ; RV64ZBB-NEXT:    clzw a0, a0
-; RV64ZBB-NEXT:    addi a1, zero, 32
+; RV64ZBB-NEXT:    li a1, 32
 ; RV64ZBB-NEXT:    sub a0, a1, a0
 ; RV64ZBB-NEXT:    ret
   %1 = sub i32 %a, 1
@@ -218,7 +218,7 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    addiw a1, a1, 257
 ; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    beqz s0, .LBB3_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    srliw a0, a1, 24
@@ -232,7 +232,7 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
 ; RV64ZBB-LABEL: findLastSet_i32:
 ; RV64ZBB:       # %bb.0:
 ; RV64ZBB-NEXT:    mv a1, a0
-; RV64ZBB-NEXT:    addi a0, zero, -1
+; RV64ZBB-NEXT:    li a0, -1
 ; RV64ZBB-NEXT:    beqz a1, .LBB3_2
 ; RV64ZBB-NEXT:  # %bb.1:
 ; RV64ZBB-NEXT:    clzw a0, a1
@@ -293,7 +293,7 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB4_2:
-; RV64I-NEXT:    addi a0, zero, 32
+; RV64I-NEXT:    li a0, 32
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: ctlz_lshr_i32:
@@ -374,7 +374,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB5_2:
-; RV64I-NEXT:    addi a0, zero, 64
+; RV64I-NEXT:    li a0, 64
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: ctlz_i64:
@@ -421,7 +421,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB6_2:
-; RV64I-NEXT:    addi a0, zero, 32
+; RV64I-NEXT:    li a0, 32
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: cttz_i32:
@@ -502,7 +502,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    addiw a1, a1, 257
 ; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    beqz s0, .LBB8_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    srliw a0, a1, 24
@@ -515,7 +515,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ; RV64ZBB-LABEL: findFirstSet_i32:
 ; RV64ZBB:       # %bb.0:
 ; RV64ZBB-NEXT:    mv a1, a0
-; RV64ZBB-NEXT:    addi a0, zero, -1
+; RV64ZBB-NEXT:    li a0, -1
 ; RV64ZBB-NEXT:    beqz a1, .LBB8_2
 ; RV64ZBB-NEXT:  # %bb.1:
 ; RV64ZBB-NEXT:    ctzw a0, a1
@@ -557,7 +557,7 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64I-NEXT:    addiw a1, a1, 257
 ; RV64I-NEXT:    call __muldi3@plt
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    beqz s0, .LBB9_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    srliw a0, a1, 24
@@ -571,7 +571,7 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64ZBB-LABEL: ffs_i32:
 ; RV64ZBB:       # %bb.0:
 ; RV64ZBB-NEXT:    mv a1, a0
-; RV64ZBB-NEXT:    mv a0, zero
+; RV64ZBB-NEXT:    li a0, 0
 ; RV64ZBB-NEXT:    beqz a1, .LBB9_2
 ; RV64ZBB-NEXT:  # %bb.1:
 ; RV64ZBB-NEXT:    ctzw a0, a1
@@ -643,7 +643,7 @@ define i64 @cttz_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB10_2:
-; RV64I-NEXT:    addi a0, zero, 64
+; RV64I-NEXT:    li a0, 64
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: cttz_i64:
@@ -1141,7 +1141,7 @@ define i64 @bswap_i64(i64 %a) {
 ; RV64I-NEXT:    lui a2, 4080
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    srli a2, a0, 8
-; RV64I-NEXT:    addi a3, zero, 255
+; RV64I-NEXT:    li a3, 255
 ; RV64I-NEXT:    slli a4, a3, 24
 ; RV64I-NEXT:    and a2, a2, a4
 ; RV64I-NEXT:    or a1, a2, a1

diff --git a/llvm/test/CodeGen/RISCV/rv64zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbp.ll
index c8ef684a05f1..a26823b32959 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbp.ll
@@ -2445,7 +2445,7 @@ define i64 @bswap_i64(i64 %a) {
 ; RV64I-NEXT:    lui a2, 4080
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    srli a2, a0, 8
-; RV64I-NEXT:    addi a3, zero, 255
+; RV64I-NEXT:    li a3, 255
 ; RV64I-NEXT:    slli a4, a3, 24
 ; RV64I-NEXT:    and a2, a2, a4
 ; RV64I-NEXT:    or a1, a2, a1
@@ -2654,7 +2654,7 @@ define i64 @bitreverse_i64(i64 %a) nounwind {
 ; RV64I-NEXT:    lui a2, 4080
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    srli a2, a0, 8
-; RV64I-NEXT:    addi a3, zero, 255
+; RV64I-NEXT:    li a3, 255
 ; RV64I-NEXT:    slli a4, a3, 24
 ; RV64I-NEXT:    and a2, a2, a4
 ; RV64I-NEXT:    or a1, a2, a1
@@ -2842,7 +2842,7 @@ define i64 @bitreverse_bswap_i64(i64 %a) {
 ; RV64I-NEXT:    lui a6, 4080
 ; RV64I-NEXT:    and a3, a2, a6
 ; RV64I-NEXT:    srli a4, a0, 8
-; RV64I-NEXT:    addi a1, zero, 255
+; RV64I-NEXT:    li a1, 255
 ; RV64I-NEXT:    slli a7, a1, 24
 ; RV64I-NEXT:    and a4, a4, a7
 ; RV64I-NEXT:    or a3, a4, a3
@@ -3209,7 +3209,7 @@ define i64 @shfl8_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-NEXT:    addi a1, a1, 255
 ; RV64I-NEXT:    and a1, a0, a1
 ; RV64I-NEXT:    slli a2, a0, 8
-; RV64I-NEXT:    addi a3, zero, 255
+; RV64I-NEXT:    li a3, 255
 ; RV64I-NEXT:    slli a4, a3, 32
 ; RV64I-NEXT:    addi a4, a4, 255
 ; RV64I-NEXT:    slli a4, a4, 16
@@ -3241,7 +3241,7 @@ define i64 @shfl8_i64(i64 %a, i64 %b) nounwind {
 define i64 @shfl16(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: shfl16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 32
 ; RV64I-NEXT:    addi a1, a1, 1
 ; RV64I-NEXT:    slli a1, a1, 16
@@ -3333,7 +3333,7 @@ define i64 @packu_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: packu_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    addi a2, zero, -1
+; RV64I-NEXT:    li a2, -1
 ; RV64I-NEXT:    slli a2, a2, 32
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    or a0, a1, a0

diff --git a/llvm/test/CodeGen/RISCV/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64zbs.ll
index bf96ba42de12..251f3cf0bf0a 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbs.ll
@@ -7,7 +7,7 @@
 define signext i32 @sbclr_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sbclr_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    sllw a1, a2, a1
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    and a0, a1, a0
@@ -15,7 +15,7 @@ define signext i32 @sbclr_i32(i32 signext %a, i32 signext %b) nounwind {
 ;
 ; RV64ZBS-LABEL: sbclr_i32:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    addi a2, zero, 1
+; RV64ZBS-NEXT:    li a2, 1
 ; RV64ZBS-NEXT:    sllw a1, a2, a1
 ; RV64ZBS-NEXT:    not a1, a1
 ; RV64ZBS-NEXT:    and a0, a1, a0
@@ -30,7 +30,7 @@ define signext i32 @sbclr_i32(i32 signext %a, i32 signext %b) nounwind {
 define signext i32 @sbclr_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sbclr_i32_no_mask:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    sllw a1, a2, a1
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    and a0, a1, a0
@@ -38,7 +38,7 @@ define signext i32 @sbclr_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
 ;
 ; RV64ZBS-LABEL: sbclr_i32_no_mask:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    addi a2, zero, 1
+; RV64ZBS-NEXT:    li a2, 1
 ; RV64ZBS-NEXT:    sllw a1, a2, a1
 ; RV64ZBS-NEXT:    not a1, a1
 ; RV64ZBS-NEXT:    and a0, a1, a0
@@ -53,7 +53,7 @@ define signext i32 @sbclr_i32_load(i32* %p, i32 signext %b) nounwind {
 ; RV64I-LABEL: sbclr_i32_load:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lw a0, 0(a0)
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    sllw a1, a2, a1
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    and a0, a1, a0
@@ -63,7 +63,7 @@ define signext i32 @sbclr_i32_load(i32* %p, i32 signext %b) nounwind {
 ; RV64ZBS-LABEL: sbclr_i32_load:
 ; RV64ZBS:       # %bb.0:
 ; RV64ZBS-NEXT:    lw a0, 0(a0)
-; RV64ZBS-NEXT:    addi a2, zero, 1
+; RV64ZBS-NEXT:    li a2, 1
 ; RV64ZBS-NEXT:    sllw a1, a2, a1
 ; RV64ZBS-NEXT:    not a1, a1
 ; RV64ZBS-NEXT:    and a0, a1, a0
@@ -79,7 +79,7 @@ define signext i32 @sbclr_i32_load(i32* %p, i32 signext %b) nounwind {
 define i64 @sbclr_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: sbclr_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    sll a1, a2, a1
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    and a0, a1, a0
@@ -99,7 +99,7 @@ define i64 @sbclr_i64(i64 %a, i64 %b) nounwind {
 define i64 @sbclr_i64_no_mask(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: sbclr_i64_no_mask:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    sll a1, a2, a1
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    and a0, a1, a0
@@ -118,14 +118,14 @@ define i64 @sbclr_i64_no_mask(i64 %a, i64 %b) nounwind {
 define signext i32 @sbset_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sbset_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    sllw a1, a2, a1
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBS-LABEL: sbset_i32:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    addi a2, zero, 1
+; RV64ZBS-NEXT:    li a2, 1
 ; RV64ZBS-NEXT:    sllw a1, a2, a1
 ; RV64ZBS-NEXT:    or a0, a1, a0
 ; RV64ZBS-NEXT:    ret
@@ -138,14 +138,14 @@ define signext i32 @sbset_i32(i32 signext %a, i32 signext %b) nounwind {
 define signext i32 @sbset_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sbset_i32_no_mask:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    sllw a1, a2, a1
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBS-LABEL: sbset_i32_no_mask:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    addi a2, zero, 1
+; RV64ZBS-NEXT:    li a2, 1
 ; RV64ZBS-NEXT:    sllw a1, a2, a1
 ; RV64ZBS-NEXT:    or a0, a1, a0
 ; RV64ZBS-NEXT:    ret
@@ -158,7 +158,7 @@ define signext i32 @sbset_i32_load(i32* %p, i32 signext %b) nounwind {
 ; RV64I-LABEL: sbset_i32_load:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lw a0, 0(a0)
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    sllw a1, a2, a1
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    sext.w a0, a0
@@ -167,7 +167,7 @@ define signext i32 @sbset_i32_load(i32* %p, i32 signext %b) nounwind {
 ; RV64ZBS-LABEL: sbset_i32_load:
 ; RV64ZBS:       # %bb.0:
 ; RV64ZBS-NEXT:    lw a0, 0(a0)
-; RV64ZBS-NEXT:    addi a2, zero, 1
+; RV64ZBS-NEXT:    li a2, 1
 ; RV64ZBS-NEXT:    sllw a1, a2, a1
 ; RV64ZBS-NEXT:    or a0, a1, a0
 ; RV64ZBS-NEXT:    sext.w a0, a0
@@ -182,13 +182,13 @@ define signext i32 @sbset_i32_load(i32* %p, i32 signext %b) nounwind {
 define signext i32 @sbset_i32_zero(i32 signext %a) nounwind {
 ; RV64I-LABEL: sbset_i32_zero:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 1
+; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    sllw a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBS-LABEL: sbset_i32_zero:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    addi a1, zero, 1
+; RV64ZBS-NEXT:    li a1, 1
 ; RV64ZBS-NEXT:    sllw a0, a1, a0
 ; RV64ZBS-NEXT:    ret
   %shl = shl i32 1, %a
@@ -198,7 +198,7 @@ define signext i32 @sbset_i32_zero(i32 signext %a) nounwind {
 define i64 @sbset_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: sbset_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    sll a1, a2, a1
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
@@ -216,7 +216,7 @@ define i64 @sbset_i64(i64 %a, i64 %b) nounwind {
 define i64 @sbset_i64_no_mask(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: sbset_i64_no_mask:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    sll a1, a2, a1
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
@@ -234,7 +234,7 @@ define i64 @sbset_i64_no_mask(i64 %a, i64 %b) nounwind {
 define signext i64 @sbset_i64_zero(i64 signext %a) nounwind {
 ; RV64I-LABEL: sbset_i64_zero:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 1
+; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    sll a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
@@ -249,14 +249,14 @@ define signext i64 @sbset_i64_zero(i64 signext %a) nounwind {
 define signext i32 @sbinv_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sbinv_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    sllw a1, a2, a1
 ; RV64I-NEXT:    xor a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBS-LABEL: sbinv_i32:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    addi a2, zero, 1
+; RV64ZBS-NEXT:    li a2, 1
 ; RV64ZBS-NEXT:    sllw a1, a2, a1
 ; RV64ZBS-NEXT:    xor a0, a1, a0
 ; RV64ZBS-NEXT:    ret
@@ -269,14 +269,14 @@ define signext i32 @sbinv_i32(i32 signext %a, i32 signext %b) nounwind {
 define signext i32 @sbinv_i32_no_mask(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: sbinv_i32_no_mask:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    sllw a1, a2, a1
 ; RV64I-NEXT:    xor a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBS-LABEL: sbinv_i32_no_mask:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    addi a2, zero, 1
+; RV64ZBS-NEXT:    li a2, 1
 ; RV64ZBS-NEXT:    sllw a1, a2, a1
 ; RV64ZBS-NEXT:    xor a0, a1, a0
 ; RV64ZBS-NEXT:    ret
@@ -289,7 +289,7 @@ define signext i32 @sbinv_i32_load(i32* %p, i32 signext %b) nounwind {
 ; RV64I-LABEL: sbinv_i32_load:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lw a0, 0(a0)
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    sllw a1, a2, a1
 ; RV64I-NEXT:    xor a0, a1, a0
 ; RV64I-NEXT:    sext.w a0, a0
@@ -298,7 +298,7 @@ define signext i32 @sbinv_i32_load(i32* %p, i32 signext %b) nounwind {
 ; RV64ZBS-LABEL: sbinv_i32_load:
 ; RV64ZBS:       # %bb.0:
 ; RV64ZBS-NEXT:    lw a0, 0(a0)
-; RV64ZBS-NEXT:    addi a2, zero, 1
+; RV64ZBS-NEXT:    li a2, 1
 ; RV64ZBS-NEXT:    sllw a1, a2, a1
 ; RV64ZBS-NEXT:    xor a0, a1, a0
 ; RV64ZBS-NEXT:    sext.w a0, a0
@@ -312,7 +312,7 @@ define signext i32 @sbinv_i32_load(i32* %p, i32 signext %b) nounwind {
 define i64 @sbinv_i64(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: sbinv_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    sll a1, a2, a1
 ; RV64I-NEXT:    xor a0, a1, a0
 ; RV64I-NEXT:    ret
@@ -330,7 +330,7 @@ define i64 @sbinv_i64(i64 %a, i64 %b) nounwind {
 define i64 @sbinv_i64_no_mask(i64 %a, i64 %b) nounwind {
 ; RV64I-LABEL: sbinv_i64_no_mask:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a2, zero, 1
+; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    sll a1, a2, a1
 ; RV64I-NEXT:    xor a0, a1, a0
 ; RV64I-NEXT:    ret
@@ -557,7 +557,7 @@ define i64 @sbclri_i64_30(i64 %a) nounwind {
 define i64 @sbclri_i64_31(i64 %a) nounwind {
 ; RV64I-LABEL: sbclri_i64_31:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 31
 ; RV64I-NEXT:    addi a1, a1, -1
 ; RV64I-NEXT:    and a0, a0, a1
@@ -574,7 +574,7 @@ define i64 @sbclri_i64_31(i64 %a) nounwind {
 define i64 @sbclri_i64_62(i64 %a) nounwind {
 ; RV64I-LABEL: sbclri_i64_62:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 62
 ; RV64I-NEXT:    addi a1, a1, -1
 ; RV64I-NEXT:    and a0, a0, a1
@@ -591,7 +591,7 @@ define i64 @sbclri_i64_62(i64 %a) nounwind {
 define i64 @sbclri_i64_63(i64 %a) nounwind {
 ; RV64I-LABEL: sbclri_i64_63:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    srli a1, a1, 1
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -747,7 +747,7 @@ define i64 @sbseti_i64_30(i64 %a) nounwind {
 define i64 @sbseti_i64_31(i64 %a) nounwind {
 ; RV64I-LABEL: sbseti_i64_31:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 1
+; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    slli a1, a1, 31
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -763,7 +763,7 @@ define i64 @sbseti_i64_31(i64 %a) nounwind {
 define i64 @sbseti_i64_62(i64 %a) nounwind {
 ; RV64I-LABEL: sbseti_i64_62:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 1
+; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    slli a1, a1, 62
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -779,7 +779,7 @@ define i64 @sbseti_i64_62(i64 %a) nounwind {
 define i64 @sbseti_i64_63(i64 %a) nounwind {
 ; RV64I-LABEL: sbseti_i64_63:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -901,7 +901,7 @@ define i64 @sbinvi_i64_30(i64 %a) nounwind {
 define i64 @sbinvi_i64_31(i64 %a) nounwind {
 ; RV64I-LABEL: sbinvi_i64_31:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 1
+; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    slli a1, a1, 31
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -917,7 +917,7 @@ define i64 @sbinvi_i64_31(i64 %a) nounwind {
 define i64 @sbinvi_i64_62(i64 %a) nounwind {
 ; RV64I-LABEL: sbinvi_i64_62:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 1
+; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    slli a1, a1, 62
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -933,7 +933,7 @@ define i64 @sbinvi_i64_62(i64 %a) nounwind {
 define i64 @sbinvi_i64_63(i64 %a) nounwind {
 ; RV64I-LABEL: sbinvi_i64_63:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -949,7 +949,7 @@ define i64 @sbinvi_i64_63(i64 %a) nounwind {
 define i64 @xor_i64_large(i64 %a) nounwind {
 ; RV64I-LABEL: xor_i64_large:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 1
+; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    slli a1, a1, 32
 ; RV64I-NEXT:    addi a1, a1, 1
 ; RV64I-NEXT:    xor a0, a0, a1
@@ -998,7 +998,7 @@ define i64 @xor_i64_96(i64 %a) nounwind {
 define i64 @or_i64_large(i64 %a) nounwind {
 ; RV64I-LABEL: or_i64_large:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 1
+; RV64I-NEXT:    li a1, 1
 ; RV64I-NEXT:    slli a1, a1, 32
 ; RV64I-NEXT:    addi a1, a1, 1
 ; RV64I-NEXT:    or a0, a0, a1

diff --git a/llvm/test/CodeGen/RISCV/rvv/allocate-lmul-2-4-8.ll b/llvm/test/CodeGen/RISCV/rvv/allocate-lmul-2-4-8.ll
index 773b8e4e32fb..44e11962b5f8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/allocate-lmul-2-4-8.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/allocate-lmul-2-4-8.ll
@@ -93,7 +93,7 @@ define void @lmul2_and_4() nounwind {
 ; CHECK-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    addi s0, sp, 32
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a1, zero, 6
+; CHECK-NEXT:    li a1, 6
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    sub sp, sp, a0
 ; CHECK-NEXT:    andi sp, sp, -32
@@ -176,7 +176,7 @@ define void @lmul4_and_2() nounwind {
 ; CHECK-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    addi s0, sp, 32
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a1, zero, 6
+; CHECK-NEXT:    li a1, 6
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    sub sp, sp, a0
 ; CHECK-NEXT:    andi sp, sp, -32
@@ -198,7 +198,7 @@ define void @lmul4_and_2_x2_0() nounwind {
 ; CHECK-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    addi s0, sp, 32
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a1, zero, 12
+; CHECK-NEXT:    li a1, 12
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    sub sp, sp, a0
 ; CHECK-NEXT:    andi sp, sp, -32
@@ -222,7 +222,7 @@ define void @lmul4_and_2_x2_1() nounwind {
 ; CHECK-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    addi s0, sp, 32
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a1, zero, 12
+; CHECK-NEXT:    li a1, 12
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    sub sp, sp, a0
 ; CHECK-NEXT:    andi sp, sp, -32
@@ -247,7 +247,7 @@ define void @gpr_and_lmul1_and_2() nounwind {
 ; CHECK-NEXT:    slli a1, a0, 1
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    sub sp, sp, a0
-; CHECK-NEXT:    addi a0, zero, 3
+; CHECK-NEXT:    li a0, 3
 ; CHECK-NEXT:    sd a0, 8(sp)
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a1, a0, 1
@@ -274,7 +274,7 @@ define void @gpr_and_lmul1_and_4() nounwind {
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    sub sp, sp, a0
 ; CHECK-NEXT:    andi sp, sp, -32
-; CHECK-NEXT:    addi a0, zero, 3
+; CHECK-NEXT:    li a0, 3
 ; CHECK-NEXT:    sd a0, 8(sp)
 ; CHECK-NEXT:    addi sp, s0, -32
 ; CHECK-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -320,7 +320,7 @@ define void @lmul_1_2_4_8_x2_0() nounwind {
 ; CHECK-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    addi s0, sp, 64
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a1, zero, 30
+; CHECK-NEXT:    li a1, 30
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    sub sp, sp, a0
 ; CHECK-NEXT:    andi sp, sp, -64
@@ -348,7 +348,7 @@ define void @lmul_1_2_4_8_x2_1() nounwind {
 ; CHECK-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    addi s0, sp, 64
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a1, zero, 30
+; CHECK-NEXT:    li a1, 30
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    sub sp, sp, a0
 ; CHECK-NEXT:    andi sp, sp, -64

diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
index 053b9d85de7f..164dfc68e538 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll
@@ -12,13 +12,13 @@ define <vscale x 1 x i8> @bitreverse_nxv1i8(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    vsrl.vi v9, v8, 2
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v9, v9, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vsll.vi v8, v8, 2
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    vsrl.vi v9, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v9, v9, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
@@ -39,13 +39,13 @@ define <vscale x 2 x i8> @bitreverse_nxv2i8(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    vsrl.vi v9, v8, 2
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v9, v9, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vsll.vi v8, v8, 2
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    vsrl.vi v9, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v9, v9, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
@@ -66,13 +66,13 @@ define <vscale x 4 x i8> @bitreverse_nxv4i8(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    vsrl.vi v9, v8, 2
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v9, v9, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vsll.vi v8, v8, 2
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    vsrl.vi v9, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v9, v9, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
@@ -93,13 +93,13 @@ define <vscale x 8 x i8> @bitreverse_nxv8i8(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    vor.vv v8, v8, v9
 ; CHECK-NEXT:    vsrl.vi v9, v8, 2
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v9, v9, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vsll.vi v8, v8, 2
 ; CHECK-NEXT:    vor.vv v8, v9, v8
 ; CHECK-NEXT:    vsrl.vi v9, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v9, v9, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
@@ -120,13 +120,13 @@ define <vscale x 16 x i8> @bitreverse_nxv16i8(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    vor.vv v8, v8, v10
 ; CHECK-NEXT:    vsrl.vi v10, v8, 2
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v10, v10, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vsll.vi v8, v8, 2
 ; CHECK-NEXT:    vor.vv v8, v10, v8
 ; CHECK-NEXT:    vsrl.vi v10, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v10, v10, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
@@ -147,13 +147,13 @@ define <vscale x 32 x i8> @bitreverse_nxv32i8(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    vor.vv v8, v8, v12
 ; CHECK-NEXT:    vsrl.vi v12, v8, 2
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v12, v12, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vsll.vi v8, v8, 2
 ; CHECK-NEXT:    vor.vv v8, v12, v8
 ; CHECK-NEXT:    vsrl.vi v12, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v12, v12, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
@@ -174,13 +174,13 @@ define <vscale x 64 x i8> @bitreverse_nxv64i8(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vand.vi v8, v8, 15
 ; CHECK-NEXT:    vor.vv v8, v8, v16
 ; CHECK-NEXT:    vsrl.vi v16, v8, 2
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v16, v16, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vsll.vi v8, v8, 2
 ; CHECK-NEXT:    vor.vv v8, v16, v8
 ; CHECK-NEXT:    vsrl.vi v16, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v16, v16, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
@@ -985,7 +985,7 @@ define <vscale x 1 x i64> @bitreverse_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    lui a0, 4080
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    sw zero, 8(sp)
-; RV32-NEXT:    addi a1, zero, 255
+; RV32-NEXT:    li a1, 255
 ; RV32-NEXT:    sw a1, 12(sp)
 ; RV32-NEXT:    lui a1, 16
 ; RV32-NEXT:    addi a1, a1, -256
@@ -1002,10 +1002,10 @@ define <vscale x 1 x i64> @bitreverse_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    addi a2, a2, 1365
 ; RV32-NEXT:    sw a2, 12(sp)
 ; RV32-NEXT:    sw a2, 8(sp)
-; RV32-NEXT:    addi a2, zero, 56
+; RV32-NEXT:    li a2, 56
 ; RV32-NEXT:    vsetvli a3, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v9, v8, a2
-; RV32-NEXT:    addi a3, zero, 40
+; RV32-NEXT:    li a3, 40
 ; RV32-NEXT:    vsrl.vx v10, v8, a3
 ; RV32-NEXT:    vand.vx v10, v10, a1
 ; RV32-NEXT:    vor.vv v9, v10, v9
@@ -1060,10 +1060,10 @@ define <vscale x 1 x i64> @bitreverse_nxv1i64(<vscale x 1 x i64> %va) {
 ;
 ; RV64-LABEL: bitreverse_nxv1i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vsrl.vx v9, v8, a0
-; RV64-NEXT:    addi a1, zero, 40
+; RV64-NEXT:    li a1, 40
 ; RV64-NEXT:    vsrl.vx v10, v8, a1
 ; RV64-NEXT:    lui a2, 16
 ; RV64-NEXT:    addiw a2, a2, -256
@@ -1073,7 +1073,7 @@ define <vscale x 1 x i64> @bitreverse_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64-NEXT:    lui a2, 4080
 ; RV64-NEXT:    vand.vx v10, v10, a2
 ; RV64-NEXT:    vsrl.vi v11, v8, 8
-; RV64-NEXT:    addi a2, zero, 255
+; RV64-NEXT:    li a2, 255
 ; RV64-NEXT:    slli a3, a2, 24
 ; RV64-NEXT:    vand.vx v11, v11, a3
 ; RV64-NEXT:    vor.vv v10, v11, v10
@@ -1148,7 +1148,7 @@ define <vscale x 2 x i64> @bitreverse_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    lui a0, 4080
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    sw zero, 8(sp)
-; RV32-NEXT:    addi a1, zero, 255
+; RV32-NEXT:    li a1, 255
 ; RV32-NEXT:    sw a1, 12(sp)
 ; RV32-NEXT:    lui a1, 16
 ; RV32-NEXT:    addi a1, a1, -256
@@ -1165,10 +1165,10 @@ define <vscale x 2 x i64> @bitreverse_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    addi a2, a2, 1365
 ; RV32-NEXT:    sw a2, 12(sp)
 ; RV32-NEXT:    sw a2, 8(sp)
-; RV32-NEXT:    addi a2, zero, 56
+; RV32-NEXT:    li a2, 56
 ; RV32-NEXT:    vsetvli a3, zero, e64, m2, ta, mu
 ; RV32-NEXT:    vsrl.vx v10, v8, a2
-; RV32-NEXT:    addi a3, zero, 40
+; RV32-NEXT:    li a3, 40
 ; RV32-NEXT:    vsrl.vx v12, v8, a3
 ; RV32-NEXT:    vand.vx v12, v12, a1
 ; RV32-NEXT:    vor.vv v10, v12, v10
@@ -1223,10 +1223,10 @@ define <vscale x 2 x i64> @bitreverse_nxv2i64(<vscale x 2 x i64> %va) {
 ;
 ; RV64-LABEL: bitreverse_nxv2i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; RV64-NEXT:    vsrl.vx v10, v8, a0
-; RV64-NEXT:    addi a1, zero, 40
+; RV64-NEXT:    li a1, 40
 ; RV64-NEXT:    vsrl.vx v12, v8, a1
 ; RV64-NEXT:    lui a2, 16
 ; RV64-NEXT:    addiw a2, a2, -256
@@ -1236,7 +1236,7 @@ define <vscale x 2 x i64> @bitreverse_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64-NEXT:    lui a2, 4080
 ; RV64-NEXT:    vand.vx v12, v12, a2
 ; RV64-NEXT:    vsrl.vi v14, v8, 8
-; RV64-NEXT:    addi a2, zero, 255
+; RV64-NEXT:    li a2, 255
 ; RV64-NEXT:    slli a3, a2, 24
 ; RV64-NEXT:    vand.vx v14, v14, a3
 ; RV64-NEXT:    vor.vv v12, v14, v12
@@ -1311,7 +1311,7 @@ define <vscale x 4 x i64> @bitreverse_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    lui a0, 4080
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    sw zero, 8(sp)
-; RV32-NEXT:    addi a1, zero, 255
+; RV32-NEXT:    li a1, 255
 ; RV32-NEXT:    sw a1, 12(sp)
 ; RV32-NEXT:    lui a1, 16
 ; RV32-NEXT:    addi a1, a1, -256
@@ -1328,10 +1328,10 @@ define <vscale x 4 x i64> @bitreverse_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    addi a2, a2, 1365
 ; RV32-NEXT:    sw a2, 12(sp)
 ; RV32-NEXT:    sw a2, 8(sp)
-; RV32-NEXT:    addi a2, zero, 56
+; RV32-NEXT:    li a2, 56
 ; RV32-NEXT:    vsetvli a3, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsrl.vx v12, v8, a2
-; RV32-NEXT:    addi a3, zero, 40
+; RV32-NEXT:    li a3, 40
 ; RV32-NEXT:    vsrl.vx v16, v8, a3
 ; RV32-NEXT:    vand.vx v16, v16, a1
 ; RV32-NEXT:    vor.vv v12, v16, v12
@@ -1386,10 +1386,10 @@ define <vscale x 4 x i64> @bitreverse_nxv4i64(<vscale x 4 x i64> %va) {
 ;
 ; RV64-LABEL: bitreverse_nxv4i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; RV64-NEXT:    vsrl.vx v12, v8, a0
-; RV64-NEXT:    addi a1, zero, 40
+; RV64-NEXT:    li a1, 40
 ; RV64-NEXT:    vsrl.vx v16, v8, a1
 ; RV64-NEXT:    lui a2, 16
 ; RV64-NEXT:    addiw a2, a2, -256
@@ -1399,7 +1399,7 @@ define <vscale x 4 x i64> @bitreverse_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64-NEXT:    lui a2, 4080
 ; RV64-NEXT:    vand.vx v16, v16, a2
 ; RV64-NEXT:    vsrl.vi v20, v8, 8
-; RV64-NEXT:    addi a2, zero, 255
+; RV64-NEXT:    li a2, 255
 ; RV64-NEXT:    slli a3, a2, 24
 ; RV64-NEXT:    vand.vx v20, v20, a3
 ; RV64-NEXT:    vor.vv v16, v20, v16
@@ -1477,7 +1477,7 @@ define <vscale x 8 x i64> @bitreverse_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    lui a0, 4080
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    sw zero, 8(sp)
-; RV32-NEXT:    addi a1, zero, 255
+; RV32-NEXT:    li a1, 255
 ; RV32-NEXT:    sw a1, 12(sp)
 ; RV32-NEXT:    lui a1, 16
 ; RV32-NEXT:    addi a1, a1, -256
@@ -1494,10 +1494,10 @@ define <vscale x 8 x i64> @bitreverse_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    addi a2, a2, 1365
 ; RV32-NEXT:    sw a2, 12(sp)
 ; RV32-NEXT:    sw a2, 8(sp)
-; RV32-NEXT:    addi a2, zero, 56
+; RV32-NEXT:    li a2, 56
 ; RV32-NEXT:    vsetvli a3, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsrl.vx v16, v8, a2
-; RV32-NEXT:    addi a3, zero, 40
+; RV32-NEXT:    li a3, 40
 ; RV32-NEXT:    vsrl.vx v24, v8, a3
 ; RV32-NEXT:    addi a4, sp, 8
 ; RV32-NEXT:    vlse64.v v0, (a4), zero
@@ -1579,10 +1579,10 @@ define <vscale x 8 x i64> @bitreverse_nxv8i64(<vscale x 8 x i64> %va) {
 ;
 ; RV64-LABEL: bitreverse_nxv8i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vsrl.vx v16, v8, a0
-; RV64-NEXT:    addi a1, zero, 40
+; RV64-NEXT:    li a1, 40
 ; RV64-NEXT:    vsrl.vx v24, v8, a1
 ; RV64-NEXT:    lui a2, 16
 ; RV64-NEXT:    addiw a2, a2, -256
@@ -1592,7 +1592,7 @@ define <vscale x 8 x i64> @bitreverse_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64-NEXT:    lui a2, 4080
 ; RV64-NEXT:    vand.vx v24, v24, a2
 ; RV64-NEXT:    vsrl.vi v0, v8, 8
-; RV64-NEXT:    addi a2, zero, 255
+; RV64-NEXT:    li a2, 255
 ; RV64-NEXT:    slli a3, a2, 24
 ; RV64-NEXT:    vand.vx v0, v0, a3
 ; RV64-NEXT:    vor.vv v24, v0, v24

diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
index 6c02583847b2..8c1fa0686168 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll
@@ -286,15 +286,15 @@ define <vscale x 1 x i64> @bswap_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    lui a0, 4080
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    sw zero, 8(sp)
-; RV32-NEXT:    addi a1, zero, 255
+; RV32-NEXT:    li a1, 255
 ; RV32-NEXT:    sw a1, 12(sp)
 ; RV32-NEXT:    lui a1, 16
 ; RV32-NEXT:    addi a1, a1, -256
 ; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    addi a2, zero, 56
+; RV32-NEXT:    li a2, 56
 ; RV32-NEXT:    vsetvli a3, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v9, v8, a2
-; RV32-NEXT:    addi a3, zero, 40
+; RV32-NEXT:    li a3, 40
 ; RV32-NEXT:    vsrl.vx v10, v8, a3
 ; RV32-NEXT:    vand.vx v10, v10, a1
 ; RV32-NEXT:    vor.vv v9, v10, v9
@@ -328,10 +328,10 @@ define <vscale x 1 x i64> @bswap_nxv1i64(<vscale x 1 x i64> %va) {
 ;
 ; RV64-LABEL: bswap_nxv1i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vsrl.vx v9, v8, a0
-; RV64-NEXT:    addi a1, zero, 40
+; RV64-NEXT:    li a1, 40
 ; RV64-NEXT:    vsrl.vx v10, v8, a1
 ; RV64-NEXT:    lui a2, 16
 ; RV64-NEXT:    addiw a2, a2, -256
@@ -341,7 +341,7 @@ define <vscale x 1 x i64> @bswap_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64-NEXT:    lui a2, 4080
 ; RV64-NEXT:    vand.vx v10, v10, a2
 ; RV64-NEXT:    vsrl.vi v11, v8, 8
-; RV64-NEXT:    addi a2, zero, 255
+; RV64-NEXT:    li a2, 255
 ; RV64-NEXT:    slli a3, a2, 24
 ; RV64-NEXT:    vand.vx v11, v11, a3
 ; RV64-NEXT:    vor.vv v10, v11, v10
@@ -377,15 +377,15 @@ define <vscale x 2 x i64> @bswap_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    lui a0, 4080
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    sw zero, 8(sp)
-; RV32-NEXT:    addi a1, zero, 255
+; RV32-NEXT:    li a1, 255
 ; RV32-NEXT:    sw a1, 12(sp)
 ; RV32-NEXT:    lui a1, 16
 ; RV32-NEXT:    addi a1, a1, -256
 ; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    addi a2, zero, 56
+; RV32-NEXT:    li a2, 56
 ; RV32-NEXT:    vsetvli a3, zero, e64, m2, ta, mu
 ; RV32-NEXT:    vsrl.vx v10, v8, a2
-; RV32-NEXT:    addi a3, zero, 40
+; RV32-NEXT:    li a3, 40
 ; RV32-NEXT:    vsrl.vx v12, v8, a3
 ; RV32-NEXT:    vand.vx v12, v12, a1
 ; RV32-NEXT:    vor.vv v10, v12, v10
@@ -419,10 +419,10 @@ define <vscale x 2 x i64> @bswap_nxv2i64(<vscale x 2 x i64> %va) {
 ;
 ; RV64-LABEL: bswap_nxv2i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; RV64-NEXT:    vsrl.vx v10, v8, a0
-; RV64-NEXT:    addi a1, zero, 40
+; RV64-NEXT:    li a1, 40
 ; RV64-NEXT:    vsrl.vx v12, v8, a1
 ; RV64-NEXT:    lui a2, 16
 ; RV64-NEXT:    addiw a2, a2, -256
@@ -432,7 +432,7 @@ define <vscale x 2 x i64> @bswap_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64-NEXT:    lui a2, 4080
 ; RV64-NEXT:    vand.vx v12, v12, a2
 ; RV64-NEXT:    vsrl.vi v14, v8, 8
-; RV64-NEXT:    addi a2, zero, 255
+; RV64-NEXT:    li a2, 255
 ; RV64-NEXT:    slli a3, a2, 24
 ; RV64-NEXT:    vand.vx v14, v14, a3
 ; RV64-NEXT:    vor.vv v12, v14, v12
@@ -468,15 +468,15 @@ define <vscale x 4 x i64> @bswap_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    lui a0, 4080
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    sw zero, 8(sp)
-; RV32-NEXT:    addi a1, zero, 255
+; RV32-NEXT:    li a1, 255
 ; RV32-NEXT:    sw a1, 12(sp)
 ; RV32-NEXT:    lui a1, 16
 ; RV32-NEXT:    addi a1, a1, -256
 ; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    addi a2, zero, 56
+; RV32-NEXT:    li a2, 56
 ; RV32-NEXT:    vsetvli a3, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsrl.vx v12, v8, a2
-; RV32-NEXT:    addi a3, zero, 40
+; RV32-NEXT:    li a3, 40
 ; RV32-NEXT:    vsrl.vx v16, v8, a3
 ; RV32-NEXT:    vand.vx v16, v16, a1
 ; RV32-NEXT:    vor.vv v12, v16, v12
@@ -510,10 +510,10 @@ define <vscale x 4 x i64> @bswap_nxv4i64(<vscale x 4 x i64> %va) {
 ;
 ; RV64-LABEL: bswap_nxv4i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; RV64-NEXT:    vsrl.vx v12, v8, a0
-; RV64-NEXT:    addi a1, zero, 40
+; RV64-NEXT:    li a1, 40
 ; RV64-NEXT:    vsrl.vx v16, v8, a1
 ; RV64-NEXT:    lui a2, 16
 ; RV64-NEXT:    addiw a2, a2, -256
@@ -523,7 +523,7 @@ define <vscale x 4 x i64> @bswap_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64-NEXT:    lui a2, 4080
 ; RV64-NEXT:    vand.vx v16, v16, a2
 ; RV64-NEXT:    vsrl.vi v20, v8, 8
-; RV64-NEXT:    addi a2, zero, 255
+; RV64-NEXT:    li a2, 255
 ; RV64-NEXT:    slli a3, a2, 24
 ; RV64-NEXT:    vand.vx v20, v20, a3
 ; RV64-NEXT:    vor.vv v16, v20, v16
@@ -562,15 +562,15 @@ define <vscale x 8 x i64> @bswap_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    lui a0, 4080
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    sw zero, 8(sp)
-; RV32-NEXT:    addi a1, zero, 255
+; RV32-NEXT:    li a1, 255
 ; RV32-NEXT:    sw a1, 12(sp)
 ; RV32-NEXT:    lui a1, 16
 ; RV32-NEXT:    addi a1, a1, -256
 ; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    addi a2, zero, 56
+; RV32-NEXT:    li a2, 56
 ; RV32-NEXT:    vsetvli a3, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsrl.vx v16, v8, a2
-; RV32-NEXT:    addi a3, zero, 40
+; RV32-NEXT:    li a3, 40
 ; RV32-NEXT:    vsrl.vx v24, v8, a3
 ; RV32-NEXT:    addi a4, sp, 8
 ; RV32-NEXT:    vlse64.v v0, (a4), zero
@@ -631,10 +631,10 @@ define <vscale x 8 x i64> @bswap_nxv8i64(<vscale x 8 x i64> %va) {
 ;
 ; RV64-LABEL: bswap_nxv8i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vsrl.vx v16, v8, a0
-; RV64-NEXT:    addi a1, zero, 40
+; RV64-NEXT:    li a1, 40
 ; RV64-NEXT:    vsrl.vx v24, v8, a1
 ; RV64-NEXT:    lui a2, 16
 ; RV64-NEXT:    addiw a2, a2, -256
@@ -644,7 +644,7 @@ define <vscale x 8 x i64> @bswap_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64-NEXT:    lui a2, 4080
 ; RV64-NEXT:    vand.vx v24, v24, a2
 ; RV64-NEXT:    vsrl.vi v0, v8, 8
-; RV64-NEXT:    addi a2, zero, 255
+; RV64-NEXT:    li a2, 255
 ; RV64-NEXT:    slli a3, a2, 24
 ; RV64-NEXT:    vand.vx v0, v0, a3
 ; RV64-NEXT:    vor.vv v24, v0, v24

diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
index e1d29e9a47b8..0955eba777f0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -74,7 +74,7 @@ define fastcc <vscale x 64 x i32> @ret_split_nxv64i32(<vscale x 64 x i32>* %x) {
 ; CHECK-NEXT:    add a4, a1, a3
 ; CHECK-NEXT:    vl8re32.v v8, (a4)
 ; CHECK-NEXT:    slli a4, a2, 4
-; CHECK-NEXT:    addi a5, zero, 24
+; CHECK-NEXT:    li a5, 24
 ; CHECK-NEXT:    mul a2, a2, a5
 ; CHECK-NEXT:    add a5, a1, a4
 ; CHECK-NEXT:    vl8re32.v v16, (a1)
@@ -107,7 +107,7 @@ define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(<vscale x 128 x i32>* %x
 ; CHECK-NEXT:    add a4, a1, a6
 ; CHECK-NEXT:    vl8re32.v v8, (a4)
 ; CHECK-NEXT:    csrr a3, vlenb
-; CHECK-NEXT:    addi a4, zero, 24
+; CHECK-NEXT:    li a4, 24
 ; CHECK-NEXT:    mul a3, a3, a4
 ; CHECK-NEXT:    add a3, sp, a3
 ; CHECK-NEXT:    addi a3, a3, 16
@@ -120,7 +120,7 @@ define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(<vscale x 128 x i32>* %x
 ; CHECK-NEXT:    add a3, sp, a3
 ; CHECK-NEXT:    addi a3, a3, 16
 ; CHECK-NEXT:    vs8r.v v8, (a3) # Unknown-size Folded Spill
-; CHECK-NEXT:    addi a5, zero, 24
+; CHECK-NEXT:    li a5, 24
 ; CHECK-NEXT:    mul t1, a2, a5
 ; CHECK-NEXT:    add a3, a1, t1
 ; CHECK-NEXT:    vl8re32.v v8, (a3)
@@ -134,13 +134,13 @@ define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(<vscale x 128 x i32>* %x
 ; CHECK-NEXT:    vl8re32.v v8, (a4)
 ; CHECK-NEXT:    addi a3, sp, 16
 ; CHECK-NEXT:    vs8r.v v8, (a3) # Unknown-size Folded Spill
-; CHECK-NEXT:    addi a4, zero, 40
+; CHECK-NEXT:    li a4, 40
 ; CHECK-NEXT:    mul a4, a2, a4
 ; CHECK-NEXT:    add t0, a1, a4
-; CHECK-NEXT:    addi a5, zero, 48
+; CHECK-NEXT:    li a5, 48
 ; CHECK-NEXT:    mul a5, a2, a5
 ; CHECK-NEXT:    add t2, a1, a5
-; CHECK-NEXT:    addi a3, zero, 56
+; CHECK-NEXT:    li a3, 56
 ; CHECK-NEXT:    mul a2, a2, a3
 ; CHECK-NEXT:    add a3, a1, a2
 ; CHECK-NEXT:    vl8re32.v v8, (a1)
@@ -174,7 +174,7 @@ define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(<vscale x 128 x i32>* %x
 ; CHECK-NEXT:    vs8r.v v8, (a1)
 ; CHECK-NEXT:    add a0, a0, a6
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    addi a2, zero, 24
+; CHECK-NEXT:    li a2, 24
 ; CHECK-NEXT:    mul a1, a1, a2
 ; CHECK-NEXT:    add a1, sp, a1
 ; CHECK-NEXT:    addi a1, a1, 16
@@ -302,7 +302,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
 ; RV32-NEXT:    add a0, a0, a1
 ; RV32-NEXT:    vs8r.v v16, (a0)
 ; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    addi a3, zero, 2
+; RV32-NEXT:    li a3, 2
 ; RV32-NEXT:    addi a1, sp, 16
 ; RV32-NEXT:    vs8r.v v8, (a1)
 ; RV32-NEXT:    vmv8r.v v8, v0
@@ -333,7 +333,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
 ; RV64-NEXT:    add a0, a0, a1
 ; RV64-NEXT:    vs8r.v v16, (a0)
 ; RV64-NEXT:    addi a0, sp, 24
-; RV64-NEXT:    addi a3, zero, 2
+; RV64-NEXT:    li a3, 2
 ; RV64-NEXT:    addi a1, sp, 24
 ; RV64-NEXT:    vs8r.v v8, (a1)
 ; RV64-NEXT:    vmv8r.v v8, v0
@@ -357,7 +357,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
 ; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    .cfi_offset ra, -4
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    addi a3, zero, 48
+; RV32-NEXT:    li a3, 48
 ; RV32-NEXT:    mul a1, a1, a3
 ; RV32-NEXT:    sub sp, sp, a1
 ; RV32-NEXT:    csrr a1, vlenb
@@ -400,7 +400,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
 ; RV32-NEXT:    slli a1, a1, 4
 ; RV32-NEXT:    add a1, sp, a1
 ; RV32-NEXT:    addi a2, a1, 16
-; RV32-NEXT:    addi a5, zero, 42
+; RV32-NEXT:    li a5, 42
 ; RV32-NEXT:    csrr a1, vlenb
 ; RV32-NEXT:    slli a1, a1, 5
 ; RV32-NEXT:    add a1, sp, a1
@@ -415,7 +415,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
 ; RV32-NEXT:    vl8re8.v v16, (a1) # Unknown-size Folded Reload
 ; RV32-NEXT:    call ext3@plt
 ; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    addi a1, zero, 48
+; RV32-NEXT:    li a1, 48
 ; RV32-NEXT:    mul a0, a0, a1
 ; RV32-NEXT:    add sp, sp, a0
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -429,7 +429,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
 ; RV64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    .cfi_offset ra, -8
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    addi a3, zero, 48
+; RV64-NEXT:    li a3, 48
 ; RV64-NEXT:    mul a1, a1, a3
 ; RV64-NEXT:    sub sp, sp, a1
 ; RV64-NEXT:    csrr a1, vlenb
@@ -472,7 +472,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
 ; RV64-NEXT:    slli a1, a1, 4
 ; RV64-NEXT:    add a1, sp, a1
 ; RV64-NEXT:    addi a2, a1, 24
-; RV64-NEXT:    addi a5, zero, 42
+; RV64-NEXT:    li a5, 42
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    slli a1, a1, 5
 ; RV64-NEXT:    add a1, sp, a1
@@ -487,7 +487,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
 ; RV64-NEXT:    vl8re8.v v16, (a1) # Unknown-size Folded Reload
 ; RV64-NEXT:    call ext3@plt
 ; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    addi a1, zero, 48
+; RV64-NEXT:    li a1, 48
 ; RV64-NEXT:    mul a0, a0, a1
 ; RV64-NEXT:    add sp, sp, a0
 ; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -542,25 +542,25 @@ define fastcc <vscale x 32 x i32> @pass_vector_arg_indirect_stack(<vscale x 32 x
 ; RV32-NEXT:    vs8r.v v8, (a0)
 ; RV32-NEXT:    addi a0, sp, 16
 ; RV32-NEXT:    vs8r.v v8, (a0)
-; RV32-NEXT:    addi a1, zero, 1
-; RV32-NEXT:    addi a2, zero, 2
-; RV32-NEXT:    addi a3, zero, 3
-; RV32-NEXT:    addi a4, zero, 4
-; RV32-NEXT:    addi a5, zero, 5
-; RV32-NEXT:    addi a6, zero, 6
-; RV32-NEXT:    addi a7, zero, 7
+; RV32-NEXT:    li a1, 1
+; RV32-NEXT:    li a2, 2
+; RV32-NEXT:    li a3, 3
+; RV32-NEXT:    li a4, 4
+; RV32-NEXT:    li a5, 5
+; RV32-NEXT:    li a6, 6
+; RV32-NEXT:    li a7, 7
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    add a0, sp, a0
 ; RV32-NEXT:    addi t2, a0, 16
 ; RV32-NEXT:    addi t4, sp, 16
-; RV32-NEXT:    addi t6, zero, 8
+; RV32-NEXT:    li t6, 8
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 4
 ; RV32-NEXT:    add a0, sp, a0
 ; RV32-NEXT:    addi a0, a0, 16
 ; RV32-NEXT:    vs8r.v v8, (a0)
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    vmv.v.i v16, 0
 ; RV32-NEXT:    call vector_arg_indirect_stack@plt
 ; RV32-NEXT:    csrr a0, vlenb
@@ -594,25 +594,25 @@ define fastcc <vscale x 32 x i32> @pass_vector_arg_indirect_stack(<vscale x 32 x
 ; RV64-NEXT:    vs8r.v v8, (a0)
 ; RV64-NEXT:    addi a0, sp, 24
 ; RV64-NEXT:    vs8r.v v8, (a0)
-; RV64-NEXT:    addi a1, zero, 1
-; RV64-NEXT:    addi a2, zero, 2
-; RV64-NEXT:    addi a3, zero, 3
-; RV64-NEXT:    addi a4, zero, 4
-; RV64-NEXT:    addi a5, zero, 5
-; RV64-NEXT:    addi a6, zero, 6
-; RV64-NEXT:    addi a7, zero, 7
+; RV64-NEXT:    li a1, 1
+; RV64-NEXT:    li a2, 2
+; RV64-NEXT:    li a3, 3
+; RV64-NEXT:    li a4, 4
+; RV64-NEXT:    li a5, 5
+; RV64-NEXT:    li a6, 6
+; RV64-NEXT:    li a7, 7
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 4
 ; RV64-NEXT:    add a0, sp, a0
 ; RV64-NEXT:    addi t2, a0, 24
 ; RV64-NEXT:    addi t4, sp, 24
-; RV64-NEXT:    addi t6, zero, 8
+; RV64-NEXT:    li t6, 8
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 4
 ; RV64-NEXT:    add a0, sp, a0
 ; RV64-NEXT:    addi a0, a0, 24
 ; RV64-NEXT:    vs8r.v v8, (a0)
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    vmv.v.i v16, 0
 ; RV64-NEXT:    call vector_arg_indirect_stack@plt
 ; RV64-NEXT:    csrr a0, vlenb

diff  --git a/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll b/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll
index ada166fa672a..5400f86e7575 100644
--- a/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll
@@ -9,7 +9,7 @@
 define <2 x i64> @add_umax_v2i64(<2 x i64> %a0) {
 ; CHECK-LABEL: add_umax_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 7
+; CHECK-NEXT:    li a0, 7
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -21,7 +21,7 @@ define <2 x i64> @add_umax_v2i64(<2 x i64> %a0) {
 define <vscale x 2 x i64> @add_umax_nxv2i64(<vscale x 2 x i64> %a0) {
 ; CHECK-LABEL: add_umax_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 7
+; CHECK-NEXT:    li a0, 7
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -145,7 +145,7 @@ entry:
 define <2 x i64> @vselect_add_const_v2i64(<2 x i64> %a0) {
 ; CHECK-LABEL: vselect_add_const_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 6
+; CHECK-NEXT:    li a0, 6
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -158,7 +158,7 @@ define <2 x i64> @vselect_add_const_v2i64(<2 x i64> %a0) {
 define <vscale x 2 x i64> @vselect_add_const_nxv2i64(<vscale x 2 x i64> %a0) {
 ; CHECK-LABEL: vselect_add_const_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 6
+; CHECK-NEXT:    li a0, 6
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll b/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll
index 0aca89c978c1..1e0180f9ad97 100644
--- a/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll
@@ -59,10 +59,10 @@ define <vscale x 2 x i64> @or_and_nxv2i64_fold(<vscale x 2 x i64> %a0) {
 define <vscale x 4 x i32> @combine_vec_shl_shl(<vscale x 4 x i32> %x) {
 ; CHECK-LABEL: combine_vec_shl_shl:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    addi a0, zero, 4
+; CHECK-NEXT:    li a0, 4
 ; CHECK-NEXT:    vmv.s.x v12, a0
 ; CHECK-NEXT:    vsll.vv v8, v8, v10
 ; CHECK-NEXT:    vsll.vv v8, v8, v12

diff  --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
index 425ed47fb547..20bf367ff3e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
@@ -16,10 +16,10 @@ define <vscale x 1 x i8> @ctlz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v9
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v9, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -40,10 +40,10 @@ define <vscale x 1 x i8> @ctlz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v9
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v9, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -63,7 +63,7 @@ define <vscale x 1 x i8> @ctlz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v9, v9, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v9, v9, 0
-; RV32D-NEXT:    addi a0, zero, 134
+; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
 ; RV32D-NEXT:    vrsub.vx v8, v9, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -79,7 +79,7 @@ define <vscale x 1 x i8> @ctlz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v9, v9, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v9, v9, 0
-; RV64D-NEXT:    addi a0, zero, 134
+; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
 ; RV64D-NEXT:    vrsub.vx v8, v9, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -101,10 +101,10 @@ define <vscale x 2 x i8> @ctlz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v9
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v9, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -125,10 +125,10 @@ define <vscale x 2 x i8> @ctlz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v9
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v9, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -148,7 +148,7 @@ define <vscale x 2 x i8> @ctlz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v9, v9, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v9, v9, 0
-; RV32D-NEXT:    addi a0, zero, 134
+; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
 ; RV32D-NEXT:    vrsub.vx v8, v9, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -164,7 +164,7 @@ define <vscale x 2 x i8> @ctlz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v9, v9, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v9, v9, 0
-; RV64D-NEXT:    addi a0, zero, 134
+; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
 ; RV64D-NEXT:    vrsub.vx v8, v9, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -186,10 +186,10 @@ define <vscale x 4 x i8> @ctlz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v9
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v9, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -210,10 +210,10 @@ define <vscale x 4 x i8> @ctlz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v9
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v9, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -233,7 +233,7 @@ define <vscale x 4 x i8> @ctlz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v9, v10, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v9, v9, 0
-; RV32D-NEXT:    addi a0, zero, 134
+; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
 ; RV32D-NEXT:    vrsub.vx v8, v9, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -249,7 +249,7 @@ define <vscale x 4 x i8> @ctlz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v9, v10, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v9, v9, 0
-; RV64D-NEXT:    addi a0, zero, 134
+; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
 ; RV64D-NEXT:    vrsub.vx v8, v9, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -271,10 +271,10 @@ define <vscale x 8 x i8> @ctlz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v9
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v9, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -295,10 +295,10 @@ define <vscale x 8 x i8> @ctlz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v9
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v9, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -318,7 +318,7 @@ define <vscale x 8 x i8> @ctlz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v10, v12, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v9, v10, 0
-; RV32D-NEXT:    addi a0, zero, 134
+; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
 ; RV32D-NEXT:    vrsub.vx v8, v9, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -334,7 +334,7 @@ define <vscale x 8 x i8> @ctlz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v10, v12, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v9, v10, 0
-; RV64D-NEXT:    addi a0, zero, 134
+; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
 ; RV64D-NEXT:    vrsub.vx v8, v9, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -356,10 +356,10 @@ define <vscale x 16 x i8> @ctlz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v10
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vsrl.vi v10, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v10, v10, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v10
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v10, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -380,10 +380,10 @@ define <vscale x 16 x i8> @ctlz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v10
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vsrl.vi v10, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v10, v10, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v10
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v10, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -403,7 +403,7 @@ define <vscale x 16 x i8> @ctlz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v12, v16, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v10, v12, 0
-; RV32D-NEXT:    addi a0, zero, 134
+; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
 ; RV32D-NEXT:    vrsub.vx v8, v10, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -419,7 +419,7 @@ define <vscale x 16 x i8> @ctlz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v12, v16, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v10, v12, 0
-; RV64D-NEXT:    addi a0, zero, 134
+; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
 ; RV64D-NEXT:    vrsub.vx v8, v10, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -441,10 +441,10 @@ define <vscale x 32 x i8> @ctlz_nxv32i8(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vor.vv v8, v8, v12
 ; CHECK-NEXT:    vxor.vi v8, v8, -1
 ; CHECK-NEXT:    vsrl.vi v12, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v12, v12, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v12, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 2
 ; CHECK-NEXT:    vand.vx v8, v8, a0
@@ -470,10 +470,10 @@ define <vscale x 64 x i8> @ctlz_nxv64i8(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vor.vv v8, v8, v16
 ; CHECK-NEXT:    vxor.vi v8, v8, -1
 ; CHECK-NEXT:    vsrl.vi v16, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v16, v16, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v16, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 2
 ; CHECK-NEXT:    vand.vx v8, v8, a0
@@ -516,7 +516,7 @@ define <vscale x 1 x i16> @ctlz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
@@ -549,7 +549,7 @@ define <vscale x 1 x i16> @ctlz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -562,10 +562,10 @@ define <vscale x 1 x i16> @ctlz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v9, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v9, v9, 0
-; RV32D-NEXT:    addi a0, zero, 142
+; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
-; RV32D-NEXT:    addi a0, zero, 16
+; RV32D-NEXT:    li a0, 16
 ; RV32D-NEXT:    vmerge.vxm v8, v9, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -577,10 +577,10 @@ define <vscale x 1 x i16> @ctlz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v9, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v9, v9, 0
-; RV64D-NEXT:    addi a0, zero, 142
+; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
-; RV64D-NEXT:    addi a0, zero, 16
+; RV64D-NEXT:    li a0, 16
 ; RV64D-NEXT:    vmerge.vxm v8, v9, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 1 x i16> @llvm.ctlz.nxv1i16(<vscale x 1 x i16> %va, i1 false)
@@ -617,7 +617,7 @@ define <vscale x 2 x i16> @ctlz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
@@ -650,7 +650,7 @@ define <vscale x 2 x i16> @ctlz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -663,10 +663,10 @@ define <vscale x 2 x i16> @ctlz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v9, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v9, v9, 0
-; RV32D-NEXT:    addi a0, zero, 142
+; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
-; RV32D-NEXT:    addi a0, zero, 16
+; RV32D-NEXT:    li a0, 16
 ; RV32D-NEXT:    vmerge.vxm v8, v9, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -678,10 +678,10 @@ define <vscale x 2 x i16> @ctlz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v9, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v9, v9, 0
-; RV64D-NEXT:    addi a0, zero, 142
+; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
-; RV64D-NEXT:    addi a0, zero, 16
+; RV64D-NEXT:    li a0, 16
 ; RV64D-NEXT:    vmerge.vxm v8, v9, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 2 x i16> @llvm.ctlz.nxv2i16(<vscale x 2 x i16> %va, i1 false)
@@ -718,7 +718,7 @@ define <vscale x 4 x i16> @ctlz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
@@ -751,7 +751,7 @@ define <vscale x 4 x i16> @ctlz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -764,10 +764,10 @@ define <vscale x 4 x i16> @ctlz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v10, v10, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v9, v10, 0
-; RV32D-NEXT:    addi a0, zero, 142
+; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
-; RV32D-NEXT:    addi a0, zero, 16
+; RV32D-NEXT:    li a0, 16
 ; RV32D-NEXT:    vmerge.vxm v8, v9, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -779,10 +779,10 @@ define <vscale x 4 x i16> @ctlz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v10, v10, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v9, v10, 0
-; RV64D-NEXT:    addi a0, zero, 142
+; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
-; RV64D-NEXT:    addi a0, zero, 16
+; RV64D-NEXT:    li a0, 16
 ; RV64D-NEXT:    vmerge.vxm v8, v9, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 4 x i16> @llvm.ctlz.nxv4i16(<vscale x 4 x i16> %va, i1 false)
@@ -819,7 +819,7 @@ define <vscale x 8 x i16> @ctlz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
@@ -852,7 +852,7 @@ define <vscale x 8 x i16> @ctlz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -865,10 +865,10 @@ define <vscale x 8 x i16> @ctlz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v12, v12, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v10, v12, 0
-; RV32D-NEXT:    addi a0, zero, 142
+; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v10, v10, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
-; RV32D-NEXT:    addi a0, zero, 16
+; RV32D-NEXT:    li a0, 16
 ; RV32D-NEXT:    vmerge.vxm v8, v10, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -880,10 +880,10 @@ define <vscale x 8 x i16> @ctlz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v12, v12, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v10, v12, 0
-; RV64D-NEXT:    addi a0, zero, 142
+; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v10, v10, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
-; RV64D-NEXT:    addi a0, zero, 16
+; RV64D-NEXT:    li a0, 16
 ; RV64D-NEXT:    vmerge.vxm v8, v10, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 8 x i16> @llvm.ctlz.nxv8i16(<vscale x 8 x i16> %va, i1 false)
@@ -920,7 +920,7 @@ define <vscale x 16 x i16> @ctlz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
@@ -953,7 +953,7 @@ define <vscale x 16 x i16> @ctlz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -966,10 +966,10 @@ define <vscale x 16 x i16> @ctlz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v16, v16, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v12, v16, 0
-; RV32D-NEXT:    addi a0, zero, 142
+; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v12, v12, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
-; RV32D-NEXT:    addi a0, zero, 16
+; RV32D-NEXT:    li a0, 16
 ; RV32D-NEXT:    vmerge.vxm v8, v12, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -981,10 +981,10 @@ define <vscale x 16 x i16> @ctlz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v16, v16, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v12, v16, 0
-; RV64D-NEXT:    addi a0, zero, 142
+; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v12, v12, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
-; RV64D-NEXT:    addi a0, zero, 16
+; RV64D-NEXT:    li a0, 16
 ; RV64D-NEXT:    vmerge.vxm v8, v12, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 16 x i16> @llvm.ctlz.nxv16i16(<vscale x 16 x i16> %va, i1 false)
@@ -1021,7 +1021,7 @@ define <vscale x 32 x i16> @ctlz_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV32-NEXT:    lui a0, 1
 ; RV32-NEXT:    addi a0, a0, -241
 ; RV32-NEXT:    vand.vx v8, v8, a0
-; RV32-NEXT:    addi a0, zero, 257
+; RV32-NEXT:    li a0, 257
 ; RV32-NEXT:    vmul.vx v8, v8, a0
 ; RV32-NEXT:    vsrl.vi v8, v8, 8
 ; RV32-NEXT:    ret
@@ -1054,7 +1054,7 @@ define <vscale x 32 x i16> @ctlz_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV64-NEXT:    lui a0, 1
 ; RV64-NEXT:    addiw a0, a0, -241
 ; RV64-NEXT:    vand.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 257
+; RV64-NEXT:    li a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
 ; RV64-NEXT:    vsrl.vi v8, v8, 8
 ; RV64-NEXT:    ret
@@ -1140,15 +1140,15 @@ define <vscale x 1 x i32> @ctlz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    addi a0, zero, 52
+; RV32D-NEXT:    li a0, 52
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT:    vsrl.vx v9, v9, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v9, v9, 0
-; RV32D-NEXT:    addi a0, zero, 1054
+; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
-; RV32D-NEXT:    addi a0, zero, 32
+; RV32D-NEXT:    li a0, 32
 ; RV32D-NEXT:    vmerge.vxm v8, v9, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -1156,15 +1156,15 @@ define <vscale x 1 x i32> @ctlz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    addi a0, zero, 52
+; RV64D-NEXT:    li a0, 52
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT:    vsrl.vx v9, v9, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v9, v9, 0
-; RV64D-NEXT:    addi a0, zero, 1054
+; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
-; RV64D-NEXT:    addi a0, zero, 32
+; RV64D-NEXT:    li a0, 32
 ; RV64D-NEXT:    vmerge.vxm v8, v9, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 1 x i32> @llvm.ctlz.nxv1i32(<vscale x 1 x i32> %va, i1 false)
@@ -1249,15 +1249,15 @@ define <vscale x 2 x i32> @ctlz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV32D-NEXT:    addi a0, zero, 52
+; RV32D-NEXT:    li a0, 52
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT:    vsrl.vx v10, v10, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v9, v10, 0
-; RV32D-NEXT:    addi a0, zero, 1054
+; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
-; RV32D-NEXT:    addi a0, zero, 32
+; RV32D-NEXT:    li a0, 32
 ; RV32D-NEXT:    vmerge.vxm v8, v9, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -1265,15 +1265,15 @@ define <vscale x 2 x i32> @ctlz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV64D-NEXT:    addi a0, zero, 52
+; RV64D-NEXT:    li a0, 52
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT:    vsrl.vx v10, v10, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v9, v10, 0
-; RV64D-NEXT:    addi a0, zero, 1054
+; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
-; RV64D-NEXT:    addi a0, zero, 32
+; RV64D-NEXT:    li a0, 32
 ; RV64D-NEXT:    vmerge.vxm v8, v9, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.ctlz.nxv2i32(<vscale x 2 x i32> %va, i1 false)
@@ -1358,15 +1358,15 @@ define <vscale x 4 x i32> @ctlz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV32D-NEXT:    addi a0, zero, 52
+; RV32D-NEXT:    li a0, 52
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT:    vsrl.vx v12, v12, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v10, v12, 0
-; RV32D-NEXT:    addi a0, zero, 1054
+; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v10, v10, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
-; RV32D-NEXT:    addi a0, zero, 32
+; RV32D-NEXT:    li a0, 32
 ; RV32D-NEXT:    vmerge.vxm v8, v10, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -1374,15 +1374,15 @@ define <vscale x 4 x i32> @ctlz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV64D-NEXT:    addi a0, zero, 52
+; RV64D-NEXT:    li a0, 52
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT:    vsrl.vx v12, v12, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v10, v12, 0
-; RV64D-NEXT:    addi a0, zero, 1054
+; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v10, v10, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
-; RV64D-NEXT:    addi a0, zero, 32
+; RV64D-NEXT:    li a0, 32
 ; RV64D-NEXT:    vmerge.vxm v8, v10, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 4 x i32> @llvm.ctlz.nxv4i32(<vscale x 4 x i32> %va, i1 false)
@@ -1467,15 +1467,15 @@ define <vscale x 8 x i32> @ctlz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV32D-NEXT:    addi a0, zero, 52
+; RV32D-NEXT:    li a0, 52
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT:    vsrl.vx v16, v16, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v12, v16, 0
-; RV32D-NEXT:    addi a0, zero, 1054
+; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v12, v12, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
-; RV32D-NEXT:    addi a0, zero, 32
+; RV32D-NEXT:    li a0, 32
 ; RV32D-NEXT:    vmerge.vxm v8, v12, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -1483,15 +1483,15 @@ define <vscale x 8 x i32> @ctlz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV64D-NEXT:    addi a0, zero, 52
+; RV64D-NEXT:    li a0, 52
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT:    vsrl.vx v16, v16, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v12, v16, 0
-; RV64D-NEXT:    addi a0, zero, 1054
+; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v12, v12, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
-; RV64D-NEXT:    addi a0, zero, 32
+; RV64D-NEXT:    li a0, 32
 ; RV64D-NEXT:    vmerge.vxm v8, v12, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 8 x i32> @llvm.ctlz.nxv8i32(<vscale x 8 x i32> %va, i1 false)
@@ -1608,7 +1608,7 @@ define <vscale x 1 x i64> @ctlz_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v9
 ; RV32-NEXT:    vsrl.vi v9, v8, 16
 ; RV32-NEXT:    vor.vv v8, v8, v9
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vor.vv v8, v8, v9
 ; RV32-NEXT:    addi a0, sp, 8
@@ -1631,7 +1631,7 @@ define <vscale x 1 x i64> @ctlz_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v11
 ; RV32-NEXT:    vand.vv v8, v8, v9
 ; RV32-NEXT:    vmul.vv v8, v8, v10
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -1649,7 +1649,7 @@ define <vscale x 1 x i64> @ctlz_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64-NEXT:    vor.vv v8, v8, v9
 ; RV64-NEXT:    vsrl.vi v9, v8, 16
 ; RV64-NEXT:    vor.vv v8, v8, v9
-; RV64-NEXT:    addi a0, zero, 32
+; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsrl.vx v9, v8, a0
 ; RV64-NEXT:    vor.vv v8, v8, v9
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -1694,7 +1694,7 @@ define <vscale x 1 x i64> @ctlz_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 1 x i64> @llvm.ctlz.nxv1i64(<vscale x 1 x i64> %va, i1 false)
@@ -1734,7 +1734,7 @@ define <vscale x 2 x i64> @ctlz_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v10
 ; RV32-NEXT:    vsrl.vi v10, v8, 16
 ; RV32-NEXT:    vor.vv v8, v8, v10
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v10, v8, a0
 ; RV32-NEXT:    vor.vv v8, v8, v10
 ; RV32-NEXT:    addi a0, sp, 8
@@ -1757,7 +1757,7 @@ define <vscale x 2 x i64> @ctlz_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v14
 ; RV32-NEXT:    vand.vv v8, v8, v10
 ; RV32-NEXT:    vmul.vv v8, v8, v12
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -1775,7 +1775,7 @@ define <vscale x 2 x i64> @ctlz_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64-NEXT:    vor.vv v8, v8, v10
 ; RV64-NEXT:    vsrl.vi v10, v8, 16
 ; RV64-NEXT:    vor.vv v8, v8, v10
-; RV64-NEXT:    addi a0, zero, 32
+; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsrl.vx v10, v8, a0
 ; RV64-NEXT:    vor.vv v8, v8, v10
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -1820,7 +1820,7 @@ define <vscale x 2 x i64> @ctlz_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 2 x i64> @llvm.ctlz.nxv2i64(<vscale x 2 x i64> %va, i1 false)
@@ -1860,7 +1860,7 @@ define <vscale x 4 x i64> @ctlz_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v12
 ; RV32-NEXT:    vsrl.vi v12, v8, 16
 ; RV32-NEXT:    vor.vv v8, v8, v12
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v12, v8, a0
 ; RV32-NEXT:    vor.vv v8, v8, v12
 ; RV32-NEXT:    addi a0, sp, 8
@@ -1883,7 +1883,7 @@ define <vscale x 4 x i64> @ctlz_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v20
 ; RV32-NEXT:    vand.vv v8, v8, v12
 ; RV32-NEXT:    vmul.vv v8, v8, v16
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -1901,7 +1901,7 @@ define <vscale x 4 x i64> @ctlz_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64-NEXT:    vor.vv v8, v8, v12
 ; RV64-NEXT:    vsrl.vi v12, v8, 16
 ; RV64-NEXT:    vor.vv v8, v8, v12
-; RV64-NEXT:    addi a0, zero, 32
+; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsrl.vx v12, v8, a0
 ; RV64-NEXT:    vor.vv v8, v8, v12
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -1946,7 +1946,7 @@ define <vscale x 4 x i64> @ctlz_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 4 x i64> @llvm.ctlz.nxv4i64(<vscale x 4 x i64> %va, i1 false)
@@ -1986,7 +1986,7 @@ define <vscale x 8 x i64> @ctlz_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v16
 ; RV32-NEXT:    vsrl.vi v16, v8, 16
 ; RV32-NEXT:    vor.vv v8, v8, v16
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v16, v8, a0
 ; RV32-NEXT:    vor.vv v8, v8, v16
 ; RV32-NEXT:    addi a0, sp, 8
@@ -2009,7 +2009,7 @@ define <vscale x 8 x i64> @ctlz_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v0
 ; RV32-NEXT:    vand.vv v8, v8, v16
 ; RV32-NEXT:    vmul.vv v8, v8, v24
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -2027,7 +2027,7 @@ define <vscale x 8 x i64> @ctlz_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64-NEXT:    vor.vv v8, v8, v16
 ; RV64-NEXT:    vsrl.vi v16, v8, 16
 ; RV64-NEXT:    vor.vv v8, v8, v16
-; RV64-NEXT:    addi a0, zero, 32
+; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsrl.vx v16, v8, a0
 ; RV64-NEXT:    vor.vv v8, v8, v16
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -2072,7 +2072,7 @@ define <vscale x 8 x i64> @ctlz_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 8 x i64> @llvm.ctlz.nxv8i64(<vscale x 8 x i64> %va, i1 false)
@@ -2092,10 +2092,10 @@ define <vscale x 1 x i8> @ctlz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v9
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v9, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -2116,10 +2116,10 @@ define <vscale x 1 x i8> @ctlz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v9
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v9, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -2139,7 +2139,7 @@ define <vscale x 1 x i8> @ctlz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
-; RV32D-NEXT:    addi a0, zero, 134
+; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2153,7 +2153,7 @@ define <vscale x 1 x i8> @ctlz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
-; RV64D-NEXT:    addi a0, zero, 134
+; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 1 x i8> @llvm.ctlz.nxv1i8(<vscale x 1 x i8> %va, i1 true)
@@ -2172,10 +2172,10 @@ define <vscale x 2 x i8> @ctlz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v9
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v9, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -2196,10 +2196,10 @@ define <vscale x 2 x i8> @ctlz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v9
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v9, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -2219,7 +2219,7 @@ define <vscale x 2 x i8> @ctlz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
-; RV32D-NEXT:    addi a0, zero, 134
+; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2233,7 +2233,7 @@ define <vscale x 2 x i8> @ctlz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
-; RV64D-NEXT:    addi a0, zero, 134
+; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 2 x i8> @llvm.ctlz.nxv2i8(<vscale x 2 x i8> %va, i1 true)
@@ -2252,10 +2252,10 @@ define <vscale x 4 x i8> @ctlz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v9
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v9, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -2276,10 +2276,10 @@ define <vscale x 4 x i8> @ctlz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v9
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v9, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -2299,7 +2299,7 @@ define <vscale x 4 x i8> @ctlz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v10, v8, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v10, 0
-; RV32D-NEXT:    addi a0, zero, 134
+; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2313,7 +2313,7 @@ define <vscale x 4 x i8> @ctlz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v10, v8, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v10, 0
-; RV64D-NEXT:    addi a0, zero, 134
+; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 4 x i8> @llvm.ctlz.nxv4i8(<vscale x 4 x i8> %va, i1 true)
@@ -2332,10 +2332,10 @@ define <vscale x 8 x i8> @ctlz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v9
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v9
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v9, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -2356,10 +2356,10 @@ define <vscale x 8 x i8> @ctlz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v9
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v9
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v9, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -2379,7 +2379,7 @@ define <vscale x 8 x i8> @ctlz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v12, v8, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v12, 0
-; RV32D-NEXT:    addi a0, zero, 134
+; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2393,7 +2393,7 @@ define <vscale x 8 x i8> @ctlz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v12, v8, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v12, 0
-; RV64D-NEXT:    addi a0, zero, 134
+; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 8 x i8> @llvm.ctlz.nxv8i8(<vscale x 8 x i8> %va, i1 true)
@@ -2412,10 +2412,10 @@ define <vscale x 16 x i8> @ctlz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32I-NEXT:    vor.vv v8, v8, v10
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vsrl.vi v10, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v10, v10, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v10
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v10, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -2436,10 +2436,10 @@ define <vscale x 16 x i8> @ctlz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64I-NEXT:    vor.vv v8, v8, v10
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vsrl.vi v10, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v10, v10, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v10
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v10, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -2459,7 +2459,7 @@ define <vscale x 16 x i8> @ctlz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v16, v8, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v16, 0
-; RV32D-NEXT:    addi a0, zero, 134
+; RV32D-NEXT:    li a0, 134
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2473,7 +2473,7 @@ define <vscale x 16 x i8> @ctlz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v16, v8, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v16, 0
-; RV64D-NEXT:    addi a0, zero, 134
+; RV64D-NEXT:    li a0, 134
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 16 x i8> @llvm.ctlz.nxv16i8(<vscale x 16 x i8> %va, i1 true)
@@ -2492,10 +2492,10 @@ define <vscale x 32 x i8> @ctlz_zero_undef_nxv32i8(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vor.vv v8, v8, v12
 ; CHECK-NEXT:    vxor.vi v8, v8, -1
 ; CHECK-NEXT:    vsrl.vi v12, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v12, v12, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v12, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 2
 ; CHECK-NEXT:    vand.vx v8, v8, a0
@@ -2520,10 +2520,10 @@ define <vscale x 64 x i8> @ctlz_zero_undef_nxv64i8(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vor.vv v8, v8, v16
 ; CHECK-NEXT:    vxor.vi v8, v8, -1
 ; CHECK-NEXT:    vsrl.vi v16, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v16, v16, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v16, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 2
 ; CHECK-NEXT:    vand.vx v8, v8, a0
@@ -2565,7 +2565,7 @@ define <vscale x 1 x i16> @ctlz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
@@ -2598,7 +2598,7 @@ define <vscale x 1 x i16> @ctlz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -2611,7 +2611,7 @@ define <vscale x 1 x i16> @ctlz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v8, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
-; RV32D-NEXT:    addi a0, zero, 142
+; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2623,7 +2623,7 @@ define <vscale x 1 x i16> @ctlz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v8, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
-; RV64D-NEXT:    addi a0, zero, 142
+; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 1 x i16> @llvm.ctlz.nxv1i16(<vscale x 1 x i16> %va, i1 true)
@@ -2659,7 +2659,7 @@ define <vscale x 2 x i16> @ctlz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
@@ -2692,7 +2692,7 @@ define <vscale x 2 x i16> @ctlz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -2705,7 +2705,7 @@ define <vscale x 2 x i16> @ctlz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v8, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
-; RV32D-NEXT:    addi a0, zero, 142
+; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2717,7 +2717,7 @@ define <vscale x 2 x i16> @ctlz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v8, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
-; RV64D-NEXT:    addi a0, zero, 142
+; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 2 x i16> @llvm.ctlz.nxv2i16(<vscale x 2 x i16> %va, i1 true)
@@ -2753,7 +2753,7 @@ define <vscale x 4 x i16> @ctlz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
@@ -2786,7 +2786,7 @@ define <vscale x 4 x i16> @ctlz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -2799,7 +2799,7 @@ define <vscale x 4 x i16> @ctlz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v8, v10, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v10, v8, 0
-; RV32D-NEXT:    addi a0, zero, 142
+; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v8, v10, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2811,7 +2811,7 @@ define <vscale x 4 x i16> @ctlz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v8, v10, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v10, v8, 0
-; RV64D-NEXT:    addi a0, zero, 142
+; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v8, v10, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 4 x i16> @llvm.ctlz.nxv4i16(<vscale x 4 x i16> %va, i1 true)
@@ -2847,7 +2847,7 @@ define <vscale x 8 x i16> @ctlz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
@@ -2880,7 +2880,7 @@ define <vscale x 8 x i16> @ctlz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -2893,7 +2893,7 @@ define <vscale x 8 x i16> @ctlz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v8, v12, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v12, v8, 0
-; RV32D-NEXT:    addi a0, zero, 142
+; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v8, v12, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2905,7 +2905,7 @@ define <vscale x 8 x i16> @ctlz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v8, v12, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v12, v8, 0
-; RV64D-NEXT:    addi a0, zero, 142
+; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v8, v12, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 8 x i16> @llvm.ctlz.nxv8i16(<vscale x 8 x i16> %va, i1 true)
@@ -2941,7 +2941,7 @@ define <vscale x 16 x i16> @ctlz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
@@ -2974,7 +2974,7 @@ define <vscale x 16 x i16> @ctlz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -2987,7 +2987,7 @@ define <vscale x 16 x i16> @ctlz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v8, v16, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v16, v8, 0
-; RV32D-NEXT:    addi a0, zero, 142
+; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v8, v16, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2999,7 +2999,7 @@ define <vscale x 16 x i16> @ctlz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v8, v16, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v16, v8, 0
-; RV64D-NEXT:    addi a0, zero, 142
+; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v8, v16, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 16 x i16> @llvm.ctlz.nxv16i16(<vscale x 16 x i16> %va, i1 true)
@@ -3035,7 +3035,7 @@ define <vscale x 32 x i16> @ctlz_zero_undef_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV32-NEXT:    lui a0, 1
 ; RV32-NEXT:    addi a0, a0, -241
 ; RV32-NEXT:    vand.vx v8, v8, a0
-; RV32-NEXT:    addi a0, zero, 257
+; RV32-NEXT:    li a0, 257
 ; RV32-NEXT:    vmul.vx v8, v8, a0
 ; RV32-NEXT:    vsrl.vi v8, v8, 8
 ; RV32-NEXT:    ret
@@ -3068,7 +3068,7 @@ define <vscale x 32 x i16> @ctlz_zero_undef_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV64-NEXT:    lui a0, 1
 ; RV64-NEXT:    addiw a0, a0, -241
 ; RV64-NEXT:    vand.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 257
+; RV64-NEXT:    li a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
 ; RV64-NEXT:    vsrl.vi v8, v8, 8
 ; RV64-NEXT:    ret
@@ -3153,12 +3153,12 @@ define <vscale x 1 x i32> @ctlz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    addi a0, zero, 52
+; RV32D-NEXT:    li a0, 52
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v9, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
-; RV32D-NEXT:    addi a0, zero, 1054
+; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
@@ -3166,12 +3166,12 @@ define <vscale x 1 x i32> @ctlz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    addi a0, zero, 52
+; RV64D-NEXT:    li a0, 52
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v9, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
-; RV64D-NEXT:    addi a0, zero, 1054
+; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 1 x i32> @llvm.ctlz.nxv1i32(<vscale x 1 x i32> %va, i1 true)
@@ -3255,12 +3255,12 @@ define <vscale x 2 x i32> @ctlz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV32D-NEXT:    addi a0, zero, 52
+; RV32D-NEXT:    li a0, 52
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v10, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v10, v8, 0
-; RV32D-NEXT:    addi a0, zero, 1054
+; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v8, v10, a0
 ; RV32D-NEXT:    ret
 ;
@@ -3268,12 +3268,12 @@ define <vscale x 2 x i32> @ctlz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV64D-NEXT:    addi a0, zero, 52
+; RV64D-NEXT:    li a0, 52
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v10, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v10, v8, 0
-; RV64D-NEXT:    addi a0, zero, 1054
+; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v8, v10, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.ctlz.nxv2i32(<vscale x 2 x i32> %va, i1 true)
@@ -3357,12 +3357,12 @@ define <vscale x 4 x i32> @ctlz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV32D-NEXT:    addi a0, zero, 52
+; RV32D-NEXT:    li a0, 52
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v12, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v12, v8, 0
-; RV32D-NEXT:    addi a0, zero, 1054
+; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v8, v12, a0
 ; RV32D-NEXT:    ret
 ;
@@ -3370,12 +3370,12 @@ define <vscale x 4 x i32> @ctlz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV64D-NEXT:    addi a0, zero, 52
+; RV64D-NEXT:    li a0, 52
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v12, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v12, v8, 0
-; RV64D-NEXT:    addi a0, zero, 1054
+; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v8, v12, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 4 x i32> @llvm.ctlz.nxv4i32(<vscale x 4 x i32> %va, i1 true)
@@ -3459,12 +3459,12 @@ define <vscale x 8 x i32> @ctlz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV32D-NEXT:    addi a0, zero, 52
+; RV32D-NEXT:    li a0, 52
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v16, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v16, v8, 0
-; RV32D-NEXT:    addi a0, zero, 1054
+; RV32D-NEXT:    li a0, 1054
 ; RV32D-NEXT:    vrsub.vx v8, v16, a0
 ; RV32D-NEXT:    ret
 ;
@@ -3472,12 +3472,12 @@ define <vscale x 8 x i32> @ctlz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV64D-NEXT:    addi a0, zero, 52
+; RV64D-NEXT:    li a0, 52
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v16, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v16, v8, 0
-; RV64D-NEXT:    addi a0, zero, 1054
+; RV64D-NEXT:    li a0, 1054
 ; RV64D-NEXT:    vrsub.vx v8, v16, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 8 x i32> @llvm.ctlz.nxv8i32(<vscale x 8 x i32> %va, i1 true)
@@ -3592,7 +3592,7 @@ define <vscale x 1 x i64> @ctlz_zero_undef_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v9
 ; RV32-NEXT:    vsrl.vi v9, v8, 16
 ; RV32-NEXT:    vor.vv v8, v8, v9
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vor.vv v8, v8, v9
 ; RV32-NEXT:    addi a0, sp, 8
@@ -3615,7 +3615,7 @@ define <vscale x 1 x i64> @ctlz_zero_undef_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v11
 ; RV32-NEXT:    vand.vv v8, v8, v9
 ; RV32-NEXT:    vmul.vv v8, v8, v10
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -3633,7 +3633,7 @@ define <vscale x 1 x i64> @ctlz_zero_undef_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64-NEXT:    vor.vv v8, v8, v9
 ; RV64-NEXT:    vsrl.vi v9, v8, 16
 ; RV64-NEXT:    vor.vv v8, v8, v9
-; RV64-NEXT:    addi a0, zero, 32
+; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsrl.vx v9, v8, a0
 ; RV64-NEXT:    vor.vv v8, v8, v9
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -3678,7 +3678,7 @@ define <vscale x 1 x i64> @ctlz_zero_undef_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 1 x i64> @llvm.ctlz.nxv1i64(<vscale x 1 x i64> %va, i1 true)
@@ -3717,7 +3717,7 @@ define <vscale x 2 x i64> @ctlz_zero_undef_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v10
 ; RV32-NEXT:    vsrl.vi v10, v8, 16
 ; RV32-NEXT:    vor.vv v8, v8, v10
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v10, v8, a0
 ; RV32-NEXT:    vor.vv v8, v8, v10
 ; RV32-NEXT:    addi a0, sp, 8
@@ -3740,7 +3740,7 @@ define <vscale x 2 x i64> @ctlz_zero_undef_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v14
 ; RV32-NEXT:    vand.vv v8, v8, v10
 ; RV32-NEXT:    vmul.vv v8, v8, v12
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -3758,7 +3758,7 @@ define <vscale x 2 x i64> @ctlz_zero_undef_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64-NEXT:    vor.vv v8, v8, v10
 ; RV64-NEXT:    vsrl.vi v10, v8, 16
 ; RV64-NEXT:    vor.vv v8, v8, v10
-; RV64-NEXT:    addi a0, zero, 32
+; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsrl.vx v10, v8, a0
 ; RV64-NEXT:    vor.vv v8, v8, v10
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -3803,7 +3803,7 @@ define <vscale x 2 x i64> @ctlz_zero_undef_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 2 x i64> @llvm.ctlz.nxv2i64(<vscale x 2 x i64> %va, i1 true)
@@ -3842,7 +3842,7 @@ define <vscale x 4 x i64> @ctlz_zero_undef_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v12
 ; RV32-NEXT:    vsrl.vi v12, v8, 16
 ; RV32-NEXT:    vor.vv v8, v8, v12
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v12, v8, a0
 ; RV32-NEXT:    vor.vv v8, v8, v12
 ; RV32-NEXT:    addi a0, sp, 8
@@ -3865,7 +3865,7 @@ define <vscale x 4 x i64> @ctlz_zero_undef_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v20
 ; RV32-NEXT:    vand.vv v8, v8, v12
 ; RV32-NEXT:    vmul.vv v8, v8, v16
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -3883,7 +3883,7 @@ define <vscale x 4 x i64> @ctlz_zero_undef_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64-NEXT:    vor.vv v8, v8, v12
 ; RV64-NEXT:    vsrl.vi v12, v8, 16
 ; RV64-NEXT:    vor.vv v8, v8, v12
-; RV64-NEXT:    addi a0, zero, 32
+; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsrl.vx v12, v8, a0
 ; RV64-NEXT:    vor.vv v8, v8, v12
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -3928,7 +3928,7 @@ define <vscale x 4 x i64> @ctlz_zero_undef_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 4 x i64> @llvm.ctlz.nxv4i64(<vscale x 4 x i64> %va, i1 true)
@@ -3967,7 +3967,7 @@ define <vscale x 8 x i64> @ctlz_zero_undef_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    vor.vv v8, v8, v16
 ; RV32-NEXT:    vsrl.vi v16, v8, 16
 ; RV32-NEXT:    vor.vv v8, v8, v16
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v16, v8, a0
 ; RV32-NEXT:    vor.vv v8, v8, v16
 ; RV32-NEXT:    addi a0, sp, 8
@@ -3990,7 +3990,7 @@ define <vscale x 8 x i64> @ctlz_zero_undef_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v0
 ; RV32-NEXT:    vand.vv v8, v8, v16
 ; RV32-NEXT:    vmul.vv v8, v8, v24
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -4008,7 +4008,7 @@ define <vscale x 8 x i64> @ctlz_zero_undef_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64-NEXT:    vor.vv v8, v8, v16
 ; RV64-NEXT:    vsrl.vi v16, v8, 16
 ; RV64-NEXT:    vor.vv v8, v8, v16
-; RV64-NEXT:    addi a0, zero, 32
+; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsrl.vx v16, v8, a0
 ; RV64-NEXT:    vor.vv v8, v8, v16
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -4053,7 +4053,7 @@ define <vscale x 8 x i64> @ctlz_zero_undef_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 8 x i64> @llvm.ctlz.nxv8i64(<vscale x 8 x i64> %va, i1 true)

diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll
index 7aa71e846b92..f60b0380c265 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll
@@ -7,10 +7,10 @@ define <vscale x 1 x i8> @ctpop_nxv1i8(<vscale x 1 x i8> %va) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vsrl.vi v9, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v9, v9, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v9, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 2
 ; CHECK-NEXT:    vand.vx v8, v8, a0
@@ -29,10 +29,10 @@ define <vscale x 2 x i8> @ctpop_nxv2i8(<vscale x 2 x i8> %va) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vsrl.vi v9, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v9, v9, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v9, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 2
 ; CHECK-NEXT:    vand.vx v8, v8, a0
@@ -51,10 +51,10 @@ define <vscale x 4 x i8> @ctpop_nxv4i8(<vscale x 4 x i8> %va) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vsrl.vi v9, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v9, v9, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v9, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 2
 ; CHECK-NEXT:    vand.vx v8, v8, a0
@@ -73,10 +73,10 @@ define <vscale x 8 x i8> @ctpop_nxv8i8(<vscale x 8 x i8> %va) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vi v9, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v9, v9, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v9, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 2
 ; CHECK-NEXT:    vand.vx v8, v8, a0
@@ -95,10 +95,10 @@ define <vscale x 16 x i8> @ctpop_nxv16i8(<vscale x 16 x i8> %va) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vsrl.vi v10, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v10, v10, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v10, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 2
 ; CHECK-NEXT:    vand.vx v8, v8, a0
@@ -117,10 +117,10 @@ define <vscale x 32 x i8> @ctpop_nxv32i8(<vscale x 32 x i8> %va) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vsrl.vi v12, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v12, v12, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v12, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 2
 ; CHECK-NEXT:    vand.vx v8, v8, a0
@@ -139,10 +139,10 @@ define <vscale x 64 x i8> @ctpop_nxv64i8(<vscale x 64 x i8> %va) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vsrl.vi v16, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v16, v16, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v16, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 2
 ; CHECK-NEXT:    vand.vx v8, v8, a0
@@ -176,7 +176,7 @@ define <vscale x 1 x i16> @ctpop_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32-NEXT:    lui a0, 1
 ; RV32-NEXT:    addi a0, a0, -241
 ; RV32-NEXT:    vand.vx v8, v8, a0
-; RV32-NEXT:    addi a0, zero, 257
+; RV32-NEXT:    li a0, 257
 ; RV32-NEXT:    vmul.vx v8, v8, a0
 ; RV32-NEXT:    vsrl.vi v8, v8, 8
 ; RV32-NEXT:    ret
@@ -200,7 +200,7 @@ define <vscale x 1 x i16> @ctpop_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64-NEXT:    lui a0, 1
 ; RV64-NEXT:    addiw a0, a0, -241
 ; RV64-NEXT:    vand.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 257
+; RV64-NEXT:    li a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
 ; RV64-NEXT:    vsrl.vi v8, v8, 8
 ; RV64-NEXT:    ret
@@ -229,7 +229,7 @@ define <vscale x 2 x i16> @ctpop_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32-NEXT:    lui a0, 1
 ; RV32-NEXT:    addi a0, a0, -241
 ; RV32-NEXT:    vand.vx v8, v8, a0
-; RV32-NEXT:    addi a0, zero, 257
+; RV32-NEXT:    li a0, 257
 ; RV32-NEXT:    vmul.vx v8, v8, a0
 ; RV32-NEXT:    vsrl.vi v8, v8, 8
 ; RV32-NEXT:    ret
@@ -253,7 +253,7 @@ define <vscale x 2 x i16> @ctpop_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64-NEXT:    lui a0, 1
 ; RV64-NEXT:    addiw a0, a0, -241
 ; RV64-NEXT:    vand.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 257
+; RV64-NEXT:    li a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
 ; RV64-NEXT:    vsrl.vi v8, v8, 8
 ; RV64-NEXT:    ret
@@ -282,7 +282,7 @@ define <vscale x 4 x i16> @ctpop_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32-NEXT:    lui a0, 1
 ; RV32-NEXT:    addi a0, a0, -241
 ; RV32-NEXT:    vand.vx v8, v8, a0
-; RV32-NEXT:    addi a0, zero, 257
+; RV32-NEXT:    li a0, 257
 ; RV32-NEXT:    vmul.vx v8, v8, a0
 ; RV32-NEXT:    vsrl.vi v8, v8, 8
 ; RV32-NEXT:    ret
@@ -306,7 +306,7 @@ define <vscale x 4 x i16> @ctpop_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64-NEXT:    lui a0, 1
 ; RV64-NEXT:    addiw a0, a0, -241
 ; RV64-NEXT:    vand.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 257
+; RV64-NEXT:    li a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
 ; RV64-NEXT:    vsrl.vi v8, v8, 8
 ; RV64-NEXT:    ret
@@ -335,7 +335,7 @@ define <vscale x 8 x i16> @ctpop_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32-NEXT:    lui a0, 1
 ; RV32-NEXT:    addi a0, a0, -241
 ; RV32-NEXT:    vand.vx v8, v8, a0
-; RV32-NEXT:    addi a0, zero, 257
+; RV32-NEXT:    li a0, 257
 ; RV32-NEXT:    vmul.vx v8, v8, a0
 ; RV32-NEXT:    vsrl.vi v8, v8, 8
 ; RV32-NEXT:    ret
@@ -359,7 +359,7 @@ define <vscale x 8 x i16> @ctpop_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64-NEXT:    lui a0, 1
 ; RV64-NEXT:    addiw a0, a0, -241
 ; RV64-NEXT:    vand.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 257
+; RV64-NEXT:    li a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
 ; RV64-NEXT:    vsrl.vi v8, v8, 8
 ; RV64-NEXT:    ret
@@ -388,7 +388,7 @@ define <vscale x 16 x i16> @ctpop_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32-NEXT:    lui a0, 1
 ; RV32-NEXT:    addi a0, a0, -241
 ; RV32-NEXT:    vand.vx v8, v8, a0
-; RV32-NEXT:    addi a0, zero, 257
+; RV32-NEXT:    li a0, 257
 ; RV32-NEXT:    vmul.vx v8, v8, a0
 ; RV32-NEXT:    vsrl.vi v8, v8, 8
 ; RV32-NEXT:    ret
@@ -412,7 +412,7 @@ define <vscale x 16 x i16> @ctpop_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64-NEXT:    lui a0, 1
 ; RV64-NEXT:    addiw a0, a0, -241
 ; RV64-NEXT:    vand.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 257
+; RV64-NEXT:    li a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
 ; RV64-NEXT:    vsrl.vi v8, v8, 8
 ; RV64-NEXT:    ret
@@ -441,7 +441,7 @@ define <vscale x 32 x i16> @ctpop_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV32-NEXT:    lui a0, 1
 ; RV32-NEXT:    addi a0, a0, -241
 ; RV32-NEXT:    vand.vx v8, v8, a0
-; RV32-NEXT:    addi a0, zero, 257
+; RV32-NEXT:    li a0, 257
 ; RV32-NEXT:    vmul.vx v8, v8, a0
 ; RV32-NEXT:    vsrl.vi v8, v8, 8
 ; RV32-NEXT:    ret
@@ -465,7 +465,7 @@ define <vscale x 32 x i16> @ctpop_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV64-NEXT:    lui a0, 1
 ; RV64-NEXT:    addiw a0, a0, -241
 ; RV64-NEXT:    vand.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 257
+; RV64-NEXT:    li a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
 ; RV64-NEXT:    vsrl.vi v8, v8, 8
 ; RV64-NEXT:    ret
@@ -790,7 +790,7 @@ define <vscale x 1 x i64> @ctpop_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v11
 ; RV32-NEXT:    vand.vv v8, v8, v9
 ; RV32-NEXT:    vmul.vv v8, v8, v10
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -839,7 +839,7 @@ define <vscale x 1 x i64> @ctpop_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 1 x i64> @llvm.ctpop.nxv1i64(<vscale x 1 x i64> %va)
@@ -888,7 +888,7 @@ define <vscale x 2 x i64> @ctpop_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v14
 ; RV32-NEXT:    vand.vv v8, v8, v10
 ; RV32-NEXT:    vmul.vv v8, v8, v12
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -937,7 +937,7 @@ define <vscale x 2 x i64> @ctpop_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 2 x i64> @llvm.ctpop.nxv2i64(<vscale x 2 x i64> %va)
@@ -986,7 +986,7 @@ define <vscale x 4 x i64> @ctpop_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v20
 ; RV32-NEXT:    vand.vv v8, v8, v12
 ; RV32-NEXT:    vmul.vv v8, v8, v16
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -1035,7 +1035,7 @@ define <vscale x 4 x i64> @ctpop_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 4 x i64> @llvm.ctpop.nxv4i64(<vscale x 4 x i64> %va)
@@ -1084,7 +1084,7 @@ define <vscale x 8 x i64> @ctpop_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v0
 ; RV32-NEXT:    vand.vv v8, v8, v16
 ; RV32-NEXT:    vmul.vv v8, v8, v24
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -1133,7 +1133,7 @@ define <vscale x 8 x i64> @ctpop_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 8 x i64> @llvm.ctpop.nxv8i64(<vscale x 8 x i64> %va)

diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
index 796863e98e50..bbe4d82d9d12 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
@@ -7,16 +7,16 @@
 define <vscale x 1 x i8> @cttz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32I-LABEL: cttz_nxv1i8:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v9
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v9, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -28,16 +28,16 @@ define <vscale x 1 x i8> @cttz_nxv1i8(<vscale x 1 x i8> %va) {
 ;
 ; RV64I-LABEL: cttz_nxv1i8:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v9
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v9, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -62,7 +62,7 @@ define <vscale x 1 x i8> @cttz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
 ; RV32D-NEXT:    ret
@@ -82,7 +82,7 @@ define <vscale x 1 x i8> @cttz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
 ; RV64D-NEXT:    ret
@@ -94,16 +94,16 @@ declare <vscale x 1 x i8> @llvm.cttz.nxv1i8(<vscale x 1 x i8>, i1)
 define <vscale x 2 x i8> @cttz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32I-LABEL: cttz_nxv2i8:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v9
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v9, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -115,16 +115,16 @@ define <vscale x 2 x i8> @cttz_nxv2i8(<vscale x 2 x i8> %va) {
 ;
 ; RV64I-LABEL: cttz_nxv2i8:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v9
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v9, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -149,7 +149,7 @@ define <vscale x 2 x i8> @cttz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
 ; RV32D-NEXT:    ret
@@ -169,7 +169,7 @@ define <vscale x 2 x i8> @cttz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
 ; RV64D-NEXT:    ret
@@ -181,16 +181,16 @@ declare <vscale x 2 x i8> @llvm.cttz.nxv2i8(<vscale x 2 x i8>, i1)
 define <vscale x 4 x i8> @cttz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32I-LABEL: cttz_nxv4i8:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v9
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v9, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -202,16 +202,16 @@ define <vscale x 4 x i8> @cttz_nxv4i8(<vscale x 4 x i8> %va) {
 ;
 ; RV64I-LABEL: cttz_nxv4i8:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v9
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v9, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -236,7 +236,7 @@ define <vscale x 4 x i8> @cttz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v10, v8, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v10, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
 ; RV32D-NEXT:    ret
@@ -256,7 +256,7 @@ define <vscale x 4 x i8> @cttz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v10, v8, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v10, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
 ; RV64D-NEXT:    ret
@@ -268,16 +268,16 @@ declare <vscale x 4 x i8> @llvm.cttz.nxv4i8(<vscale x 4 x i8>, i1)
 define <vscale x 8 x i8> @cttz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32I-LABEL: cttz_nxv8i8:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v9
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v9, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -289,16 +289,16 @@ define <vscale x 8 x i8> @cttz_nxv8i8(<vscale x 8 x i8> %va) {
 ;
 ; RV64I-LABEL: cttz_nxv8i8:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v9
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v9, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -323,7 +323,7 @@ define <vscale x 8 x i8> @cttz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v12, v8, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v12, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
 ; RV32D-NEXT:    ret
@@ -343,7 +343,7 @@ define <vscale x 8 x i8> @cttz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v12, v8, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v12, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
 ; RV64D-NEXT:    ret
@@ -355,16 +355,16 @@ declare <vscale x 8 x i8> @llvm.cttz.nxv8i8(<vscale x 8 x i8>, i1)
 define <vscale x 16 x i8> @cttz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32I-LABEL: cttz_nxv16i8:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; RV32I-NEXT:    vsub.vx v10, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vand.vv v8, v8, v10
 ; RV32I-NEXT:    vsrl.vi v10, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v10, v10, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v10
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v10, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -376,16 +376,16 @@ define <vscale x 16 x i8> @cttz_nxv16i8(<vscale x 16 x i8> %va) {
 ;
 ; RV64I-LABEL: cttz_nxv16i8:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; RV64I-NEXT:    vsub.vx v10, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vand.vv v8, v8, v10
 ; RV64I-NEXT:    vsrl.vi v10, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v10, v10, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v10
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v10, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -410,7 +410,7 @@ define <vscale x 16 x i8> @cttz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v16, v8, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v16, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    vmerge.vim v8, v8, 8, v0
 ; RV32D-NEXT:    ret
@@ -430,7 +430,7 @@ define <vscale x 16 x i8> @cttz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v16, v8, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v16, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    vmerge.vim v8, v8, 8, v0
 ; RV64D-NEXT:    ret
@@ -442,16 +442,16 @@ declare <vscale x 16 x i8> @llvm.cttz.nxv16i8(<vscale x 16 x i8>, i1)
 define <vscale x 32 x i8> @cttz_nxv32i8(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: cttz_nxv32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vsub.vx v12, v8, a0
 ; CHECK-NEXT:    vxor.vi v8, v8, -1
 ; CHECK-NEXT:    vand.vv v8, v8, v12
 ; CHECK-NEXT:    vsrl.vi v12, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v12, v12, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v12, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 2
 ; CHECK-NEXT:    vand.vx v8, v8, a0
@@ -468,16 +468,16 @@ declare <vscale x 32 x i8> @llvm.cttz.nxv32i8(<vscale x 32 x i8>, i1)
 define <vscale x 64 x i8> @cttz_nxv64i8(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: cttz_nxv64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vsub.vx v16, v8, a0
 ; CHECK-NEXT:    vxor.vi v8, v8, -1
 ; CHECK-NEXT:    vand.vv v8, v8, v16
 ; CHECK-NEXT:    vsrl.vi v16, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v16, v16, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v16, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 2
 ; CHECK-NEXT:    vand.vx v8, v8, a0
@@ -494,7 +494,7 @@ declare <vscale x 64 x i8> @llvm.cttz.nxv64i8(<vscale x 64 x i8>, i1)
 define <vscale x 1 x i16> @cttz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32I-LABEL: cttz_nxv1i16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -515,14 +515,14 @@ define <vscale x 1 x i16> @cttz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: cttz_nxv1i16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -543,7 +543,7 @@ define <vscale x 1 x i16> @cttz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -560,9 +560,9 @@ define <vscale x 1 x i16> @cttz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v8, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
-; RV32D-NEXT:    addi a0, zero, 16
+; RV32D-NEXT:    li a0, 16
 ; RV32D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -578,9 +578,9 @@ define <vscale x 1 x i16> @cttz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v8, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
-; RV64D-NEXT:    addi a0, zero, 16
+; RV64D-NEXT:    li a0, 16
 ; RV64D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 1 x i16> @llvm.cttz.nxv1i16(<vscale x 1 x i16> %va, i1 false)
@@ -591,7 +591,7 @@ declare <vscale x 1 x i16> @llvm.cttz.nxv1i16(<vscale x 1 x i16>, i1)
 define <vscale x 2 x i16> @cttz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32I-LABEL: cttz_nxv2i16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -612,14 +612,14 @@ define <vscale x 2 x i16> @cttz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: cttz_nxv2i16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -640,7 +640,7 @@ define <vscale x 2 x i16> @cttz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -657,9 +657,9 @@ define <vscale x 2 x i16> @cttz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v8, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
-; RV32D-NEXT:    addi a0, zero, 16
+; RV32D-NEXT:    li a0, 16
 ; RV32D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -675,9 +675,9 @@ define <vscale x 2 x i16> @cttz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v8, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
-; RV64D-NEXT:    addi a0, zero, 16
+; RV64D-NEXT:    li a0, 16
 ; RV64D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 2 x i16> @llvm.cttz.nxv2i16(<vscale x 2 x i16> %va, i1 false)
@@ -688,7 +688,7 @@ declare <vscale x 2 x i16> @llvm.cttz.nxv2i16(<vscale x 2 x i16>, i1)
 define <vscale x 4 x i16> @cttz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32I-LABEL: cttz_nxv4i16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -709,14 +709,14 @@ define <vscale x 4 x i16> @cttz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: cttz_nxv4i16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -737,7 +737,7 @@ define <vscale x 4 x i16> @cttz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -754,9 +754,9 @@ define <vscale x 4 x i16> @cttz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v8, v10, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v10, v8, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v10, a0
-; RV32D-NEXT:    addi a0, zero, 16
+; RV32D-NEXT:    li a0, 16
 ; RV32D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -772,9 +772,9 @@ define <vscale x 4 x i16> @cttz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v8, v10, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v10, v8, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v10, a0
-; RV64D-NEXT:    addi a0, zero, 16
+; RV64D-NEXT:    li a0, 16
 ; RV64D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 4 x i16> @llvm.cttz.nxv4i16(<vscale x 4 x i16> %va, i1 false)
@@ -785,7 +785,7 @@ declare <vscale x 4 x i16> @llvm.cttz.nxv4i16(<vscale x 4 x i16>, i1)
 define <vscale x 8 x i16> @cttz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32I-LABEL: cttz_nxv8i16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; RV32I-NEXT:    vsub.vx v10, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -806,14 +806,14 @@ define <vscale x 8 x i16> @cttz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: cttz_nxv8i16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; RV64I-NEXT:    vsub.vx v10, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -834,7 +834,7 @@ define <vscale x 8 x i16> @cttz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -851,9 +851,9 @@ define <vscale x 8 x i16> @cttz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v8, v12, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v12, v8, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v12, a0
-; RV32D-NEXT:    addi a0, zero, 16
+; RV32D-NEXT:    li a0, 16
 ; RV32D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -869,9 +869,9 @@ define <vscale x 8 x i16> @cttz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v8, v12, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v12, v8, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v12, a0
-; RV64D-NEXT:    addi a0, zero, 16
+; RV64D-NEXT:    li a0, 16
 ; RV64D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 8 x i16> @llvm.cttz.nxv8i16(<vscale x 8 x i16> %va, i1 false)
@@ -882,7 +882,7 @@ declare <vscale x 8 x i16> @llvm.cttz.nxv8i16(<vscale x 8 x i16>, i1)
 define <vscale x 16 x i16> @cttz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32I-LABEL: cttz_nxv16i16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; RV32I-NEXT:    vsub.vx v12, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -903,14 +903,14 @@ define <vscale x 16 x i16> @cttz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: cttz_nxv16i16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; RV64I-NEXT:    vsub.vx v12, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -931,7 +931,7 @@ define <vscale x 16 x i16> @cttz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -948,9 +948,9 @@ define <vscale x 16 x i16> @cttz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v8, v16, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v16, v8, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v16, a0
-; RV32D-NEXT:    addi a0, zero, 16
+; RV32D-NEXT:    li a0, 16
 ; RV32D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -966,9 +966,9 @@ define <vscale x 16 x i16> @cttz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v8, v16, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v16, v8, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v16, a0
-; RV64D-NEXT:    addi a0, zero, 16
+; RV64D-NEXT:    li a0, 16
 ; RV64D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 16 x i16> @llvm.cttz.nxv16i16(<vscale x 16 x i16> %va, i1 false)
@@ -979,7 +979,7 @@ declare <vscale x 16 x i16> @llvm.cttz.nxv16i16(<vscale x 16 x i16>, i1)
 define <vscale x 32 x i16> @cttz_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV32-LABEL: cttz_nxv32i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; RV32-NEXT:    vsub.vx v16, v8, a0
 ; RV32-NEXT:    vxor.vi v8, v8, -1
@@ -1000,14 +1000,14 @@ define <vscale x 32 x i16> @cttz_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV32-NEXT:    lui a0, 1
 ; RV32-NEXT:    addi a0, a0, -241
 ; RV32-NEXT:    vand.vx v8, v8, a0
-; RV32-NEXT:    addi a0, zero, 257
+; RV32-NEXT:    li a0, 257
 ; RV32-NEXT:    vmul.vx v8, v8, a0
 ; RV32-NEXT:    vsrl.vi v8, v8, 8
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: cttz_nxv32i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; RV64-NEXT:    vsub.vx v16, v8, a0
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -1028,7 +1028,7 @@ define <vscale x 32 x i16> @cttz_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV64-NEXT:    lui a0, 1
 ; RV64-NEXT:    addiw a0, a0, -241
 ; RV64-NEXT:    vand.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 257
+; RV64-NEXT:    li a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
 ; RV64-NEXT:    vsrl.vi v8, v8, 8
 ; RV64-NEXT:    ret
@@ -1040,7 +1040,7 @@ declare <vscale x 32 x i16> @llvm.cttz.nxv32i16(<vscale x 32 x i16>, i1)
 define <vscale x 1 x i32> @cttz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32I-LABEL: cttz_nxv1i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -1069,7 +1069,7 @@ define <vscale x 1 x i32> @cttz_nxv1i32(<vscale x 1 x i32> %va) {
 ;
 ; RV64I-LABEL: cttz_nxv1i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -1102,15 +1102,15 @@ define <vscale x 1 x i32> @cttz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; RV32D-NEXT:    vand.vv v9, v8, v9
 ; RV32D-NEXT:    vfwcvt.f.xu.v v10, v9
-; RV32D-NEXT:    addi a0, zero, 52
+; RV32D-NEXT:    li a0, 52
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT:    vsrl.vx v9, v10, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v9, v9, 0
-; RV32D-NEXT:    addi a0, zero, 1023
+; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
-; RV32D-NEXT:    addi a0, zero, 32
+; RV32D-NEXT:    li a0, 32
 ; RV32D-NEXT:    vmerge.vxm v8, v9, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -1122,14 +1122,14 @@ define <vscale x 1 x i32> @cttz_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v9
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    addi a0, zero, 52
+; RV64D-NEXT:    li a0, 52
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v9, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
-; RV64D-NEXT:    addi a0, zero, 1023
+; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
-; RV64D-NEXT:    addi a0, zero, 32
+; RV64D-NEXT:    li a0, 32
 ; RV64D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 1 x i32> @llvm.cttz.nxv1i32(<vscale x 1 x i32> %va, i1 false)
@@ -1140,7 +1140,7 @@ declare <vscale x 1 x i32> @llvm.cttz.nxv1i32(<vscale x 1 x i32>, i1)
 define <vscale x 2 x i32> @cttz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32I-LABEL: cttz_nxv2i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -1169,7 +1169,7 @@ define <vscale x 2 x i32> @cttz_nxv2i32(<vscale x 2 x i32> %va) {
 ;
 ; RV64I-LABEL: cttz_nxv2i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -1202,15 +1202,15 @@ define <vscale x 2 x i32> @cttz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; RV32D-NEXT:    vand.vv v9, v8, v9
 ; RV32D-NEXT:    vfwcvt.f.xu.v v10, v9
-; RV32D-NEXT:    addi a0, zero, 52
+; RV32D-NEXT:    li a0, 52
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT:    vsrl.vx v10, v10, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v9, v10, 0
-; RV32D-NEXT:    addi a0, zero, 1023
+; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
-; RV32D-NEXT:    addi a0, zero, 32
+; RV32D-NEXT:    li a0, 32
 ; RV32D-NEXT:    vmerge.vxm v8, v9, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -1222,14 +1222,14 @@ define <vscale x 2 x i32> @cttz_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v9
 ; RV64D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV64D-NEXT:    addi a0, zero, 52
+; RV64D-NEXT:    li a0, 52
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v10, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v10, v8, 0
-; RV64D-NEXT:    addi a0, zero, 1023
+; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v10, a0
-; RV64D-NEXT:    addi a0, zero, 32
+; RV64D-NEXT:    li a0, 32
 ; RV64D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.cttz.nxv2i32(<vscale x 2 x i32> %va, i1 false)
@@ -1240,7 +1240,7 @@ declare <vscale x 2 x i32> @llvm.cttz.nxv2i32(<vscale x 2 x i32>, i1)
 define <vscale x 4 x i32> @cttz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32I-LABEL: cttz_nxv4i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; RV32I-NEXT:    vsub.vx v10, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -1269,7 +1269,7 @@ define <vscale x 4 x i32> @cttz_nxv4i32(<vscale x 4 x i32> %va) {
 ;
 ; RV64I-LABEL: cttz_nxv4i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; RV64I-NEXT:    vsub.vx v10, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -1302,15 +1302,15 @@ define <vscale x 4 x i32> @cttz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32D-NEXT:    vrsub.vi v10, v8, 0
 ; RV32D-NEXT:    vand.vv v10, v8, v10
 ; RV32D-NEXT:    vfwcvt.f.xu.v v12, v10
-; RV32D-NEXT:    addi a0, zero, 52
+; RV32D-NEXT:    li a0, 52
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT:    vsrl.vx v12, v12, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v10, v12, 0
-; RV32D-NEXT:    addi a0, zero, 1023
+; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v10, v10, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
-; RV32D-NEXT:    addi a0, zero, 32
+; RV32D-NEXT:    li a0, 32
 ; RV32D-NEXT:    vmerge.vxm v8, v10, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -1322,14 +1322,14 @@ define <vscale x 4 x i32> @cttz_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV64D-NEXT:    vrsub.vi v10, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v10
 ; RV64D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV64D-NEXT:    addi a0, zero, 52
+; RV64D-NEXT:    li a0, 52
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v12, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v12, v8, 0
-; RV64D-NEXT:    addi a0, zero, 1023
+; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v12, a0
-; RV64D-NEXT:    addi a0, zero, 32
+; RV64D-NEXT:    li a0, 32
 ; RV64D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 4 x i32> @llvm.cttz.nxv4i32(<vscale x 4 x i32> %va, i1 false)
@@ -1340,7 +1340,7 @@ declare <vscale x 4 x i32> @llvm.cttz.nxv4i32(<vscale x 4 x i32>, i1)
 define <vscale x 8 x i32> @cttz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32I-LABEL: cttz_nxv8i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; RV32I-NEXT:    vsub.vx v12, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -1369,7 +1369,7 @@ define <vscale x 8 x i32> @cttz_nxv8i32(<vscale x 8 x i32> %va) {
 ;
 ; RV64I-LABEL: cttz_nxv8i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; RV64I-NEXT:    vsub.vx v12, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -1402,15 +1402,15 @@ define <vscale x 8 x i32> @cttz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32D-NEXT:    vrsub.vi v12, v8, 0
 ; RV32D-NEXT:    vand.vv v12, v8, v12
 ; RV32D-NEXT:    vfwcvt.f.xu.v v16, v12
-; RV32D-NEXT:    addi a0, zero, 52
+; RV32D-NEXT:    li a0, 52
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT:    vsrl.vx v16, v16, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v12, v16, 0
-; RV32D-NEXT:    addi a0, zero, 1023
+; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v12, v12, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
-; RV32D-NEXT:    addi a0, zero, 32
+; RV32D-NEXT:    li a0, 32
 ; RV32D-NEXT:    vmerge.vxm v8, v12, a0, v0
 ; RV32D-NEXT:    ret
 ;
@@ -1422,14 +1422,14 @@ define <vscale x 8 x i32> @cttz_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV64D-NEXT:    vrsub.vi v12, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v12
 ; RV64D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV64D-NEXT:    addi a0, zero, 52
+; RV64D-NEXT:    li a0, 52
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v16, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v16, v8, 0
-; RV64D-NEXT:    addi a0, zero, 1023
+; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v16, a0
-; RV64D-NEXT:    addi a0, zero, 32
+; RV64D-NEXT:    li a0, 32
 ; RV64D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 8 x i32> @llvm.cttz.nxv8i32(<vscale x 8 x i32> %va, i1 false)
@@ -1440,7 +1440,7 @@ declare <vscale x 8 x i32> @llvm.cttz.nxv8i32(<vscale x 8 x i32>, i1)
 define <vscale x 16 x i32> @cttz_nxv16i32(<vscale x 16 x i32> %va) {
 ; RV32-LABEL: cttz_nxv16i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; RV32-NEXT:    vsub.vx v16, v8, a0
 ; RV32-NEXT:    vxor.vi v8, v8, -1
@@ -1469,7 +1469,7 @@ define <vscale x 16 x i32> @cttz_nxv16i32(<vscale x 16 x i32> %va) {
 ;
 ; RV64-LABEL: cttz_nxv16i32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; RV64-NEXT:    vsub.vx v16, v8, a0
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -1521,7 +1521,7 @@ define <vscale x 1 x i64> @cttz_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    addi a0, a0, 257
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vsub.vx v9, v8, a0
 ; RV32-NEXT:    vxor.vi v8, v8, -1
@@ -1545,14 +1545,14 @@ define <vscale x 1 x i64> @cttz_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v11
 ; RV32-NEXT:    vand.vv v8, v8, v9
 ; RV32-NEXT:    vmul.vv v8, v8, v10
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: cttz_nxv1i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vsub.vx v9, v8, a0
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -1598,7 +1598,7 @@ define <vscale x 1 x i64> @cttz_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 1 x i64> @llvm.cttz.nxv1i64(<vscale x 1 x i64> %va, i1 false)
@@ -1627,7 +1627,7 @@ define <vscale x 2 x i64> @cttz_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    addi a0, a0, 257
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; RV32-NEXT:    vsub.vx v10, v8, a0
 ; RV32-NEXT:    vxor.vi v8, v8, -1
@@ -1651,14 +1651,14 @@ define <vscale x 2 x i64> @cttz_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v14
 ; RV32-NEXT:    vand.vv v8, v8, v10
 ; RV32-NEXT:    vmul.vv v8, v8, v12
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: cttz_nxv2i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; RV64-NEXT:    vsub.vx v10, v8, a0
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -1704,7 +1704,7 @@ define <vscale x 2 x i64> @cttz_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 2 x i64> @llvm.cttz.nxv2i64(<vscale x 2 x i64> %va, i1 false)
@@ -1733,7 +1733,7 @@ define <vscale x 4 x i64> @cttz_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    addi a0, a0, 257
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsub.vx v12, v8, a0
 ; RV32-NEXT:    vxor.vi v8, v8, -1
@@ -1757,14 +1757,14 @@ define <vscale x 4 x i64> @cttz_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v20
 ; RV32-NEXT:    vand.vv v8, v8, v12
 ; RV32-NEXT:    vmul.vv v8, v8, v16
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: cttz_nxv4i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; RV64-NEXT:    vsub.vx v12, v8, a0
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -1810,7 +1810,7 @@ define <vscale x 4 x i64> @cttz_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 4 x i64> @llvm.cttz.nxv4i64(<vscale x 4 x i64> %va, i1 false)
@@ -1839,7 +1839,7 @@ define <vscale x 8 x i64> @cttz_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    addi a0, a0, 257
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsub.vx v16, v8, a0
 ; RV32-NEXT:    vxor.vi v8, v8, -1
@@ -1863,14 +1863,14 @@ define <vscale x 8 x i64> @cttz_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v0
 ; RV32-NEXT:    vand.vv v8, v8, v16
 ; RV32-NEXT:    vmul.vv v8, v8, v24
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: cttz_nxv8i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vsub.vx v16, v8, a0
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -1916,7 +1916,7 @@ define <vscale x 8 x i64> @cttz_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 8 x i64> @llvm.cttz.nxv8i64(<vscale x 8 x i64> %va, i1 false)
@@ -1927,16 +1927,16 @@ declare <vscale x 8 x i64> @llvm.cttz.nxv8i64(<vscale x 8 x i64>, i1)
 define <vscale x 1 x i8> @cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32I-LABEL: cttz_zero_undef_nxv1i8:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v9
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v9, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -1948,16 +1948,16 @@ define <vscale x 1 x i8> @cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ;
 ; RV64I-LABEL: cttz_zero_undef_nxv1i8:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v9
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v9, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -1980,7 +1980,7 @@ define <vscale x 1 x i8> @cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
@@ -1997,7 +1997,7 @@ define <vscale x 1 x i8> @cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 1 x i8> @llvm.cttz.nxv1i8(<vscale x 1 x i8> %va, i1 true)
@@ -2007,16 +2007,16 @@ define <vscale x 1 x i8> @cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 define <vscale x 2 x i8> @cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32I-LABEL: cttz_zero_undef_nxv2i8:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v9
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v9, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -2028,16 +2028,16 @@ define <vscale x 2 x i8> @cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ;
 ; RV64I-LABEL: cttz_zero_undef_nxv2i8:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v9
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v9, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -2060,7 +2060,7 @@ define <vscale x 2 x i8> @cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2077,7 +2077,7 @@ define <vscale x 2 x i8> @cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 2 x i8> @llvm.cttz.nxv2i8(<vscale x 2 x i8> %va, i1 true)
@@ -2087,16 +2087,16 @@ define <vscale x 2 x i8> @cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 define <vscale x 4 x i8> @cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32I-LABEL: cttz_zero_undef_nxv4i8:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v9
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v9, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -2108,16 +2108,16 @@ define <vscale x 4 x i8> @cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ;
 ; RV64I-LABEL: cttz_zero_undef_nxv4i8:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v9
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v9, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -2140,7 +2140,7 @@ define <vscale x 4 x i8> @cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v10, v8, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v10, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2157,7 +2157,7 @@ define <vscale x 4 x i8> @cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v10, v8, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v10, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 4 x i8> @llvm.cttz.nxv4i8(<vscale x 4 x i8> %va, i1 true)
@@ -2167,16 +2167,16 @@ define <vscale x 4 x i8> @cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 define <vscale x 8 x i8> @cttz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32I-LABEL: cttz_zero_undef_nxv8i8:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vand.vv v8, v8, v9
 ; RV32I-NEXT:    vsrl.vi v9, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v9, v9, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v9
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v9, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -2188,16 +2188,16 @@ define <vscale x 8 x i8> @cttz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ;
 ; RV64I-LABEL: cttz_zero_undef_nxv8i8:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vand.vv v8, v8, v9
 ; RV64I-NEXT:    vsrl.vi v9, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v9, v9, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v9
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v9, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -2220,7 +2220,7 @@ define <vscale x 8 x i8> @cttz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v12, v8, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v12, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2237,7 +2237,7 @@ define <vscale x 8 x i8> @cttz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v12, v8, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v12, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 8 x i8> @llvm.cttz.nxv8i8(<vscale x 8 x i8> %va, i1 true)
@@ -2247,16 +2247,16 @@ define <vscale x 8 x i8> @cttz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 define <vscale x 16 x i8> @cttz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32I-LABEL: cttz_zero_undef_nxv16i8:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; RV32I-NEXT:    vsub.vx v10, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
 ; RV32I-NEXT:    vand.vv v8, v8, v10
 ; RV32I-NEXT:    vsrl.vi v10, v8, 1
-; RV32I-NEXT:    addi a0, zero, 85
+; RV32I-NEXT:    li a0, 85
 ; RV32I-NEXT:    vand.vx v10, v10, a0
 ; RV32I-NEXT:    vsub.vv v8, v8, v10
-; RV32I-NEXT:    addi a0, zero, 51
+; RV32I-NEXT:    li a0, 51
 ; RV32I-NEXT:    vand.vx v10, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 2
 ; RV32I-NEXT:    vand.vx v8, v8, a0
@@ -2268,16 +2268,16 @@ define <vscale x 16 x i8> @cttz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ;
 ; RV64I-LABEL: cttz_zero_undef_nxv16i8:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; RV64I-NEXT:    vsub.vx v10, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
 ; RV64I-NEXT:    vand.vv v8, v8, v10
 ; RV64I-NEXT:    vsrl.vi v10, v8, 1
-; RV64I-NEXT:    addi a0, zero, 85
+; RV64I-NEXT:    li a0, 85
 ; RV64I-NEXT:    vand.vx v10, v10, a0
 ; RV64I-NEXT:    vsub.vv v8, v8, v10
-; RV64I-NEXT:    addi a0, zero, 51
+; RV64I-NEXT:    li a0, 51
 ; RV64I-NEXT:    vand.vx v10, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 2
 ; RV64I-NEXT:    vand.vx v8, v8, a0
@@ -2300,7 +2300,7 @@ define <vscale x 16 x i8> @cttz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vnsrl.wi v16, v8, 0
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v16, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2317,7 +2317,7 @@ define <vscale x 16 x i8> @cttz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vnsrl.wi v16, v8, 0
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v16, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 16 x i8> @llvm.cttz.nxv16i8(<vscale x 16 x i8> %va, i1 true)
@@ -2327,16 +2327,16 @@ define <vscale x 16 x i8> @cttz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 define <vscale x 32 x i8> @cttz_zero_undef_nxv32i8(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: cttz_zero_undef_nxv32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vsub.vx v12, v8, a0
 ; CHECK-NEXT:    vxor.vi v8, v8, -1
 ; CHECK-NEXT:    vand.vv v8, v8, v12
 ; CHECK-NEXT:    vsrl.vi v12, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v12, v12, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v12, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 2
 ; CHECK-NEXT:    vand.vx v8, v8, a0
@@ -2352,16 +2352,16 @@ define <vscale x 32 x i8> @cttz_zero_undef_nxv32i8(<vscale x 32 x i8> %va) {
 define <vscale x 64 x i8> @cttz_zero_undef_nxv64i8(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: cttz_zero_undef_nxv64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vsub.vx v16, v8, a0
 ; CHECK-NEXT:    vxor.vi v8, v8, -1
 ; CHECK-NEXT:    vand.vv v8, v8, v16
 ; CHECK-NEXT:    vsrl.vi v16, v8, 1
-; CHECK-NEXT:    addi a0, zero, 85
+; CHECK-NEXT:    li a0, 85
 ; CHECK-NEXT:    vand.vx v16, v16, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
-; CHECK-NEXT:    addi a0, zero, 51
+; CHECK-NEXT:    li a0, 51
 ; CHECK-NEXT:    vand.vx v16, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 2
 ; CHECK-NEXT:    vand.vx v8, v8, a0
@@ -2377,7 +2377,7 @@ define <vscale x 64 x i8> @cttz_zero_undef_nxv64i8(<vscale x 64 x i8> %va) {
 define <vscale x 1 x i16> @cttz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32I-LABEL: cttz_zero_undef_nxv1i16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -2398,14 +2398,14 @@ define <vscale x 1 x i16> @cttz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: cttz_zero_undef_nxv1i16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -2426,7 +2426,7 @@ define <vscale x 1 x i16> @cttz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -2441,7 +2441,7 @@ define <vscale x 1 x i16> @cttz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v8, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2455,7 +2455,7 @@ define <vscale x 1 x i16> @cttz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v8, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 1 x i16> @llvm.cttz.nxv1i16(<vscale x 1 x i16> %va, i1 true)
@@ -2465,7 +2465,7 @@ define <vscale x 1 x i16> @cttz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 define <vscale x 2 x i16> @cttz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32I-LABEL: cttz_zero_undef_nxv2i16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -2486,14 +2486,14 @@ define <vscale x 2 x i16> @cttz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: cttz_zero_undef_nxv2i16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -2514,7 +2514,7 @@ define <vscale x 2 x i16> @cttz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -2529,7 +2529,7 @@ define <vscale x 2 x i16> @cttz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v8, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2543,7 +2543,7 @@ define <vscale x 2 x i16> @cttz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v8, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 2 x i16> @llvm.cttz.nxv2i16(<vscale x 2 x i16> %va, i1 true)
@@ -2553,7 +2553,7 @@ define <vscale x 2 x i16> @cttz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 define <vscale x 4 x i16> @cttz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32I-LABEL: cttz_zero_undef_nxv4i16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -2574,14 +2574,14 @@ define <vscale x 4 x i16> @cttz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: cttz_zero_undef_nxv4i16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -2602,7 +2602,7 @@ define <vscale x 4 x i16> @cttz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -2617,7 +2617,7 @@ define <vscale x 4 x i16> @cttz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v8, v10, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v10, v8, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v10, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2631,7 +2631,7 @@ define <vscale x 4 x i16> @cttz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v8, v10, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v10, v8, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v10, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 4 x i16> @llvm.cttz.nxv4i16(<vscale x 4 x i16> %va, i1 true)
@@ -2641,7 +2641,7 @@ define <vscale x 4 x i16> @cttz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 define <vscale x 8 x i16> @cttz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32I-LABEL: cttz_zero_undef_nxv8i16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; RV32I-NEXT:    vsub.vx v10, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -2662,14 +2662,14 @@ define <vscale x 8 x i16> @cttz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: cttz_zero_undef_nxv8i16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; RV64I-NEXT:    vsub.vx v10, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -2690,7 +2690,7 @@ define <vscale x 8 x i16> @cttz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -2705,7 +2705,7 @@ define <vscale x 8 x i16> @cttz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v8, v12, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v12, v8, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v12, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2719,7 +2719,7 @@ define <vscale x 8 x i16> @cttz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v8, v12, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v12, v8, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v12, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 8 x i16> @llvm.cttz.nxv8i16(<vscale x 8 x i16> %va, i1 true)
@@ -2729,7 +2729,7 @@ define <vscale x 8 x i16> @cttz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 define <vscale x 16 x i16> @cttz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32I-LABEL: cttz_zero_undef_nxv16i16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; RV32I-NEXT:    vsub.vx v12, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -2750,14 +2750,14 @@ define <vscale x 16 x i16> @cttz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32I-NEXT:    lui a0, 1
 ; RV32I-NEXT:    addi a0, a0, -241
 ; RV32I-NEXT:    vand.vx v8, v8, a0
-; RV32I-NEXT:    addi a0, zero, 257
+; RV32I-NEXT:    li a0, 257
 ; RV32I-NEXT:    vmul.vx v8, v8, a0
 ; RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: cttz_zero_undef_nxv16i16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; RV64I-NEXT:    vsub.vx v12, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -2778,7 +2778,7 @@ define <vscale x 16 x i16> @cttz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64I-NEXT:    lui a0, 1
 ; RV64I-NEXT:    addiw a0, a0, -241
 ; RV64I-NEXT:    vand.vx v8, v8, a0
-; RV64I-NEXT:    addi a0, zero, 257
+; RV64I-NEXT:    li a0, 257
 ; RV64I-NEXT:    vmul.vx v8, v8, a0
 ; RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; RV64I-NEXT:    ret
@@ -2793,7 +2793,7 @@ define <vscale x 16 x i16> @cttz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32D-NEXT:    vsrl.vi v8, v16, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v16, v8, 0
-; RV32D-NEXT:    addi a0, zero, 127
+; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v16, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2807,7 +2807,7 @@ define <vscale x 16 x i16> @cttz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64D-NEXT:    vsrl.vi v8, v16, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v16, v8, 0
-; RV64D-NEXT:    addi a0, zero, 127
+; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v16, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 16 x i16> @llvm.cttz.nxv16i16(<vscale x 16 x i16> %va, i1 true)
@@ -2817,7 +2817,7 @@ define <vscale x 16 x i16> @cttz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 define <vscale x 32 x i16> @cttz_zero_undef_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV32-LABEL: cttz_zero_undef_nxv32i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; RV32-NEXT:    vsub.vx v16, v8, a0
 ; RV32-NEXT:    vxor.vi v8, v8, -1
@@ -2838,14 +2838,14 @@ define <vscale x 32 x i16> @cttz_zero_undef_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV32-NEXT:    lui a0, 1
 ; RV32-NEXT:    addi a0, a0, -241
 ; RV32-NEXT:    vand.vx v8, v8, a0
-; RV32-NEXT:    addi a0, zero, 257
+; RV32-NEXT:    li a0, 257
 ; RV32-NEXT:    vmul.vx v8, v8, a0
 ; RV32-NEXT:    vsrl.vi v8, v8, 8
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: cttz_zero_undef_nxv32i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; RV64-NEXT:    vsub.vx v16, v8, a0
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -2866,7 +2866,7 @@ define <vscale x 32 x i16> @cttz_zero_undef_nxv32i16(<vscale x 32 x i16> %va) {
 ; RV64-NEXT:    lui a0, 1
 ; RV64-NEXT:    addiw a0, a0, -241
 ; RV64-NEXT:    vand.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 257
+; RV64-NEXT:    li a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
 ; RV64-NEXT:    vsrl.vi v8, v8, 8
 ; RV64-NEXT:    ret
@@ -2877,7 +2877,7 @@ define <vscale x 32 x i16> @cttz_zero_undef_nxv32i16(<vscale x 32 x i16> %va) {
 define <vscale x 1 x i32> @cttz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32I-LABEL: cttz_zero_undef_nxv1i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -2906,7 +2906,7 @@ define <vscale x 1 x i32> @cttz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ;
 ; RV64I-LABEL: cttz_zero_undef_nxv1i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -2939,12 +2939,12 @@ define <vscale x 1 x i32> @cttz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v9
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    addi a0, zero, 52
+; RV32D-NEXT:    li a0, 52
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v9, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v8, v8, 0
-; RV32D-NEXT:    addi a0, zero, 1023
+; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
@@ -2954,12 +2954,12 @@ define <vscale x 1 x i32> @cttz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 ; RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v9
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    addi a0, zero, 52
+; RV64D-NEXT:    li a0, 52
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v9, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v8, v8, 0
-; RV64D-NEXT:    addi a0, zero, 1023
+; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 1 x i32> @llvm.cttz.nxv1i32(<vscale x 1 x i32> %va, i1 true)
@@ -2969,7 +2969,7 @@ define <vscale x 1 x i32> @cttz_zero_undef_nxv1i32(<vscale x 1 x i32> %va) {
 define <vscale x 2 x i32> @cttz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32I-LABEL: cttz_zero_undef_nxv2i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; RV32I-NEXT:    vsub.vx v9, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -2998,7 +2998,7 @@ define <vscale x 2 x i32> @cttz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ;
 ; RV64I-LABEL: cttz_zero_undef_nxv2i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; RV64I-NEXT:    vsub.vx v9, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -3031,12 +3031,12 @@ define <vscale x 2 x i32> @cttz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v9
 ; RV32D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV32D-NEXT:    addi a0, zero, 52
+; RV32D-NEXT:    li a0, 52
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v10, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v10, v8, 0
-; RV32D-NEXT:    addi a0, zero, 1023
+; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v8, v10, a0
 ; RV32D-NEXT:    ret
 ;
@@ -3046,12 +3046,12 @@ define <vscale x 2 x i32> @cttz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 ; RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v9
 ; RV64D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV64D-NEXT:    addi a0, zero, 52
+; RV64D-NEXT:    li a0, 52
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v10, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v10, v8, 0
-; RV64D-NEXT:    addi a0, zero, 1023
+; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v10, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.cttz.nxv2i32(<vscale x 2 x i32> %va, i1 true)
@@ -3061,7 +3061,7 @@ define <vscale x 2 x i32> @cttz_zero_undef_nxv2i32(<vscale x 2 x i32> %va) {
 define <vscale x 4 x i32> @cttz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32I-LABEL: cttz_zero_undef_nxv4i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; RV32I-NEXT:    vsub.vx v10, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -3090,7 +3090,7 @@ define <vscale x 4 x i32> @cttz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ;
 ; RV64I-LABEL: cttz_zero_undef_nxv4i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; RV64I-NEXT:    vsub.vx v10, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -3123,12 +3123,12 @@ define <vscale x 4 x i32> @cttz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV32D-NEXT:    vrsub.vi v10, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v10
 ; RV32D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV32D-NEXT:    addi a0, zero, 52
+; RV32D-NEXT:    li a0, 52
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v12, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v12, v8, 0
-; RV32D-NEXT:    addi a0, zero, 1023
+; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v8, v12, a0
 ; RV32D-NEXT:    ret
 ;
@@ -3138,12 +3138,12 @@ define <vscale x 4 x i32> @cttz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 ; RV64D-NEXT:    vrsub.vi v10, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v10
 ; RV64D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV64D-NEXT:    addi a0, zero, 52
+; RV64D-NEXT:    li a0, 52
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v12, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v12, v8, 0
-; RV64D-NEXT:    addi a0, zero, 1023
+; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v12, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 4 x i32> @llvm.cttz.nxv4i32(<vscale x 4 x i32> %va, i1 true)
@@ -3153,7 +3153,7 @@ define <vscale x 4 x i32> @cttz_zero_undef_nxv4i32(<vscale x 4 x i32> %va) {
 define <vscale x 8 x i32> @cttz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32I-LABEL: cttz_zero_undef_nxv8i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; RV32I-NEXT:    vsub.vx v12, v8, a0
 ; RV32I-NEXT:    vxor.vi v8, v8, -1
@@ -3182,7 +3182,7 @@ define <vscale x 8 x i32> @cttz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ;
 ; RV64I-LABEL: cttz_zero_undef_nxv8i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, zero, 1
+; RV64I-NEXT:    li a0, 1
 ; RV64I-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; RV64I-NEXT:    vsub.vx v12, v8, a0
 ; RV64I-NEXT:    vxor.vi v8, v8, -1
@@ -3215,12 +3215,12 @@ define <vscale x 8 x i32> @cttz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV32D-NEXT:    vrsub.vi v12, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v12
 ; RV32D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV32D-NEXT:    addi a0, zero, 52
+; RV32D-NEXT:    li a0, 52
 ; RV32D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32D-NEXT:    vsrl.vx v8, v16, a0
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vnsrl.wi v16, v8, 0
-; RV32D-NEXT:    addi a0, zero, 1023
+; RV32D-NEXT:    li a0, 1023
 ; RV32D-NEXT:    vsub.vx v8, v16, a0
 ; RV32D-NEXT:    ret
 ;
@@ -3230,12 +3230,12 @@ define <vscale x 8 x i32> @cttz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 ; RV64D-NEXT:    vrsub.vi v12, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v12
 ; RV64D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV64D-NEXT:    addi a0, zero, 52
+; RV64D-NEXT:    li a0, 52
 ; RV64D-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64D-NEXT:    vsrl.vx v8, v16, a0
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vnsrl.wi v16, v8, 0
-; RV64D-NEXT:    addi a0, zero, 1023
+; RV64D-NEXT:    li a0, 1023
 ; RV64D-NEXT:    vsub.vx v8, v16, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 8 x i32> @llvm.cttz.nxv8i32(<vscale x 8 x i32> %va, i1 true)
@@ -3245,7 +3245,7 @@ define <vscale x 8 x i32> @cttz_zero_undef_nxv8i32(<vscale x 8 x i32> %va) {
 define <vscale x 16 x i32> @cttz_zero_undef_nxv16i32(<vscale x 16 x i32> %va) {
 ; RV32-LABEL: cttz_zero_undef_nxv16i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; RV32-NEXT:    vsub.vx v16, v8, a0
 ; RV32-NEXT:    vxor.vi v8, v8, -1
@@ -3274,7 +3274,7 @@ define <vscale x 16 x i32> @cttz_zero_undef_nxv16i32(<vscale x 16 x i32> %va) {
 ;
 ; RV64-LABEL: cttz_zero_undef_nxv16i32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; RV64-NEXT:    vsub.vx v16, v8, a0
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -3325,7 +3325,7 @@ define <vscale x 1 x i64> @cttz_zero_undef_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    addi a0, a0, 257
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV32-NEXT:    vsub.vx v9, v8, a0
 ; RV32-NEXT:    vxor.vi v8, v8, -1
@@ -3349,14 +3349,14 @@ define <vscale x 1 x i64> @cttz_zero_undef_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v11
 ; RV32-NEXT:    vand.vv v8, v8, v9
 ; RV32-NEXT:    vmul.vv v8, v8, v10
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: cttz_zero_undef_nxv1i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vsub.vx v9, v8, a0
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -3402,7 +3402,7 @@ define <vscale x 1 x i64> @cttz_zero_undef_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 1 x i64> @llvm.cttz.nxv1i64(<vscale x 1 x i64> %va, i1 true)
@@ -3430,7 +3430,7 @@ define <vscale x 2 x i64> @cttz_zero_undef_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    addi a0, a0, 257
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; RV32-NEXT:    vsub.vx v10, v8, a0
 ; RV32-NEXT:    vxor.vi v8, v8, -1
@@ -3454,14 +3454,14 @@ define <vscale x 2 x i64> @cttz_zero_undef_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v14
 ; RV32-NEXT:    vand.vv v8, v8, v10
 ; RV32-NEXT:    vmul.vv v8, v8, v12
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: cttz_zero_undef_nxv2i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; RV64-NEXT:    vsub.vx v10, v8, a0
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -3507,7 +3507,7 @@ define <vscale x 2 x i64> @cttz_zero_undef_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 2 x i64> @llvm.cttz.nxv2i64(<vscale x 2 x i64> %va, i1 true)
@@ -3535,7 +3535,7 @@ define <vscale x 4 x i64> @cttz_zero_undef_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    addi a0, a0, 257
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vsub.vx v12, v8, a0
 ; RV32-NEXT:    vxor.vi v8, v8, -1
@@ -3559,14 +3559,14 @@ define <vscale x 4 x i64> @cttz_zero_undef_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v20
 ; RV32-NEXT:    vand.vv v8, v8, v12
 ; RV32-NEXT:    vmul.vv v8, v8, v16
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: cttz_zero_undef_nxv4i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; RV64-NEXT:    vsub.vx v12, v8, a0
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -3612,7 +3612,7 @@ define <vscale x 4 x i64> @cttz_zero_undef_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 4 x i64> @llvm.cttz.nxv4i64(<vscale x 4 x i64> %va, i1 true)
@@ -3640,7 +3640,7 @@ define <vscale x 8 x i64> @cttz_zero_undef_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    addi a0, a0, 257
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsub.vx v16, v8, a0
 ; RV32-NEXT:    vxor.vi v8, v8, -1
@@ -3664,14 +3664,14 @@ define <vscale x 8 x i64> @cttz_zero_undef_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    vadd.vv v8, v8, v0
 ; RV32-NEXT:    vand.vv v8, v8, v16
 ; RV32-NEXT:    vmul.vv v8, v8, v24
-; RV32-NEXT:    addi a0, zero, 56
+; RV32-NEXT:    li a0, 56
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: cttz_zero_undef_nxv8i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vsub.vx v16, v8, a0
 ; RV64-NEXT:    vxor.vi v8, v8, -1
@@ -3717,7 +3717,7 @@ define <vscale x 8 x i64> @cttz_zero_undef_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64-NEXT:    slli a0, a0, 16
 ; RV64-NEXT:    addi a0, a0, 257
 ; RV64-NEXT:    vmul.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 56
+; RV64-NEXT:    li a0, 56
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %a = call <vscale x 8 x i64> @llvm.cttz.nxv8i64(<vscale x 8 x i64> %va, i1 true)

diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
index 8da02c3333d6..0d015d7b3529 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
@@ -282,7 +282,7 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_6(<vscale x 32 x i8> %vec) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a1, zero, 6
+; CHECK-NEXT:    li a1, 6
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
@@ -305,7 +305,7 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_22(<vscale x 32 x i8> %vec) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a1, zero, 6
+; CHECK-NEXT:    li a1, 6
 ; CHECK-NEXT:    mul a0, a0, a1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v10, a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
index 31f3c3004b19..c47aa951a059 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
@@ -581,7 +581,7 @@ define i32 @extractelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 %idx) {
 define i64 @extractelt_nxv1i64_0(<vscale x 1 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv1i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v9, v8, a0
 ; CHECK-NEXT:    vmv.x.s a1, v9
@@ -597,7 +597,7 @@ define i64 @extractelt_nxv1i64_imm(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret
@@ -611,7 +611,7 @@ define i64 @extractelt_nxv1i64_idx(<vscale x 1 x i64> %v, i32 %idx) {
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret
@@ -622,7 +622,7 @@ define i64 @extractelt_nxv1i64_idx(<vscale x 1 x i64> %v, i32 %idx) {
 define i64 @extractelt_nxv2i64_0(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv2i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, mu
 ; CHECK-NEXT:    vsrl.vx v10, v8, a0
 ; CHECK-NEXT:    vmv.x.s a1, v10
@@ -638,7 +638,7 @@ define i64 @extractelt_nxv2i64_imm(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret
@@ -652,7 +652,7 @@ define i64 @extractelt_nxv2i64_idx(<vscale x 2 x i64> %v, i32 %idx) {
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret
@@ -663,7 +663,7 @@ define i64 @extractelt_nxv2i64_idx(<vscale x 2 x i64> %v, i32 %idx) {
 define i64 @extractelt_nxv4i64_0(<vscale x 4 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv4i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, mu
 ; CHECK-NEXT:    vsrl.vx v12, v8, a0
 ; CHECK-NEXT:    vmv.x.s a1, v12
@@ -679,7 +679,7 @@ define i64 @extractelt_nxv4i64_imm(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret
@@ -693,7 +693,7 @@ define i64 @extractelt_nxv4i64_idx(<vscale x 4 x i64> %v, i32 %idx) {
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret
@@ -704,7 +704,7 @@ define i64 @extractelt_nxv4i64_idx(<vscale x 4 x i64> %v, i32 %idx) {
 define i64 @extractelt_nxv8i64_0(<vscale x 8 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, mu
 ; CHECK-NEXT:    vsrl.vx v16, v8, a0
 ; CHECK-NEXT:    vmv.x.s a1, v16
@@ -720,7 +720,7 @@ define i64 @extractelt_nxv8i64_imm(<vscale x 8 x i64> %v) {
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret
@@ -734,7 +734,7 @@ define i64 @extractelt_nxv8i64_idx(<vscale x 8 x i64> %v, i32 %idx) {
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll
index 33c68c1a8821..026e149c1a46 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll
@@ -31,10 +31,10 @@ define void @gather(i8* noalias nocapture %A, i8* noalias nocapture readonly %B)
 ;
 ; CHECK-ASM-LABEL: gather:
 ; CHECK-ASM:       # %bb.0: # %entry
-; CHECK-ASM-NEXT:    mv a2, zero
-; CHECK-ASM-NEXT:    addi a6, zero, 32
-; CHECK-ASM-NEXT:    addi a4, zero, 5
-; CHECK-ASM-NEXT:    addi a5, zero, 1024
+; CHECK-ASM-NEXT:    li a2, 0
+; CHECK-ASM-NEXT:    li a6, 32
+; CHECK-ASM-NEXT:    li a4, 5
+; CHECK-ASM-NEXT:    li a5, 1024
 ; CHECK-ASM-NEXT:  .LBB0_1: # %vector.body
 ; CHECK-ASM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-ASM-NEXT:    vsetvli zero, a6, e8, m1, ta, mu
@@ -96,14 +96,14 @@ define void @gather_masked(i8* noalias nocapture %A, i8* noalias nocapture reado
 ;
 ; CHECK-ASM-LABEL: gather_masked:
 ; CHECK-ASM:       # %bb.0: # %entry
-; CHECK-ASM-NEXT:    mv a2, zero
+; CHECK-ASM-NEXT:    li a2, 0
 ; CHECK-ASM-NEXT:    lui a3, 983765
 ; CHECK-ASM-NEXT:    addiw a3, a3, 873
 ; CHECK-ASM-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
 ; CHECK-ASM-NEXT:    vmv.s.x v0, a3
-; CHECK-ASM-NEXT:    addi a6, zero, 32
-; CHECK-ASM-NEXT:    addi a4, zero, 5
-; CHECK-ASM-NEXT:    addi a5, zero, 1024
+; CHECK-ASM-NEXT:    li a6, 32
+; CHECK-ASM-NEXT:    li a4, 5
+; CHECK-ASM-NEXT:    li a5, 1024
 ; CHECK-ASM-NEXT:  .LBB1_1: # %vector.body
 ; CHECK-ASM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-ASM-NEXT:    vsetvli zero, a6, e8, m1, ta, mu
@@ -166,11 +166,11 @@ define void @gather_negative_stride(i8* noalias nocapture %A, i8* noalias nocapt
 ;
 ; CHECK-ASM-LABEL: gather_negative_stride:
 ; CHECK-ASM:       # %bb.0: # %entry
-; CHECK-ASM-NEXT:    mv a2, zero
+; CHECK-ASM-NEXT:    li a2, 0
 ; CHECK-ASM-NEXT:    addi a1, a1, 155
-; CHECK-ASM-NEXT:    addi a6, zero, 32
-; CHECK-ASM-NEXT:    addi a4, zero, -5
-; CHECK-ASM-NEXT:    addi a5, zero, 1024
+; CHECK-ASM-NEXT:    li a6, 32
+; CHECK-ASM-NEXT:    li a4, -5
+; CHECK-ASM-NEXT:    li a5, 1024
 ; CHECK-ASM-NEXT:  .LBB2_1: # %vector.body
 ; CHECK-ASM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-ASM-NEXT:    vsetvli zero, a6, e8, m1, ta, mu
@@ -232,9 +232,9 @@ define void @gather_zero_stride(i8* noalias nocapture %A, i8* noalias nocapture
 ;
 ; CHECK-ASM-LABEL: gather_zero_stride:
 ; CHECK-ASM:       # %bb.0: # %entry
-; CHECK-ASM-NEXT:    mv a2, zero
-; CHECK-ASM-NEXT:    addi a3, zero, 32
-; CHECK-ASM-NEXT:    addi a4, zero, 1024
+; CHECK-ASM-NEXT:    li a2, 0
+; CHECK-ASM-NEXT:    li a3, 32
+; CHECK-ASM-NEXT:    li a4, 1024
 ; CHECK-ASM-NEXT:  .LBB3_1: # %vector.body
 ; CHECK-ASM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-ASM-NEXT:    vsetvli zero, a3, e8, m1, ta, mu
@@ -302,10 +302,10 @@ define void @scatter(i8* noalias nocapture %A, i8* noalias nocapture readonly %B
 ;
 ; CHECK-ASM-LABEL: scatter:
 ; CHECK-ASM:       # %bb.0: # %entry
-; CHECK-ASM-NEXT:    mv a2, zero
-; CHECK-ASM-NEXT:    addi a6, zero, 32
-; CHECK-ASM-NEXT:    addi a4, zero, 5
-; CHECK-ASM-NEXT:    addi a5, zero, 1024
+; CHECK-ASM-NEXT:    li a2, 0
+; CHECK-ASM-NEXT:    li a6, 32
+; CHECK-ASM-NEXT:    li a4, 5
+; CHECK-ASM-NEXT:    li a5, 1024
 ; CHECK-ASM-NEXT:  .LBB4_1: # %vector.body
 ; CHECK-ASM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-ASM-NEXT:    add a3, a1, a2
@@ -368,14 +368,14 @@ define void @scatter_masked(i8* noalias nocapture %A, i8* noalias nocapture read
 ;
 ; CHECK-ASM-LABEL: scatter_masked:
 ; CHECK-ASM:       # %bb.0: # %entry
-; CHECK-ASM-NEXT:    mv a2, zero
-; CHECK-ASM-NEXT:    addi a6, zero, 32
+; CHECK-ASM-NEXT:    li a2, 0
+; CHECK-ASM-NEXT:    li a6, 32
 ; CHECK-ASM-NEXT:    lui a4, 983765
 ; CHECK-ASM-NEXT:    addiw a4, a4, 873
 ; CHECK-ASM-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
 ; CHECK-ASM-NEXT:    vmv.s.x v0, a4
-; CHECK-ASM-NEXT:    addi a4, zero, 5
-; CHECK-ASM-NEXT:    addi a5, zero, 1024
+; CHECK-ASM-NEXT:    li a4, 5
+; CHECK-ASM-NEXT:    li a5, 1024
 ; CHECK-ASM-NEXT:  .LBB5_1: # %vector.body
 ; CHECK-ASM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-ASM-NEXT:    add a3, a1, a2
@@ -442,9 +442,9 @@ define void @gather_pow2(i32* noalias nocapture %A, i32* noalias nocapture reado
 ;
 ; CHECK-ASM-LABEL: gather_pow2:
 ; CHECK-ASM:       # %bb.0: # %entry
-; CHECK-ASM-NEXT:    addi a2, zero, 1024
-; CHECK-ASM-NEXT:    addi a3, zero, 16
-; CHECK-ASM-NEXT:    addi a4, zero, 32
+; CHECK-ASM-NEXT:    li a2, 1024
+; CHECK-ASM-NEXT:    li a3, 16
+; CHECK-ASM-NEXT:    li a4, 32
 ; CHECK-ASM-NEXT:  .LBB6_1: # %vector.body
 ; CHECK-ASM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-ASM-NEXT:    vsetivli zero, 8, e32, m1, ta, mu
@@ -517,9 +517,9 @@ define void @scatter_pow2(i32* noalias nocapture %A, i32* noalias nocapture read
 ;
 ; CHECK-ASM-LABEL: scatter_pow2:
 ; CHECK-ASM:       # %bb.0: # %entry
-; CHECK-ASM-NEXT:    addi a2, zero, 1024
-; CHECK-ASM-NEXT:    addi a3, zero, 32
-; CHECK-ASM-NEXT:    addi a4, zero, 16
+; CHECK-ASM-NEXT:    li a2, 1024
+; CHECK-ASM-NEXT:    li a3, 32
+; CHECK-ASM-NEXT:    li a4, 16
 ; CHECK-ASM-NEXT:  .LBB7_1: # %vector.body
 ; CHECK-ASM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-ASM-NEXT:    vsetvli zero, a3, e8, m1, ta, mu
@@ -603,8 +603,8 @@ define void @struct_gather(i32* noalias nocapture %A, %struct.foo* noalias nocap
 ; CHECK-ASM:       # %bb.0: # %entry
 ; CHECK-ASM-NEXT:    addi a0, a0, 32
 ; CHECK-ASM-NEXT:    addi a1, a1, 132
-; CHECK-ASM-NEXT:    addi a2, zero, 1024
-; CHECK-ASM-NEXT:    addi a3, zero, 16
+; CHECK-ASM-NEXT:    li a2, 1024
+; CHECK-ASM-NEXT:    li a3, 16
 ; CHECK-ASM-NEXT:  .LBB8_1: # %vector.body
 ; CHECK-ASM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-ASM-NEXT:    addi a4, a1, -128
@@ -742,9 +742,9 @@ define void @gather_unroll(i32* noalias nocapture %A, i32* noalias nocapture rea
 ;
 ; CHECK-ASM-LABEL: gather_unroll:
 ; CHECK-ASM:       # %bb.0: # %entry
-; CHECK-ASM-NEXT:    addi a2, zero, 256
-; CHECK-ASM-NEXT:    addi a3, zero, 64
-; CHECK-ASM-NEXT:    addi a4, zero, 16
+; CHECK-ASM-NEXT:    li a2, 256
+; CHECK-ASM-NEXT:    li a3, 64
+; CHECK-ASM-NEXT:    li a4, 16
 ; CHECK-ASM-NEXT:  .LBB9_1: # %vector.body
 ; CHECK-ASM-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-ASM-NEXT:    vsetivli zero, 8, e32, m1, ta, mu
@@ -858,8 +858,8 @@ define void @gather_of_pointers(i32** noalias nocapture %0, i32** noalias nocapt
 ; CHECK-ASM-LABEL: gather_of_pointers:
 ; CHECK-ASM:       # %bb.0:
 ; CHECK-ASM-NEXT:    addi a0, a0, 16
-; CHECK-ASM-NEXT:    addi a2, zero, 1024
-; CHECK-ASM-NEXT:    addi a3, zero, 40
+; CHECK-ASM-NEXT:    li a2, 1024
+; CHECK-ASM-NEXT:    li a3, 40
 ; CHECK-ASM-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
 ; CHECK-ASM-NEXT:    addi a4, a1, 80
 ; CHECK-ASM-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
@@ -934,8 +934,8 @@ define void @scatter_of_pointers(i32** noalias nocapture %0, i32** noalias nocap
 ; CHECK-ASM-LABEL: scatter_of_pointers:
 ; CHECK-ASM:       # %bb.0:
 ; CHECK-ASM-NEXT:    addi a1, a1, 16
-; CHECK-ASM-NEXT:    addi a2, zero, 1024
-; CHECK-ASM-NEXT:    addi a3, zero, 40
+; CHECK-ASM-NEXT:    li a2, 1024
+; CHECK-ASM-NEXT:    li a3, 40
 ; CHECK-ASM-NEXT:  .LBB11_1: # =>This Inner Loop Header: Depth=1
 ; CHECK-ASM-NEXT:    addi a4, a1, -16
 ; CHECK-ASM-NEXT:    vsetivli zero, 2, e64, m1, ta, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
index ce06f8faf4c3..519f042fd92a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
@@ -71,7 +71,7 @@ declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)
 define void @abs_v32i8(<32 x i8>* %x) {
 ; LMULMAX2-LABEL: abs_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 32
+; LMULMAX2-NEXT:    li a1, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vrsub.vi v10, v8, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll
index 0ecd5de2b822..ba251f9fd335 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll
@@ -7,7 +7,7 @@ define <512 x i8> @bitcast_1024B(<256 x i16> %a, <512 x i8> %b) {
 ; VLEN256-LABEL: bitcast_1024B:
 ; VLEN256:       # %bb.0:
 ; VLEN256-NEXT:    addi a1, a0, 256
-; VLEN256-NEXT:    addi a2, zero, 256
+; VLEN256-NEXT:    li a2, 256
 ; VLEN256-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
 ; VLEN256-NEXT:    vle8.v v24, (a0)
 ; VLEN256-NEXT:    vle8.v v0, (a1)
@@ -17,14 +17,14 @@ define <512 x i8> @bitcast_1024B(<256 x i16> %a, <512 x i8> %b) {
 ;
 ; VLEN512-LABEL: bitcast_1024B:
 ; VLEN512:       # %bb.0:
-; VLEN512-NEXT:    addi a0, zero, 512
+; VLEN512-NEXT:    li a0, 512
 ; VLEN512-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
 ; VLEN512-NEXT:    vadd.vv v8, v16, v8
 ; VLEN512-NEXT:    ret
 ;
 ; VLEN1024-LABEL: bitcast_1024B:
 ; VLEN1024:       # %bb.0:
-; VLEN1024-NEXT:    addi a0, zero, 512
+; VLEN1024-NEXT:    li a0, 512
 ; VLEN1024-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; VLEN1024-NEXT:    vadd.vv v8, v12, v8
 ; VLEN1024-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
index bf6afbcce9b3..5d9fdc7bfbc2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
@@ -17,14 +17,14 @@
 define <32 x i1> @bitcast_v4i8_v32i1(<4 x i8> %a, <32 x i1> %b) {
 ; CHECK-LABEL: bitcast_v4i8_v32i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vmxor.mm v0, v0, v8
 ; CHECK-NEXT:    ret
 ;
 ; ELEN32-LABEL: bitcast_v4i8_v32i1:
 ; ELEN32:       # %bb.0:
-; ELEN32-NEXT:    addi a0, zero, 32
+; ELEN32-NEXT:    li a0, 32
 ; ELEN32-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; ELEN32-NEXT:    vmxor.mm v0, v0, v8
 ; ELEN32-NEXT:    ret
@@ -132,7 +132,7 @@ define i32 @bitcast_v1i32_i32(<1 x i32> %a) {
 define i64 @bitcast_v8i8_i64(<8 x i8> %a) {
 ; RV32-LABEL: bitcast_v8i8_i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
@@ -170,7 +170,7 @@ define i64 @bitcast_v8i8_i64(<8 x i8> %a) {
 define i64 @bitcast_v4i16_i64(<4 x i16> %a) {
 ; RV32-LABEL: bitcast_v4i16_i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
@@ -208,7 +208,7 @@ define i64 @bitcast_v4i16_i64(<4 x i16> %a) {
 define i64 @bitcast_v2i32_i64(<2 x i32> %a) {
 ; RV32-LABEL: bitcast_v2i32_i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
@@ -246,7 +246,7 @@ define i64 @bitcast_v2i32_i64(<2 x i32> %a) {
 define i64 @bitcast_v1i64_i64(<1 x i64> %a) {
 ; RV32-LABEL: bitcast_v1i64_i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll
index ca0b59eb2672..26dfff7fe57b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll
@@ -309,9 +309,9 @@ define void @bitreverse_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV32:       # %bb.0:
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX2-RV32-NEXT:    vle64.v v8, (a0)
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV32-NEXT:    li a1, 56
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v9, v8, a1
-; LMULMAX2-RV32-NEXT:    addi a2, zero, 40
+; LMULMAX2-RV32-NEXT:    li a2, 40
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v10, v8, a2
 ; LMULMAX2-RV32-NEXT:    lui a3, 16
 ; LMULMAX2-RV32-NEXT:    addi a3, a3, -256
@@ -320,7 +320,7 @@ define void @bitreverse_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v10, v8, 24
 ; LMULMAX2-RV32-NEXT:    lui a4, 4080
 ; LMULMAX2-RV32-NEXT:    vand.vx v10, v10, a4
-; LMULMAX2-RV32-NEXT:    addi a5, zero, 5
+; LMULMAX2-RV32-NEXT:    li a5, 5
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.s.x v0, a5
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -332,7 +332,7 @@ define void @bitreverse_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV32-NEXT:    vand.vv v11, v12, v11
 ; LMULMAX2-RV32-NEXT:    vor.vv v10, v11, v10
 ; LMULMAX2-RV32-NEXT:    vor.vv v9, v10, v9
-; LMULMAX2-RV32-NEXT:    addi a5, zero, 255
+; LMULMAX2-RV32-NEXT:    li a5, 255
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.v.x v10, a5
 ; LMULMAX2-RV32-NEXT:    vmerge.vim v10, v10, 0, v0
@@ -393,9 +393,9 @@ define void @bitreverse_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV64:       # %bb.0:
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX2-RV64-NEXT:    vle64.v v8, (a0)
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV64-NEXT:    li a1, 56
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v9, v8, a1
-; LMULMAX2-RV64-NEXT:    addi a2, zero, 40
+; LMULMAX2-RV64-NEXT:    li a2, 40
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v10, v8, a2
 ; LMULMAX2-RV64-NEXT:    lui a3, 16
 ; LMULMAX2-RV64-NEXT:    addiw a3, a3, -256
@@ -405,7 +405,7 @@ define void @bitreverse_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV64-NEXT:    lui a3, 4080
 ; LMULMAX2-RV64-NEXT:    vand.vx v10, v10, a3
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v11, v8, 8
-; LMULMAX2-RV64-NEXT:    addi a3, zero, 255
+; LMULMAX2-RV64-NEXT:    li a3, 255
 ; LMULMAX2-RV64-NEXT:    slli a4, a3, 24
 ; LMULMAX2-RV64-NEXT:    vand.vx v11, v11, a4
 ; LMULMAX2-RV64-NEXT:    vor.vv v10, v11, v10
@@ -470,9 +470,9 @@ define void @bitreverse_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV32:       # %bb.0:
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX1-RV32-NEXT:    li a1, 56
 ; LMULMAX1-RV32-NEXT:    vsrl.vx v9, v8, a1
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 40
+; LMULMAX1-RV32-NEXT:    li a2, 40
 ; LMULMAX1-RV32-NEXT:    vsrl.vx v10, v8, a2
 ; LMULMAX1-RV32-NEXT:    lui a3, 16
 ; LMULMAX1-RV32-NEXT:    addi a3, a3, -256
@@ -481,7 +481,7 @@ define void @bitreverse_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v8, 24
 ; LMULMAX1-RV32-NEXT:    lui a4, 4080
 ; LMULMAX1-RV32-NEXT:    vand.vx v10, v10, a4
-; LMULMAX1-RV32-NEXT:    addi a5, zero, 5
+; LMULMAX1-RV32-NEXT:    li a5, 5
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmv.s.x v0, a5
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -493,7 +493,7 @@ define void @bitreverse_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV32-NEXT:    vand.vv v11, v12, v11
 ; LMULMAX1-RV32-NEXT:    vor.vv v10, v11, v10
 ; LMULMAX1-RV32-NEXT:    vor.vv v9, v10, v9
-; LMULMAX1-RV32-NEXT:    addi a5, zero, 255
+; LMULMAX1-RV32-NEXT:    li a5, 255
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmv.v.x v10, a5
 ; LMULMAX1-RV32-NEXT:    vmerge.vim v10, v10, 0, v0
@@ -554,9 +554,9 @@ define void @bitreverse_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV64:       # %bb.0:
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX1-RV64-NEXT:    li a1, 56
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v9, v8, a1
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 40
+; LMULMAX1-RV64-NEXT:    li a2, 40
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v10, v8, a2
 ; LMULMAX1-RV64-NEXT:    lui a3, 16
 ; LMULMAX1-RV64-NEXT:    addiw a3, a3, -256
@@ -566,7 +566,7 @@ define void @bitreverse_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    lui a3, 4080
 ; LMULMAX1-RV64-NEXT:    vand.vx v10, v10, a3
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v11, v8, 8
-; LMULMAX1-RV64-NEXT:    addi a3, zero, 255
+; LMULMAX1-RV64-NEXT:    li a3, 255
 ; LMULMAX1-RV64-NEXT:    slli a4, a3, 24
 ; LMULMAX1-RV64-NEXT:    vand.vx v11, v11, a4
 ; LMULMAX1-RV64-NEXT:    vor.vv v10, v11, v10
@@ -1035,9 +1035,9 @@ define void @bitreverse_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-RV32:       # %bb.0:
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vle64.v v8, (a0)
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV32-NEXT:    li a1, 56
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v10, v8, a1
-; LMULMAX2-RV32-NEXT:    addi a2, zero, 40
+; LMULMAX2-RV32-NEXT:    li a2, 40
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v12, v8, a2
 ; LMULMAX2-RV32-NEXT:    lui a3, 16
 ; LMULMAX2-RV32-NEXT:    addi a3, a3, -256
@@ -1046,7 +1046,7 @@ define void @bitreverse_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v12, v8, 24
 ; LMULMAX2-RV32-NEXT:    lui a4, 4080
 ; LMULMAX2-RV32-NEXT:    vand.vx v12, v12, a4
-; LMULMAX2-RV32-NEXT:    addi a5, zero, 85
+; LMULMAX2-RV32-NEXT:    li a5, 85
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.s.x v0, a5
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
@@ -1058,7 +1058,7 @@ define void @bitreverse_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-RV32-NEXT:    vand.vv v14, v16, v14
 ; LMULMAX2-RV32-NEXT:    vor.vv v12, v14, v12
 ; LMULMAX2-RV32-NEXT:    vor.vv v10, v12, v10
-; LMULMAX2-RV32-NEXT:    addi a5, zero, 255
+; LMULMAX2-RV32-NEXT:    li a5, 255
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.v.x v12, a5
 ; LMULMAX2-RV32-NEXT:    vmerge.vim v12, v12, 0, v0
@@ -1119,9 +1119,9 @@ define void @bitreverse_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-RV64:       # %bb.0:
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vle64.v v8, (a0)
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV64-NEXT:    li a1, 56
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v10, v8, a1
-; LMULMAX2-RV64-NEXT:    addi a2, zero, 40
+; LMULMAX2-RV64-NEXT:    li a2, 40
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v12, v8, a2
 ; LMULMAX2-RV64-NEXT:    lui a3, 16
 ; LMULMAX2-RV64-NEXT:    addiw a3, a3, -256
@@ -1131,7 +1131,7 @@ define void @bitreverse_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-RV64-NEXT:    lui a3, 4080
 ; LMULMAX2-RV64-NEXT:    vand.vx v12, v12, a3
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v14, v8, 8
-; LMULMAX2-RV64-NEXT:    addi a3, zero, 255
+; LMULMAX2-RV64-NEXT:    li a3, 255
 ; LMULMAX2-RV64-NEXT:    slli a4, a3, 24
 ; LMULMAX2-RV64-NEXT:    vand.vx v14, v14, a4
 ; LMULMAX2-RV64-NEXT:    vor.vv v12, v14, v12
@@ -1198,9 +1198,9 @@ define void @bitreverse_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v12, (a1)
 ; LMULMAX1-RV32-NEXT:    vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 56
+; LMULMAX1-RV32-NEXT:    li a2, 56
 ; LMULMAX1-RV32-NEXT:    vsrl.vx v9, v12, a2
-; LMULMAX1-RV32-NEXT:    addi a3, zero, 40
+; LMULMAX1-RV32-NEXT:    li a3, 40
 ; LMULMAX1-RV32-NEXT:    vsrl.vx v10, v12, a3
 ; LMULMAX1-RV32-NEXT:    lui a4, 16
 ; LMULMAX1-RV32-NEXT:    addi a4, a4, -256
@@ -1209,7 +1209,7 @@ define void @bitreverse_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v9, v12, 24
 ; LMULMAX1-RV32-NEXT:    lui a6, 4080
 ; LMULMAX1-RV32-NEXT:    vand.vx v11, v9, a6
-; LMULMAX1-RV32-NEXT:    addi a5, zero, 5
+; LMULMAX1-RV32-NEXT:    li a5, 5
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmv.s.x v0, a5
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -1221,7 +1221,7 @@ define void @bitreverse_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV32-NEXT:    vand.vv v13, v13, v9
 ; LMULMAX1-RV32-NEXT:    vor.vv v11, v13, v11
 ; LMULMAX1-RV32-NEXT:    vor.vv v13, v11, v10
-; LMULMAX1-RV32-NEXT:    addi a5, zero, 255
+; LMULMAX1-RV32-NEXT:    li a5, 255
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmv.v.x v10, a5
 ; LMULMAX1-RV32-NEXT:    vmerge.vim v10, v10, 0, v0
@@ -1321,9 +1321,9 @@ define void @bitreverse_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    addi a7, a0, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v9, (a7)
 ; LMULMAX1-RV64-NEXT:    vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT:    addi t0, zero, 56
+; LMULMAX1-RV64-NEXT:    li t0, 56
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v10, v9, t0
-; LMULMAX1-RV64-NEXT:    addi t1, zero, 40
+; LMULMAX1-RV64-NEXT:    li t1, 40
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v11, v9, t1
 ; LMULMAX1-RV64-NEXT:    lui a1, 16
 ; LMULMAX1-RV64-NEXT:    addiw t2, a1, -256
@@ -1333,7 +1333,7 @@ define void @bitreverse_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    lui a6, 4080
 ; LMULMAX1-RV64-NEXT:    vand.vx v11, v11, a6
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v12, v9, 8
-; LMULMAX1-RV64-NEXT:    addi a3, zero, 255
+; LMULMAX1-RV64-NEXT:    li a3, 255
 ; LMULMAX1-RV64-NEXT:    slli t3, a3, 24
 ; LMULMAX1-RV64-NEXT:    vand.vx v12, v12, t3
 ; LMULMAX1-RV64-NEXT:    vor.vv v11, v12, v11

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
index 7a12c7817bfe..9441accb0631 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
@@ -141,9 +141,9 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV32:       # %bb.0:
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX2-RV32-NEXT:    vle64.v v8, (a0)
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV32-NEXT:    li a1, 56
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v9, v8, a1
-; LMULMAX2-RV32-NEXT:    addi a2, zero, 40
+; LMULMAX2-RV32-NEXT:    li a2, 40
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v10, v8, a2
 ; LMULMAX2-RV32-NEXT:    lui a3, 16
 ; LMULMAX2-RV32-NEXT:    addi a3, a3, -256
@@ -152,7 +152,7 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v10, v8, 24
 ; LMULMAX2-RV32-NEXT:    lui a4, 4080
 ; LMULMAX2-RV32-NEXT:    vand.vx v10, v10, a4
-; LMULMAX2-RV32-NEXT:    addi a5, zero, 5
+; LMULMAX2-RV32-NEXT:    li a5, 5
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.s.x v0, a5
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -164,7 +164,7 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV32-NEXT:    vand.vv v11, v12, v11
 ; LMULMAX2-RV32-NEXT:    vor.vv v10, v11, v10
 ; LMULMAX2-RV32-NEXT:    vor.vv v9, v10, v9
-; LMULMAX2-RV32-NEXT:    addi a5, zero, 255
+; LMULMAX2-RV32-NEXT:    li a5, 255
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.v.x v10, a5
 ; LMULMAX2-RV32-NEXT:    vmerge.vim v10, v10, 0, v0
@@ -195,9 +195,9 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV64:       # %bb.0:
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX2-RV64-NEXT:    vle64.v v8, (a0)
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV64-NEXT:    li a1, 56
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v9, v8, a1
-; LMULMAX2-RV64-NEXT:    addi a2, zero, 40
+; LMULMAX2-RV64-NEXT:    li a2, 40
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v10, v8, a2
 ; LMULMAX2-RV64-NEXT:    lui a3, 16
 ; LMULMAX2-RV64-NEXT:    addiw a3, a3, -256
@@ -207,7 +207,7 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV64-NEXT:    lui a3, 4080
 ; LMULMAX2-RV64-NEXT:    vand.vx v10, v10, a3
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v11, v8, 8
-; LMULMAX2-RV64-NEXT:    addi a3, zero, 255
+; LMULMAX2-RV64-NEXT:    li a3, 255
 ; LMULMAX2-RV64-NEXT:    slli a4, a3, 24
 ; LMULMAX2-RV64-NEXT:    vand.vx v11, v11, a4
 ; LMULMAX2-RV64-NEXT:    vor.vv v10, v11, v10
@@ -233,9 +233,9 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV32:       # %bb.0:
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX1-RV32-NEXT:    li a1, 56
 ; LMULMAX1-RV32-NEXT:    vsrl.vx v9, v8, a1
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 40
+; LMULMAX1-RV32-NEXT:    li a2, 40
 ; LMULMAX1-RV32-NEXT:    vsrl.vx v10, v8, a2
 ; LMULMAX1-RV32-NEXT:    lui a3, 16
 ; LMULMAX1-RV32-NEXT:    addi a3, a3, -256
@@ -244,7 +244,7 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v8, 24
 ; LMULMAX1-RV32-NEXT:    lui a4, 4080
 ; LMULMAX1-RV32-NEXT:    vand.vx v10, v10, a4
-; LMULMAX1-RV32-NEXT:    addi a5, zero, 5
+; LMULMAX1-RV32-NEXT:    li a5, 5
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmv.s.x v0, a5
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -256,7 +256,7 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV32-NEXT:    vand.vv v11, v12, v11
 ; LMULMAX1-RV32-NEXT:    vor.vv v10, v11, v10
 ; LMULMAX1-RV32-NEXT:    vor.vv v9, v10, v9
-; LMULMAX1-RV32-NEXT:    addi a5, zero, 255
+; LMULMAX1-RV32-NEXT:    li a5, 255
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmv.v.x v10, a5
 ; LMULMAX1-RV32-NEXT:    vmerge.vim v10, v10, 0, v0
@@ -287,9 +287,9 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV64:       # %bb.0:
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX1-RV64-NEXT:    li a1, 56
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v9, v8, a1
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 40
+; LMULMAX1-RV64-NEXT:    li a2, 40
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v10, v8, a2
 ; LMULMAX1-RV64-NEXT:    lui a3, 16
 ; LMULMAX1-RV64-NEXT:    addiw a3, a3, -256
@@ -299,7 +299,7 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    lui a3, 4080
 ; LMULMAX1-RV64-NEXT:    vand.vx v10, v10, a3
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v11, v8, 8
-; LMULMAX1-RV64-NEXT:    addi a3, zero, 255
+; LMULMAX1-RV64-NEXT:    li a3, 255
 ; LMULMAX1-RV64-NEXT:    slli a4, a3, 24
 ; LMULMAX1-RV64-NEXT:    vand.vx v11, v11, a4
 ; LMULMAX1-RV64-NEXT:    vor.vv v10, v11, v10
@@ -501,9 +501,9 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-RV32:       # %bb.0:
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vle64.v v8, (a0)
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV32-NEXT:    li a1, 56
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v10, v8, a1
-; LMULMAX2-RV32-NEXT:    addi a2, zero, 40
+; LMULMAX2-RV32-NEXT:    li a2, 40
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v12, v8, a2
 ; LMULMAX2-RV32-NEXT:    lui a3, 16
 ; LMULMAX2-RV32-NEXT:    addi a3, a3, -256
@@ -512,7 +512,7 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v12, v8, 24
 ; LMULMAX2-RV32-NEXT:    lui a4, 4080
 ; LMULMAX2-RV32-NEXT:    vand.vx v12, v12, a4
-; LMULMAX2-RV32-NEXT:    addi a5, zero, 85
+; LMULMAX2-RV32-NEXT:    li a5, 85
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.s.x v0, a5
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
@@ -524,7 +524,7 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-RV32-NEXT:    vand.vv v14, v16, v14
 ; LMULMAX2-RV32-NEXT:    vor.vv v12, v14, v12
 ; LMULMAX2-RV32-NEXT:    vor.vv v10, v12, v10
-; LMULMAX2-RV32-NEXT:    addi a5, zero, 255
+; LMULMAX2-RV32-NEXT:    li a5, 255
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.v.x v12, a5
 ; LMULMAX2-RV32-NEXT:    vmerge.vim v12, v12, 0, v0
@@ -555,9 +555,9 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-RV64:       # %bb.0:
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vle64.v v8, (a0)
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV64-NEXT:    li a1, 56
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v10, v8, a1
-; LMULMAX2-RV64-NEXT:    addi a2, zero, 40
+; LMULMAX2-RV64-NEXT:    li a2, 40
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v12, v8, a2
 ; LMULMAX2-RV64-NEXT:    lui a3, 16
 ; LMULMAX2-RV64-NEXT:    addiw a3, a3, -256
@@ -567,7 +567,7 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-RV64-NEXT:    lui a3, 4080
 ; LMULMAX2-RV64-NEXT:    vand.vx v12, v12, a3
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v14, v8, 8
-; LMULMAX2-RV64-NEXT:    addi a3, zero, 255
+; LMULMAX2-RV64-NEXT:    li a3, 255
 ; LMULMAX2-RV64-NEXT:    slli a4, a3, 24
 ; LMULMAX2-RV64-NEXT:    vand.vx v14, v14, a4
 ; LMULMAX2-RV64-NEXT:    vor.vv v12, v14, v12
@@ -595,9 +595,9 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v9, (a1)
 ; LMULMAX1-RV32-NEXT:    vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT:    addi a6, zero, 56
+; LMULMAX1-RV32-NEXT:    li a6, 56
 ; LMULMAX1-RV32-NEXT:    vsrl.vx v10, v9, a6
-; LMULMAX1-RV32-NEXT:    addi a3, zero, 40
+; LMULMAX1-RV32-NEXT:    li a3, 40
 ; LMULMAX1-RV32-NEXT:    vsrl.vx v11, v9, a3
 ; LMULMAX1-RV32-NEXT:    lui a4, 16
 ; LMULMAX1-RV32-NEXT:    addi a4, a4, -256
@@ -606,7 +606,7 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v11, v9, 24
 ; LMULMAX1-RV32-NEXT:    lui a5, 4080
 ; LMULMAX1-RV32-NEXT:    vand.vx v11, v11, a5
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 5
+; LMULMAX1-RV32-NEXT:    li a2, 5
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmv.s.x v0, a2
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -618,7 +618,7 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV32-NEXT:    vand.vv v13, v13, v12
 ; LMULMAX1-RV32-NEXT:    vor.vv v11, v13, v11
 ; LMULMAX1-RV32-NEXT:    vor.vv v10, v11, v10
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 255
+; LMULMAX1-RV32-NEXT:    li a2, 255
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmv.v.x v11, a2
 ; LMULMAX1-RV32-NEXT:    vmerge.vim v11, v11, 0, v0
@@ -673,9 +673,9 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    addi t1, a0, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v8, (t1)
 ; LMULMAX1-RV64-NEXT:    vle64.v v9, (a0)
-; LMULMAX1-RV64-NEXT:    addi a7, zero, 56
+; LMULMAX1-RV64-NEXT:    li a7, 56
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v10, v8, a7
-; LMULMAX1-RV64-NEXT:    addi t0, zero, 40
+; LMULMAX1-RV64-NEXT:    li t0, 40
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v11, v8, t0
 ; LMULMAX1-RV64-NEXT:    lui a4, 16
 ; LMULMAX1-RV64-NEXT:    addiw a4, a4, -256
@@ -685,7 +685,7 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    lui a6, 4080
 ; LMULMAX1-RV64-NEXT:    vand.vx v11, v11, a6
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v12, v8, 8
-; LMULMAX1-RV64-NEXT:    addi a5, zero, 255
+; LMULMAX1-RV64-NEXT:    li a5, 255
 ; LMULMAX1-RV64-NEXT:    slli a2, a5, 24
 ; LMULMAX1-RV64-NEXT:    vand.vx v12, v12, a2
 ; LMULMAX1-RV64-NEXT:    vor.vv v11, v12, v11

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
index 5efe1c493697..8d0f0a3a03af 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
@@ -63,7 +63,7 @@ define fastcc <8 x i1> @ret_mask_v8i1(<8 x i1>* %p) {
 define fastcc <32 x i1> @ret_mask_v32i1(<32 x i1>* %p) {
 ; CHECK-LABEL: ret_mask_v32i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
@@ -75,7 +75,7 @@ define fastcc <32 x i1> @ret_mask_v32i1(<32 x i1>* %p) {
 define fastcc <64 x i32> @ret_split_v64i32(<64 x i32>* %x) {
 ; LMULMAX8-LABEL: ret_split_v64i32:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a1, zero, 32
+; LMULMAX8-NEXT:    li a1, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vle32.v v8, (a0)
 ; LMULMAX8-NEXT:    addi a0, a0, 128
@@ -101,7 +101,7 @@ define fastcc <64 x i32> @ret_split_v64i32(<64 x i32>* %x) {
 define fastcc <128 x i32> @ret_split_v128i32(<128 x i32>* %x) {
 ; LMULMAX8-LABEL: ret_split_v128i32:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a2, zero, 32
+; LMULMAX8-NEXT:    li a2, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vle32.v v8, (a1)
 ; LMULMAX8-NEXT:    addi a2, a1, 128
@@ -200,7 +200,7 @@ define fastcc <8 x i1> @ret_v8i1_param_v8i1_v8i1(<8 x i1> %v, <8 x i1> %w) {
 define fastcc <32 x i1> @ret_v32i1_param_v32i1_v32i1(<32 x i1> %v, <32 x i1> %w) {
 ; CHECK-LABEL: ret_v32i1_param_v32i1_v32i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vmand.mm v0, v0, v8
 ; CHECK-NEXT:    ret
@@ -211,7 +211,7 @@ define fastcc <32 x i1> @ret_v32i1_param_v32i1_v32i1(<32 x i1> %v, <32 x i1> %w)
 define fastcc <32 x i32> @ret_v32i32_param_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) {
 ; LMULMAX8-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a2, zero, 32
+; LMULMAX8-NEXT:    li a2, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vle32.v v24, (a0)
 ; LMULMAX8-NEXT:    vadd.vv v8, v8, v16
@@ -251,7 +251,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; LMULMAX8-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; LMULMAX8-NEXT:    .cfi_offset ra, -8
 ; LMULMAX8-NEXT:    vmv8r.v v24, v8
-; LMULMAX8-NEXT:    addi a1, zero, 2
+; LMULMAX8-NEXT:    li a1, 2
 ; LMULMAX8-NEXT:    vmv8r.v v8, v16
 ; LMULMAX8-NEXT:    vmv8r.v v16, v24
 ; LMULMAX8-NEXT:    call ext2@plt
@@ -267,7 +267,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; LMULMAX4-NEXT:    .cfi_offset ra, -8
 ; LMULMAX4-NEXT:    vmv4r.v v24, v12
 ; LMULMAX4-NEXT:    vmv4r.v v28, v8
-; LMULMAX4-NEXT:    addi a1, zero, 2
+; LMULMAX4-NEXT:    li a1, 2
 ; LMULMAX4-NEXT:    vmv4r.v v8, v16
 ; LMULMAX4-NEXT:    vmv4r.v v12, v20
 ; LMULMAX4-NEXT:    vmv4r.v v16, v28
@@ -292,11 +292,11 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x
 ; LMULMAX8-NEXT:    addi s0, sp, 384
 ; LMULMAX8-NEXT:    .cfi_def_cfa s0, 0
 ; LMULMAX8-NEXT:    andi sp, sp, -128
-; LMULMAX8-NEXT:    addi a2, zero, 32
+; LMULMAX8-NEXT:    li a2, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vle32.v v24, (a0)
 ; LMULMAX8-NEXT:    addi a0, sp, 128
-; LMULMAX8-NEXT:    addi a2, zero, 42
+; LMULMAX8-NEXT:    li a2, 42
 ; LMULMAX8-NEXT:    addi a3, sp, 128
 ; LMULMAX8-NEXT:    vse32.v v8, (a3)
 ; LMULMAX8-NEXT:    vmv.v.v v8, v24
@@ -325,7 +325,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x
 ; LMULMAX4-NEXT:    addi a0, sp, 192
 ; LMULMAX4-NEXT:    vse32.v v12, (a0)
 ; LMULMAX4-NEXT:    addi a0, sp, 128
-; LMULMAX4-NEXT:    addi a3, zero, 42
+; LMULMAX4-NEXT:    li a3, 42
 ; LMULMAX4-NEXT:    addi a1, sp, 128
 ; LMULMAX4-NEXT:    vse32.v v8, (a1)
 ; LMULMAX4-NEXT:    vmv.v.v v8, v24
@@ -346,7 +346,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x
 define fastcc <32 x i32> @vector_arg_indirect_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %8) {
 ; LMULMAX8-LABEL: vector_arg_indirect_stack:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a0, zero, 32
+; LMULMAX8-NEXT:    li a0, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vle32.v v16, (t2)
 ; LMULMAX8-NEXT:    vadd.vv v8, v8, v16
@@ -378,21 +378,21 @@ define fastcc <32 x i32> @pass_vector_arg_indirect_stack(<32 x i32> %x, <32 x i3
 ; LMULMAX8-NEXT:    addi s0, sp, 384
 ; LMULMAX8-NEXT:    .cfi_def_cfa s0, 0
 ; LMULMAX8-NEXT:    andi sp, sp, -128
-; LMULMAX8-NEXT:    addi a0, zero, 32
+; LMULMAX8-NEXT:    li a0, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vmv.v.i v8, 0
-; LMULMAX8-NEXT:    addi a1, zero, 1
-; LMULMAX8-NEXT:    addi a2, zero, 2
-; LMULMAX8-NEXT:    addi a3, zero, 3
-; LMULMAX8-NEXT:    addi a4, zero, 4
-; LMULMAX8-NEXT:    addi a5, zero, 5
-; LMULMAX8-NEXT:    addi a6, zero, 6
-; LMULMAX8-NEXT:    addi a7, zero, 7
+; LMULMAX8-NEXT:    li a1, 1
+; LMULMAX8-NEXT:    li a2, 2
+; LMULMAX8-NEXT:    li a3, 3
+; LMULMAX8-NEXT:    li a4, 4
+; LMULMAX8-NEXT:    li a5, 5
+; LMULMAX8-NEXT:    li a6, 6
+; LMULMAX8-NEXT:    li a7, 7
 ; LMULMAX8-NEXT:    addi t2, sp, 128
-; LMULMAX8-NEXT:    addi t3, zero, 8
+; LMULMAX8-NEXT:    li t3, 8
 ; LMULMAX8-NEXT:    addi a0, sp, 128
 ; LMULMAX8-NEXT:    vse32.v v8, (a0)
-; LMULMAX8-NEXT:    mv a0, zero
+; LMULMAX8-NEXT:    li a0, 0
 ; LMULMAX8-NEXT:    vmv.v.i v16, 0
 ; LMULMAX8-NEXT:    call vector_arg_indirect_stack@plt
 ; LMULMAX8-NEXT:    addi sp, s0, -384
@@ -416,18 +416,18 @@ define fastcc <32 x i32> @pass_vector_arg_indirect_stack(<32 x i32> %x, <32 x i3
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; LMULMAX4-NEXT:    vmv.v.i v8, 0
 ; LMULMAX4-NEXT:    vse32.v v8, (a0)
-; LMULMAX4-NEXT:    addi a1, zero, 1
-; LMULMAX4-NEXT:    addi a2, zero, 2
-; LMULMAX4-NEXT:    addi a3, zero, 3
-; LMULMAX4-NEXT:    addi a4, zero, 4
-; LMULMAX4-NEXT:    addi a5, zero, 5
-; LMULMAX4-NEXT:    addi a6, zero, 6
-; LMULMAX4-NEXT:    addi a7, zero, 7
+; LMULMAX4-NEXT:    li a1, 1
+; LMULMAX4-NEXT:    li a2, 2
+; LMULMAX4-NEXT:    li a3, 3
+; LMULMAX4-NEXT:    li a4, 4
+; LMULMAX4-NEXT:    li a5, 5
+; LMULMAX4-NEXT:    li a6, 6
+; LMULMAX4-NEXT:    li a7, 7
 ; LMULMAX4-NEXT:    addi t2, sp, 128
-; LMULMAX4-NEXT:    addi t4, zero, 8
+; LMULMAX4-NEXT:    li t4, 8
 ; LMULMAX4-NEXT:    addi a0, sp, 128
 ; LMULMAX4-NEXT:    vse32.v v8, (a0)
-; LMULMAX4-NEXT:    mv a0, zero
+; LMULMAX4-NEXT:    li a0, 0
 ; LMULMAX4-NEXT:    vmv.v.i v12, 0
 ; LMULMAX4-NEXT:    vmv.v.i v16, 0
 ; LMULMAX4-NEXT:    vmv.v.i v20, 0
@@ -447,7 +447,7 @@ define fastcc <32 x i32> @vector_arg_direct_stack(i32 %0, i32 %1, i32 %2, i32 %3
 ; LMULMAX8:       # %bb.0:
 ; LMULMAX8-NEXT:    addi sp, sp, -16
 ; LMULMAX8-NEXT:    .cfi_def_cfa_offset 16
-; LMULMAX8-NEXT:    addi a0, zero, 32
+; LMULMAX8-NEXT:    li a0, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    addi a0, sp, 24
 ; LMULMAX8-NEXT:    vle32.v v24, (a0)
@@ -484,28 +484,28 @@ define fastcc <32 x i32> @pass_vector_arg_direct_stack(<32 x i32> %x, <32 x i32>
 ; LMULMAX8-NEXT:    .cfi_def_cfa_offset 160
 ; LMULMAX8-NEXT:    sd ra, 152(sp) # 8-byte Folded Spill
 ; LMULMAX8-NEXT:    .cfi_offset ra, -8
-; LMULMAX8-NEXT:    addi a0, zero, 32
+; LMULMAX8-NEXT:    li a0, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vmv.v.i v8, 0
 ; LMULMAX8-NEXT:    addi a0, sp, 8
 ; LMULMAX8-NEXT:    vse32.v v8, (a0)
-; LMULMAX8-NEXT:    addi a0, zero, 1
+; LMULMAX8-NEXT:    li a0, 1
 ; LMULMAX8-NEXT:    sd a0, 136(sp)
-; LMULMAX8-NEXT:    addi a0, zero, 13
-; LMULMAX8-NEXT:    addi a1, zero, 1
-; LMULMAX8-NEXT:    addi a2, zero, 2
-; LMULMAX8-NEXT:    addi a3, zero, 3
-; LMULMAX8-NEXT:    addi a4, zero, 4
-; LMULMAX8-NEXT:    addi a5, zero, 5
-; LMULMAX8-NEXT:    addi a6, zero, 6
-; LMULMAX8-NEXT:    addi a7, zero, 7
-; LMULMAX8-NEXT:    addi t2, zero, 8
-; LMULMAX8-NEXT:    addi t3, zero, 9
-; LMULMAX8-NEXT:    addi t4, zero, 10
-; LMULMAX8-NEXT:    addi t5, zero, 11
-; LMULMAX8-NEXT:    addi t6, zero, 12
+; LMULMAX8-NEXT:    li a0, 13
+; LMULMAX8-NEXT:    li a1, 1
+; LMULMAX8-NEXT:    li a2, 2
+; LMULMAX8-NEXT:    li a3, 3
+; LMULMAX8-NEXT:    li a4, 4
+; LMULMAX8-NEXT:    li a5, 5
+; LMULMAX8-NEXT:    li a6, 6
+; LMULMAX8-NEXT:    li a7, 7
+; LMULMAX8-NEXT:    li t2, 8
+; LMULMAX8-NEXT:    li t3, 9
+; LMULMAX8-NEXT:    li t4, 10
+; LMULMAX8-NEXT:    li t5, 11
+; LMULMAX8-NEXT:    li t6, 12
 ; LMULMAX8-NEXT:    sd a0, 0(sp)
-; LMULMAX8-NEXT:    mv a0, zero
+; LMULMAX8-NEXT:    li a0, 0
 ; LMULMAX8-NEXT:    vmv.v.i v16, 0
 ; LMULMAX8-NEXT:    call vector_arg_direct_stack@plt
 ; LMULMAX8-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
@@ -518,29 +518,29 @@ define fastcc <32 x i32> @pass_vector_arg_direct_stack(<32 x i32> %x, <32 x i32>
 ; LMULMAX4-NEXT:    .cfi_def_cfa_offset 160
 ; LMULMAX4-NEXT:    sd ra, 152(sp) # 8-byte Folded Spill
 ; LMULMAX4-NEXT:    .cfi_offset ra, -8
-; LMULMAX4-NEXT:    addi a0, zero, 1
+; LMULMAX4-NEXT:    li a0, 1
 ; LMULMAX4-NEXT:    sd a0, 136(sp)
-; LMULMAX4-NEXT:    addi a0, zero, 13
+; LMULMAX4-NEXT:    li a0, 13
 ; LMULMAX4-NEXT:    sd a0, 0(sp)
 ; LMULMAX4-NEXT:    addi a0, sp, 72
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; LMULMAX4-NEXT:    vmv.v.i v8, 0
 ; LMULMAX4-NEXT:    vse32.v v8, (a0)
 ; LMULMAX4-NEXT:    addi a0, sp, 8
-; LMULMAX4-NEXT:    addi a1, zero, 1
-; LMULMAX4-NEXT:    addi a2, zero, 2
-; LMULMAX4-NEXT:    addi a3, zero, 3
-; LMULMAX4-NEXT:    addi a4, zero, 4
-; LMULMAX4-NEXT:    addi a5, zero, 5
-; LMULMAX4-NEXT:    addi a6, zero, 6
-; LMULMAX4-NEXT:    addi a7, zero, 7
-; LMULMAX4-NEXT:    addi t2, zero, 8
-; LMULMAX4-NEXT:    addi t3, zero, 9
-; LMULMAX4-NEXT:    addi t4, zero, 10
-; LMULMAX4-NEXT:    addi t5, zero, 11
-; LMULMAX4-NEXT:    addi t6, zero, 12
+; LMULMAX4-NEXT:    li a1, 1
+; LMULMAX4-NEXT:    li a2, 2
+; LMULMAX4-NEXT:    li a3, 3
+; LMULMAX4-NEXT:    li a4, 4
+; LMULMAX4-NEXT:    li a5, 5
+; LMULMAX4-NEXT:    li a6, 6
+; LMULMAX4-NEXT:    li a7, 7
+; LMULMAX4-NEXT:    li t2, 8
+; LMULMAX4-NEXT:    li t3, 9
+; LMULMAX4-NEXT:    li t4, 10
+; LMULMAX4-NEXT:    li t5, 11
+; LMULMAX4-NEXT:    li t6, 12
 ; LMULMAX4-NEXT:    vse32.v v8, (a0)
-; LMULMAX4-NEXT:    mv a0, zero
+; LMULMAX4-NEXT:    li a0, 0
 ; LMULMAX4-NEXT:    vmv.v.i v12, 0
 ; LMULMAX4-NEXT:    vmv.v.i v16, 0
 ; LMULMAX4-NEXT:    vmv.v.i v20, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
index 6d36eb3f5fc5..8ad2ce989b3b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
@@ -117,21 +117,21 @@ define <8 x i1> @ret_mask_v8i1(<8 x i1>* %p) {
 define <32 x i1> @ret_mask_v32i1(<32 x i1>* %p) {
 ; LMULMAX8-LABEL: ret_mask_v32i1:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a1, zero, 32
+; LMULMAX8-NEXT:    li a1, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX8-NEXT:    vlm.v v0, (a0)
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX4-LABEL: ret_mask_v32i1:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    addi a1, zero, 32
+; LMULMAX4-NEXT:    li a1, 32
 ; LMULMAX4-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX4-NEXT:    vlm.v v0, (a0)
 ; LMULMAX4-NEXT:    ret
 ;
 ; LMULMAX2-LABEL: ret_mask_v32i1:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 32
+; LMULMAX2-NEXT:    li a1, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vlm.v v0, (a0)
 ; LMULMAX2-NEXT:    ret
@@ -151,7 +151,7 @@ define <32 x i1> @ret_mask_v32i1(<32 x i1>* %p) {
 define <64 x i32> @ret_split_v64i32(<64 x i32>* %x) {
 ; LMULMAX8-LABEL: ret_split_v64i32:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a1, zero, 32
+; LMULMAX8-NEXT:    li a1, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vle32.v v8, (a0)
 ; LMULMAX8-NEXT:    addi a0, a0, 128
@@ -233,7 +233,7 @@ define <64 x i32> @ret_split_v64i32(<64 x i32>* %x) {
 define <128 x i32> @ret_split_v128i32(<128 x i32>* %x) {
 ; LMULMAX8-LABEL: ret_split_v128i32:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a2, zero, 32
+; LMULMAX8-NEXT:    li a2, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vle32.v v8, (a1)
 ; LMULMAX8-NEXT:    addi a2, a1, 128
@@ -549,21 +549,21 @@ define <8 x i1> @ret_v8i1_param_v8i1_v8i1(<8 x i1> %v, <8 x i1> %w) {
 define <32 x i1> @ret_v32i1_param_v32i1_v32i1(<32 x i1> %v, <32 x i1> %w) {
 ; LMULMAX8-LABEL: ret_v32i1_param_v32i1_v32i1:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a0, zero, 32
+; LMULMAX8-NEXT:    li a0, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; LMULMAX8-NEXT:    vmand.mm v0, v0, v8
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX4-LABEL: ret_v32i1_param_v32i1_v32i1:
 ; LMULMAX4:       # %bb.0:
-; LMULMAX4-NEXT:    addi a0, zero, 32
+; LMULMAX4-NEXT:    li a0, 32
 ; LMULMAX4-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; LMULMAX4-NEXT:    vmand.mm v0, v0, v8
 ; LMULMAX4-NEXT:    ret
 ;
 ; LMULMAX2-LABEL: ret_v32i1_param_v32i1_v32i1:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a0, zero, 32
+; LMULMAX2-NEXT:    li a0, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vmand.mm v0, v0, v8
 ; LMULMAX2-NEXT:    ret
@@ -581,7 +581,7 @@ define <32 x i1> @ret_v32i1_param_v32i1_v32i1(<32 x i1> %v, <32 x i1> %w) {
 define <32 x i32> @ret_v32i32_param_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) {
 ; LMULMAX8-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a2, zero, 32
+; LMULMAX8-NEXT:    li a2, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vle32.v v24, (a0)
 ; LMULMAX8-NEXT:    vadd.vv v8, v8, v16
@@ -690,7 +690,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
 ; LMULMAX8-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; LMULMAX8-NEXT:    .cfi_offset ra, -8
 ; LMULMAX8-NEXT:    vmv8r.v v24, v8
-; LMULMAX8-NEXT:    addi a1, zero, 2
+; LMULMAX8-NEXT:    li a1, 2
 ; LMULMAX8-NEXT:    vmv8r.v v8, v16
 ; LMULMAX8-NEXT:    vmv8r.v v16, v24
 ; LMULMAX8-NEXT:    call ext2@plt
@@ -706,7 +706,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
 ; LMULMAX4-NEXT:    .cfi_offset ra, -8
 ; LMULMAX4-NEXT:    vmv4r.v v24, v12
 ; LMULMAX4-NEXT:    vmv4r.v v28, v8
-; LMULMAX4-NEXT:    addi a1, zero, 2
+; LMULMAX4-NEXT:    li a1, 2
 ; LMULMAX4-NEXT:    vmv4r.v v8, v16
 ; LMULMAX4-NEXT:    vmv4r.v v12, v20
 ; LMULMAX4-NEXT:    vmv4r.v v16, v28
@@ -726,7 +726,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
 ; LMULMAX2-NEXT:    vmv2r.v v26, v12
 ; LMULMAX2-NEXT:    vmv2r.v v28, v10
 ; LMULMAX2-NEXT:    vmv2r.v v30, v8
-; LMULMAX2-NEXT:    addi a1, zero, 2
+; LMULMAX2-NEXT:    li a1, 2
 ; LMULMAX2-NEXT:    vmv2r.v v8, v16
 ; LMULMAX2-NEXT:    vmv2r.v v10, v18
 ; LMULMAX2-NEXT:    vmv2r.v v12, v20
@@ -754,7 +754,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
 ; LMULMAX1-NEXT:    vmv1r.v v29, v10
 ; LMULMAX1-NEXT:    vmv1r.v v30, v9
 ; LMULMAX1-NEXT:    vmv1r.v v31, v8
-; LMULMAX1-NEXT:    addi a1, zero, 2
+; LMULMAX1-NEXT:    li a1, 2
 ; LMULMAX1-NEXT:    vmv1r.v v8, v16
 ; LMULMAX1-NEXT:    vmv1r.v v9, v17
 ; LMULMAX1-NEXT:    vmv1r.v v10, v18
@@ -791,11 +791,11 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; LMULMAX8-NEXT:    addi s0, sp, 384
 ; LMULMAX8-NEXT:    .cfi_def_cfa s0, 0
 ; LMULMAX8-NEXT:    andi sp, sp, -128
-; LMULMAX8-NEXT:    addi a2, zero, 32
+; LMULMAX8-NEXT:    li a2, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vle32.v v24, (a0)
 ; LMULMAX8-NEXT:    addi a0, sp, 128
-; LMULMAX8-NEXT:    addi a2, zero, 42
+; LMULMAX8-NEXT:    li a2, 42
 ; LMULMAX8-NEXT:    addi a3, sp, 128
 ; LMULMAX8-NEXT:    vse32.v v8, (a3)
 ; LMULMAX8-NEXT:    vmv.v.v v8, v24
@@ -824,7 +824,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; LMULMAX4-NEXT:    addi a0, sp, 192
 ; LMULMAX4-NEXT:    vse32.v v12, (a0)
 ; LMULMAX4-NEXT:    addi a0, sp, 128
-; LMULMAX4-NEXT:    addi a3, zero, 42
+; LMULMAX4-NEXT:    li a3, 42
 ; LMULMAX4-NEXT:    addi a1, sp, 128
 ; LMULMAX4-NEXT:    vse32.v v8, (a1)
 ; LMULMAX4-NEXT:    vmv.v.v v8, v24
@@ -862,7 +862,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; LMULMAX2-NEXT:    addi a0, sp, 160
 ; LMULMAX2-NEXT:    vse32.v v10, (a0)
 ; LMULMAX2-NEXT:    addi a0, sp, 128
-; LMULMAX2-NEXT:    addi a5, zero, 42
+; LMULMAX2-NEXT:    li a5, 42
 ; LMULMAX2-NEXT:    addi a1, sp, 128
 ; LMULMAX2-NEXT:    vse32.v v8, (a1)
 ; LMULMAX2-NEXT:    vmv.v.v v8, v24
@@ -918,7 +918,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 ; LMULMAX1-NEXT:    vse32.v v10, (a1)
 ; LMULMAX1-NEXT:    addi a1, sp, 144
 ; LMULMAX1-NEXT:    vse32.v v9, (a1)
-; LMULMAX1-NEXT:    addi a1, zero, 42
+; LMULMAX1-NEXT:    li a1, 42
 ; LMULMAX1-NEXT:    sd a1, 8(sp)
 ; LMULMAX1-NEXT:    sd a0, 0(sp)
 ; LMULMAX1-NEXT:    addi a0, sp, 128
@@ -958,7 +958,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
 define <32 x i32> @split_vector_args(<2 x i32>,<2 x i32>,<2 x i32>,<2 x i32>,<2 x i32>, <32 x i32> %y, <32 x i32> %z) {
 ; LMULMAX8-LABEL: split_vector_args:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a1, zero, 32
+; LMULMAX8-NEXT:    li a1, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vle32.v v8, (a0)
 ; LMULMAX8-NEXT:    vadd.vv v8, v16, v8
@@ -1027,7 +1027,7 @@ define <32 x i32> @call_split_vector_args(<2 x i32>* %pa, <32 x i32>* %pb) {
 ; LMULMAX8-NEXT:    andi sp, sp, -128
 ; LMULMAX8-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; LMULMAX8-NEXT:    vle32.v v8, (a0)
-; LMULMAX8-NEXT:    addi a0, zero, 32
+; LMULMAX8-NEXT:    li a0, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vle32.v v16, (a1)
 ; LMULMAX8-NEXT:    addi a0, sp, 128
@@ -1183,7 +1183,7 @@ define <32 x i32> @vector_arg_via_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4,
 ; LMULMAX8:       # %bb.0:
 ; LMULMAX8-NEXT:    addi sp, sp, -16
 ; LMULMAX8-NEXT:    .cfi_def_cfa_offset 16
-; LMULMAX8-NEXT:    addi a0, zero, 32
+; LMULMAX8-NEXT:    li a0, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    addi a0, sp, 16
 ; LMULMAX8-NEXT:    vle32.v v16, (a0)
@@ -1268,20 +1268,20 @@ define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x
 ; LMULMAX8-NEXT:    .cfi_def_cfa_offset 144
 ; LMULMAX8-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
 ; LMULMAX8-NEXT:    .cfi_offset ra, -8
-; LMULMAX8-NEXT:    addi a0, zero, 32
+; LMULMAX8-NEXT:    li a0, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vmv.v.i v8, 0
 ; LMULMAX8-NEXT:    vse32.v v8, (sp)
-; LMULMAX8-NEXT:    addi a0, zero, 8
-; LMULMAX8-NEXT:    addi a1, zero, 1
-; LMULMAX8-NEXT:    addi a2, zero, 2
-; LMULMAX8-NEXT:    addi a3, zero, 3
-; LMULMAX8-NEXT:    addi a4, zero, 4
-; LMULMAX8-NEXT:    addi a5, zero, 5
-; LMULMAX8-NEXT:    addi a6, zero, 6
-; LMULMAX8-NEXT:    addi a7, zero, 7
+; LMULMAX8-NEXT:    li a0, 8
+; LMULMAX8-NEXT:    li a1, 1
+; LMULMAX8-NEXT:    li a2, 2
+; LMULMAX8-NEXT:    li a3, 3
+; LMULMAX8-NEXT:    li a4, 4
+; LMULMAX8-NEXT:    li a5, 5
+; LMULMAX8-NEXT:    li a6, 6
+; LMULMAX8-NEXT:    li a7, 7
 ; LMULMAX8-NEXT:    sd a0, 128(sp)
-; LMULMAX8-NEXT:    mv a0, zero
+; LMULMAX8-NEXT:    li a0, 0
 ; LMULMAX8-NEXT:    vmv.v.i v16, 0
 ; LMULMAX8-NEXT:    call vector_arg_via_stack@plt
 ; LMULMAX8-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
@@ -1294,21 +1294,21 @@ define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x
 ; LMULMAX4-NEXT:    .cfi_def_cfa_offset 144
 ; LMULMAX4-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
 ; LMULMAX4-NEXT:    .cfi_offset ra, -8
-; LMULMAX4-NEXT:    addi a0, zero, 8
+; LMULMAX4-NEXT:    li a0, 8
 ; LMULMAX4-NEXT:    sd a0, 128(sp)
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; LMULMAX4-NEXT:    vmv.v.i v8, 0
 ; LMULMAX4-NEXT:    vse32.v v8, (sp)
 ; LMULMAX4-NEXT:    addi a0, sp, 64
-; LMULMAX4-NEXT:    addi a1, zero, 1
-; LMULMAX4-NEXT:    addi a2, zero, 2
-; LMULMAX4-NEXT:    addi a3, zero, 3
-; LMULMAX4-NEXT:    addi a4, zero, 4
-; LMULMAX4-NEXT:    addi a5, zero, 5
-; LMULMAX4-NEXT:    addi a6, zero, 6
-; LMULMAX4-NEXT:    addi a7, zero, 7
+; LMULMAX4-NEXT:    li a1, 1
+; LMULMAX4-NEXT:    li a2, 2
+; LMULMAX4-NEXT:    li a3, 3
+; LMULMAX4-NEXT:    li a4, 4
+; LMULMAX4-NEXT:    li a5, 5
+; LMULMAX4-NEXT:    li a6, 6
+; LMULMAX4-NEXT:    li a7, 7
 ; LMULMAX4-NEXT:    vse32.v v8, (a0)
-; LMULMAX4-NEXT:    mv a0, zero
+; LMULMAX4-NEXT:    li a0, 0
 ; LMULMAX4-NEXT:    vmv.v.i v12, 0
 ; LMULMAX4-NEXT:    vmv.v.i v16, 0
 ; LMULMAX4-NEXT:    vmv.v.i v20, 0
@@ -1323,7 +1323,7 @@ define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x
 ; LMULMAX2-NEXT:    .cfi_def_cfa_offset 144
 ; LMULMAX2-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
 ; LMULMAX2-NEXT:    .cfi_offset ra, -8
-; LMULMAX2-NEXT:    addi a0, zero, 8
+; LMULMAX2-NEXT:    li a0, 8
 ; LMULMAX2-NEXT:    sd a0, 128(sp)
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vmv.v.i v8, 0
@@ -1333,15 +1333,15 @@ define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x
 ; LMULMAX2-NEXT:    addi a0, sp, 64
 ; LMULMAX2-NEXT:    vse32.v v8, (a0)
 ; LMULMAX2-NEXT:    addi a0, sp, 32
-; LMULMAX2-NEXT:    addi a1, zero, 1
-; LMULMAX2-NEXT:    addi a2, zero, 2
-; LMULMAX2-NEXT:    addi a3, zero, 3
-; LMULMAX2-NEXT:    addi a4, zero, 4
-; LMULMAX2-NEXT:    addi a5, zero, 5
-; LMULMAX2-NEXT:    addi a6, zero, 6
-; LMULMAX2-NEXT:    addi a7, zero, 7
+; LMULMAX2-NEXT:    li a1, 1
+; LMULMAX2-NEXT:    li a2, 2
+; LMULMAX2-NEXT:    li a3, 3
+; LMULMAX2-NEXT:    li a4, 4
+; LMULMAX2-NEXT:    li a5, 5
+; LMULMAX2-NEXT:    li a6, 6
+; LMULMAX2-NEXT:    li a7, 7
 ; LMULMAX2-NEXT:    vse32.v v8, (a0)
-; LMULMAX2-NEXT:    mv a0, zero
+; LMULMAX2-NEXT:    li a0, 0
 ; LMULMAX2-NEXT:    vmv.v.i v10, 0
 ; LMULMAX2-NEXT:    vmv.v.i v12, 0
 ; LMULMAX2-NEXT:    vmv.v.i v14, 0
@@ -1360,7 +1360,7 @@ define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x
 ; LMULMAX1-NEXT:    .cfi_def_cfa_offset 144
 ; LMULMAX1-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
 ; LMULMAX1-NEXT:    .cfi_offset ra, -8
-; LMULMAX1-NEXT:    addi a0, zero, 8
+; LMULMAX1-NEXT:    li a0, 8
 ; LMULMAX1-NEXT:    sd a0, 128(sp)
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vmv.v.i v8, 0
@@ -1378,15 +1378,15 @@ define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x
 ; LMULMAX1-NEXT:    addi a0, sp, 32
 ; LMULMAX1-NEXT:    vse32.v v8, (a0)
 ; LMULMAX1-NEXT:    addi a0, sp, 16
-; LMULMAX1-NEXT:    addi a1, zero, 1
-; LMULMAX1-NEXT:    addi a2, zero, 2
-; LMULMAX1-NEXT:    addi a3, zero, 3
-; LMULMAX1-NEXT:    addi a4, zero, 4
-; LMULMAX1-NEXT:    addi a5, zero, 5
-; LMULMAX1-NEXT:    addi a6, zero, 6
-; LMULMAX1-NEXT:    addi a7, zero, 7
+; LMULMAX1-NEXT:    li a1, 1
+; LMULMAX1-NEXT:    li a2, 2
+; LMULMAX1-NEXT:    li a3, 3
+; LMULMAX1-NEXT:    li a4, 4
+; LMULMAX1-NEXT:    li a5, 5
+; LMULMAX1-NEXT:    li a6, 6
+; LMULMAX1-NEXT:    li a7, 7
 ; LMULMAX1-NEXT:    vse32.v v8, (a0)
-; LMULMAX1-NEXT:    mv a0, zero
+; LMULMAX1-NEXT:    li a0, 0
 ; LMULMAX1-NEXT:    vmv.v.i v9, 0
 ; LMULMAX1-NEXT:    vmv.v.i v10, 0
 ; LMULMAX1-NEXT:    vmv.v.i v11, 0
@@ -1434,11 +1434,11 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; LMULMAX8-NEXT:    .cfi_def_cfa_offset 160
 ; LMULMAX8-NEXT:    sd ra, 152(sp) # 8-byte Folded Spill
 ; LMULMAX8-NEXT:    .cfi_offset ra, -8
-; LMULMAX8-NEXT:    addi a0, zero, 32
+; LMULMAX8-NEXT:    li a0, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vmv.v.i v8, 0
 ; LMULMAX8-NEXT:    vse32.v v8, (sp)
-; LMULMAX8-NEXT:    addi a0, zero, 8
+; LMULMAX8-NEXT:    li a0, 8
 ; LMULMAX8-NEXT:    sd a0, 128(sp)
 ; LMULMAX8-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; LMULMAX8-NEXT:    vmv.v.i v16, 0
@@ -1450,15 +1450,15 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; LMULMAX8-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX8-NEXT:    vmsne.vi v16, v17, 0
 ; LMULMAX8-NEXT:    addi a0, sp, 136
-; LMULMAX8-NEXT:    addi a5, zero, 5
-; LMULMAX8-NEXT:    addi a6, zero, 6
-; LMULMAX8-NEXT:    addi a7, zero, 7
+; LMULMAX8-NEXT:    li a5, 5
+; LMULMAX8-NEXT:    li a6, 6
+; LMULMAX8-NEXT:    li a7, 7
 ; LMULMAX8-NEXT:    vsm.v v16, (a0)
-; LMULMAX8-NEXT:    mv a0, zero
-; LMULMAX8-NEXT:    mv a1, zero
-; LMULMAX8-NEXT:    mv a2, zero
-; LMULMAX8-NEXT:    mv a3, zero
-; LMULMAX8-NEXT:    mv a4, zero
+; LMULMAX8-NEXT:    li a0, 0
+; LMULMAX8-NEXT:    li a1, 0
+; LMULMAX8-NEXT:    li a2, 0
+; LMULMAX8-NEXT:    li a3, 0
+; LMULMAX8-NEXT:    li a4, 0
 ; LMULMAX8-NEXT:    vmv8r.v v16, v8
 ; LMULMAX8-NEXT:    call vector_mask_arg_via_stack@plt
 ; LMULMAX8-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
@@ -1471,7 +1471,7 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; LMULMAX4-NEXT:    .cfi_def_cfa_offset 160
 ; LMULMAX4-NEXT:    sd ra, 152(sp) # 8-byte Folded Spill
 ; LMULMAX4-NEXT:    .cfi_offset ra, -8
-; LMULMAX4-NEXT:    addi a0, zero, 8
+; LMULMAX4-NEXT:    li a0, 8
 ; LMULMAX4-NEXT:    sd a0, 128(sp)
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; LMULMAX4-NEXT:    vmv.v.i v8, 0
@@ -1488,15 +1488,15 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; LMULMAX4-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX4-NEXT:    vmsne.vi v12, v13, 0
 ; LMULMAX4-NEXT:    addi a0, sp, 136
-; LMULMAX4-NEXT:    addi a5, zero, 5
-; LMULMAX4-NEXT:    addi a6, zero, 6
-; LMULMAX4-NEXT:    addi a7, zero, 7
+; LMULMAX4-NEXT:    li a5, 5
+; LMULMAX4-NEXT:    li a6, 6
+; LMULMAX4-NEXT:    li a7, 7
 ; LMULMAX4-NEXT:    vsm.v v12, (a0)
-; LMULMAX4-NEXT:    mv a0, zero
-; LMULMAX4-NEXT:    mv a1, zero
-; LMULMAX4-NEXT:    mv a2, zero
-; LMULMAX4-NEXT:    mv a3, zero
-; LMULMAX4-NEXT:    mv a4, zero
+; LMULMAX4-NEXT:    li a0, 0
+; LMULMAX4-NEXT:    li a1, 0
+; LMULMAX4-NEXT:    li a2, 0
+; LMULMAX4-NEXT:    li a3, 0
+; LMULMAX4-NEXT:    li a4, 0
 ; LMULMAX4-NEXT:    vmv4r.v v12, v8
 ; LMULMAX4-NEXT:    vmv4r.v v16, v8
 ; LMULMAX4-NEXT:    vmv4r.v v20, v8
@@ -1511,7 +1511,7 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; LMULMAX2-NEXT:    .cfi_def_cfa_offset 160
 ; LMULMAX2-NEXT:    sd ra, 152(sp) # 8-byte Folded Spill
 ; LMULMAX2-NEXT:    .cfi_offset ra, -8
-; LMULMAX2-NEXT:    addi a0, zero, 8
+; LMULMAX2-NEXT:    li a0, 8
 ; LMULMAX2-NEXT:    sd a0, 128(sp)
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vmv.v.i v8, 0
@@ -1532,15 +1532,15 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX2-NEXT:    vmsne.vi v10, v11, 0
 ; LMULMAX2-NEXT:    addi a0, sp, 136
-; LMULMAX2-NEXT:    addi a5, zero, 5
-; LMULMAX2-NEXT:    addi a6, zero, 6
-; LMULMAX2-NEXT:    addi a7, zero, 7
+; LMULMAX2-NEXT:    li a5, 5
+; LMULMAX2-NEXT:    li a6, 6
+; LMULMAX2-NEXT:    li a7, 7
 ; LMULMAX2-NEXT:    vsm.v v10, (a0)
-; LMULMAX2-NEXT:    mv a0, zero
-; LMULMAX2-NEXT:    mv a1, zero
-; LMULMAX2-NEXT:    mv a2, zero
-; LMULMAX2-NEXT:    mv a3, zero
-; LMULMAX2-NEXT:    mv a4, zero
+; LMULMAX2-NEXT:    li a0, 0
+; LMULMAX2-NEXT:    li a1, 0
+; LMULMAX2-NEXT:    li a2, 0
+; LMULMAX2-NEXT:    li a3, 0
+; LMULMAX2-NEXT:    li a4, 0
 ; LMULMAX2-NEXT:    vmv2r.v v10, v8
 ; LMULMAX2-NEXT:    vmv2r.v v12, v8
 ; LMULMAX2-NEXT:    vmv2r.v v14, v8
@@ -1559,7 +1559,7 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; LMULMAX1-NEXT:    .cfi_def_cfa_offset 160
 ; LMULMAX1-NEXT:    sd ra, 152(sp) # 8-byte Folded Spill
 ; LMULMAX1-NEXT:    .cfi_offset ra, -8
-; LMULMAX1-NEXT:    addi a0, zero, 8
+; LMULMAX1-NEXT:    li a0, 8
 ; LMULMAX1-NEXT:    sd a0, 128(sp)
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vmv.v.i v8, 0
@@ -1588,15 +1588,15 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vmsne.vi v9, v10, 0
 ; LMULMAX1-NEXT:    addi a0, sp, 136
-; LMULMAX1-NEXT:    addi a5, zero, 5
-; LMULMAX1-NEXT:    addi a6, zero, 6
-; LMULMAX1-NEXT:    addi a7, zero, 7
+; LMULMAX1-NEXT:    li a5, 5
+; LMULMAX1-NEXT:    li a6, 6
+; LMULMAX1-NEXT:    li a7, 7
 ; LMULMAX1-NEXT:    vsm.v v9, (a0)
-; LMULMAX1-NEXT:    mv a0, zero
-; LMULMAX1-NEXT:    mv a1, zero
-; LMULMAX1-NEXT:    mv a2, zero
-; LMULMAX1-NEXT:    mv a3, zero
-; LMULMAX1-NEXT:    mv a4, zero
+; LMULMAX1-NEXT:    li a0, 0
+; LMULMAX1-NEXT:    li a1, 0
+; LMULMAX1-NEXT:    li a2, 0
+; LMULMAX1-NEXT:    li a3, 0
+; LMULMAX1-NEXT:    li a4, 0
 ; LMULMAX1-NEXT:    vmv1r.v v9, v8
 ; LMULMAX1-NEXT:    vmv1r.v v10, v8
 ; LMULMAX1-NEXT:    vmv1r.v v11, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
index 1e68636edd57..6b1573c0dc1e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
@@ -23,10 +23,10 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX2-RV32-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX2-RV32-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v9, v8, 1
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 85
+; LMULMAX2-RV32-NEXT:    li a1, 85
 ; LMULMAX2-RV32-NEXT:    vand.vx v9, v9, a1
 ; LMULMAX2-RV32-NEXT:    vsub.vv v8, v8, v9
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 51
+; LMULMAX2-RV32-NEXT:    li a1, 51
 ; LMULMAX2-RV32-NEXT:    vand.vx v9, v8, a1
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX2-RV32-NEXT:    vand.vx v8, v8, a1
@@ -49,10 +49,10 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v9, v8, 1
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 85
+; LMULMAX2-RV64-NEXT:    li a1, 85
 ; LMULMAX2-RV64-NEXT:    vand.vx v9, v9, a1
 ; LMULMAX2-RV64-NEXT:    vsub.vv v8, v8, v9
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 51
+; LMULMAX2-RV64-NEXT:    li a1, 51
 ; LMULMAX2-RV64-NEXT:    vand.vx v9, v8, a1
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX2-RV64-NEXT:    vand.vx v8, v8, a1
@@ -75,10 +75,10 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX1-RV32-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v9, v8, 1
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 85
+; LMULMAX1-RV32-NEXT:    li a1, 85
 ; LMULMAX1-RV32-NEXT:    vand.vx v9, v9, a1
 ; LMULMAX1-RV32-NEXT:    vsub.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 51
+; LMULMAX1-RV32-NEXT:    li a1, 51
 ; LMULMAX1-RV32-NEXT:    vand.vx v9, v8, a1
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX1-RV32-NEXT:    vand.vx v8, v8, a1
@@ -101,10 +101,10 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v9, v8, 1
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 85
+; LMULMAX1-RV64-NEXT:    li a1, 85
 ; LMULMAX1-RV64-NEXT:    vand.vx v9, v9, a1
 ; LMULMAX1-RV64-NEXT:    vsub.vv v8, v8, v9
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 51
+; LMULMAX1-RV64-NEXT:    li a1, 51
 ; LMULMAX1-RV64-NEXT:    vand.vx v9, v8, a1
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX1-RV64-NEXT:    vand.vx v8, v8, a1
@@ -126,7 +126,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vnsrl.wi v10, v12, 23
 ; LMULMAX8-RV32-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; LMULMAX8-RV32-NEXT:    vnsrl.wi v9, v10, 0
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 134
+; LMULMAX8-RV32-NEXT:    li a1, 134
 ; LMULMAX8-RV32-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-RV32-NEXT:    vrsub.vx v8, v9, a1
 ; LMULMAX8-RV32-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -144,7 +144,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vnsrl.wi v10, v12, 23
 ; LMULMAX8-RV64-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; LMULMAX8-RV64-NEXT:    vnsrl.wi v9, v10, 0
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 134
+; LMULMAX8-RV64-NEXT:    li a1, 134
 ; LMULMAX8-RV64-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-RV64-NEXT:    vrsub.vx v8, v9, a1
 ; LMULMAX8-RV64-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -188,7 +188,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX2-RV32I-NEXT:    lui a1, 1
 ; LMULMAX2-RV32I-NEXT:    addi a1, a1, -241
 ; LMULMAX2-RV32I-NEXT:    vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT:    addi a1, zero, 257
+; LMULMAX2-RV32I-NEXT:    li a1, 257
 ; LMULMAX2-RV32I-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX2-RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX2-RV32I-NEXT:    vse16.v v8, (a0)
@@ -223,7 +223,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX2-RV64I-NEXT:    lui a1, 1
 ; LMULMAX2-RV64I-NEXT:    addiw a1, a1, -241
 ; LMULMAX2-RV64I-NEXT:    vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT:    addi a1, zero, 257
+; LMULMAX2-RV64I-NEXT:    li a1, 257
 ; LMULMAX2-RV64I-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX2-RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX2-RV64I-NEXT:    vse16.v v8, (a0)
@@ -258,7 +258,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    lui a1, 1
 ; LMULMAX1-RV32-NEXT:    addi a1, a1, -241
 ; LMULMAX1-RV32-NEXT:    vand.vx v8, v8, a1
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 257
+; LMULMAX1-RV32-NEXT:    li a1, 257
 ; LMULMAX1-RV32-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX1-RV32-NEXT:    vse16.v v8, (a0)
@@ -293,7 +293,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    lui a1, 1
 ; LMULMAX1-RV64-NEXT:    addiw a1, a1, -241
 ; LMULMAX1-RV64-NEXT:    vand.vx v8, v8, a1
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 257
+; LMULMAX1-RV64-NEXT:    li a1, 257
 ; LMULMAX1-RV64-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX1-RV64-NEXT:    vse16.v v8, (a0)
@@ -305,10 +305,10 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX2-RV32D-NEXT:    vle16.v v8, (a0)
 ; LMULMAX2-RV32D-NEXT:    vfwcvt.f.xu.v v10, v8
 ; LMULMAX2-RV32D-NEXT:    vnsrl.wi v9, v10, 23
-; LMULMAX2-RV32D-NEXT:    addi a1, zero, 142
+; LMULMAX2-RV32D-NEXT:    li a1, 142
 ; LMULMAX2-RV32D-NEXT:    vrsub.vx v9, v9, a1
 ; LMULMAX2-RV32D-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX2-RV32D-NEXT:    addi a1, zero, 16
+; LMULMAX2-RV32D-NEXT:    li a1, 16
 ; LMULMAX2-RV32D-NEXT:    vmerge.vxm v8, v9, a1, v0
 ; LMULMAX2-RV32D-NEXT:    vse16.v v8, (a0)
 ; LMULMAX2-RV32D-NEXT:    ret
@@ -319,10 +319,10 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX2-RV64D-NEXT:    vle16.v v8, (a0)
 ; LMULMAX2-RV64D-NEXT:    vfwcvt.f.xu.v v10, v8
 ; LMULMAX2-RV64D-NEXT:    vnsrl.wi v9, v10, 23
-; LMULMAX2-RV64D-NEXT:    addi a1, zero, 142
+; LMULMAX2-RV64D-NEXT:    li a1, 142
 ; LMULMAX2-RV64D-NEXT:    vrsub.vx v9, v9, a1
 ; LMULMAX2-RV64D-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX2-RV64D-NEXT:    addi a1, zero, 16
+; LMULMAX2-RV64D-NEXT:    li a1, 16
 ; LMULMAX2-RV64D-NEXT:    vmerge.vxm v8, v9, a1, v0
 ; LMULMAX2-RV64D-NEXT:    vse16.v v8, (a0)
 ; LMULMAX2-RV64D-NEXT:    ret
@@ -333,10 +333,10 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vle16.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    vfwcvt.f.xu.v v10, v8
 ; LMULMAX8-RV32-NEXT:    vnsrl.wi v9, v10, 23
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 142
+; LMULMAX8-RV32-NEXT:    li a1, 142
 ; LMULMAX8-RV32-NEXT:    vrsub.vx v9, v9, a1
 ; LMULMAX8-RV32-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 16
+; LMULMAX8-RV32-NEXT:    li a1, 16
 ; LMULMAX8-RV32-NEXT:    vmerge.vxm v8, v9, a1, v0
 ; LMULMAX8-RV32-NEXT:    vse16.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    ret
@@ -347,10 +347,10 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vle16.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    vfwcvt.f.xu.v v10, v8
 ; LMULMAX8-RV64-NEXT:    vnsrl.wi v9, v10, 23
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 142
+; LMULMAX8-RV64-NEXT:    li a1, 142
 ; LMULMAX8-RV64-NEXT:    vrsub.vx v9, v9, a1
 ; LMULMAX8-RV64-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 16
+; LMULMAX8-RV64-NEXT:    li a1, 16
 ; LMULMAX8-RV64-NEXT:    vmerge.vxm v8, v9, a1, v0
 ; LMULMAX8-RV64-NEXT:    vse16.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    ret
@@ -520,12 +520,12 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX2-RV32D-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX2-RV32D-NEXT:    vle32.v v8, (a0)
 ; LMULMAX2-RV32D-NEXT:    vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV32D-NEXT:    addi a1, zero, 52
+; LMULMAX2-RV32D-NEXT:    li a1, 52
 ; LMULMAX2-RV32D-NEXT:    vnsrl.wx v9, v10, a1
-; LMULMAX2-RV32D-NEXT:    addi a1, zero, 1054
+; LMULMAX2-RV32D-NEXT:    li a1, 1054
 ; LMULMAX2-RV32D-NEXT:    vrsub.vx v9, v9, a1
 ; LMULMAX2-RV32D-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX2-RV32D-NEXT:    addi a1, zero, 32
+; LMULMAX2-RV32D-NEXT:    li a1, 32
 ; LMULMAX2-RV32D-NEXT:    vmerge.vxm v8, v9, a1, v0
 ; LMULMAX2-RV32D-NEXT:    vse32.v v8, (a0)
 ; LMULMAX2-RV32D-NEXT:    ret
@@ -535,12 +535,12 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX2-RV64D-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX2-RV64D-NEXT:    vle32.v v8, (a0)
 ; LMULMAX2-RV64D-NEXT:    vfwcvt.f.xu.v v10, v8
-; LMULMAX2-RV64D-NEXT:    addi a1, zero, 52
+; LMULMAX2-RV64D-NEXT:    li a1, 52
 ; LMULMAX2-RV64D-NEXT:    vnsrl.wx v9, v10, a1
-; LMULMAX2-RV64D-NEXT:    addi a1, zero, 1054
+; LMULMAX2-RV64D-NEXT:    li a1, 1054
 ; LMULMAX2-RV64D-NEXT:    vrsub.vx v9, v9, a1
 ; LMULMAX2-RV64D-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX2-RV64D-NEXT:    addi a1, zero, 32
+; LMULMAX2-RV64D-NEXT:    li a1, 32
 ; LMULMAX2-RV64D-NEXT:    vmerge.vxm v8, v9, a1, v0
 ; LMULMAX2-RV64D-NEXT:    vse32.v v8, (a0)
 ; LMULMAX2-RV64D-NEXT:    ret
@@ -550,12 +550,12 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX8-RV32-NEXT:    vle32.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    vfwcvt.f.xu.v v10, v8
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 52
+; LMULMAX8-RV32-NEXT:    li a1, 52
 ; LMULMAX8-RV32-NEXT:    vnsrl.wx v9, v10, a1
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 1054
+; LMULMAX8-RV32-NEXT:    li a1, 1054
 ; LMULMAX8-RV32-NEXT:    vrsub.vx v9, v9, a1
 ; LMULMAX8-RV32-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 32
+; LMULMAX8-RV32-NEXT:    li a1, 32
 ; LMULMAX8-RV32-NEXT:    vmerge.vxm v8, v9, a1, v0
 ; LMULMAX8-RV32-NEXT:    vse32.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    ret
@@ -565,12 +565,12 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX8-RV64-NEXT:    vle32.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    vfwcvt.f.xu.v v10, v8
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 52
+; LMULMAX8-RV64-NEXT:    li a1, 52
 ; LMULMAX8-RV64-NEXT:    vnsrl.wx v9, v10, a1
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 1054
+; LMULMAX8-RV64-NEXT:    li a1, 1054
 ; LMULMAX8-RV64-NEXT:    vrsub.vx v9, v9, a1
 ; LMULMAX8-RV64-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 32
+; LMULMAX8-RV64-NEXT:    li a1, 32
 ; LMULMAX8-RV64-NEXT:    vmerge.vxm v8, v9, a1, v0
 ; LMULMAX8-RV64-NEXT:    vse32.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    ret
@@ -597,7 +597,7 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX2-RV32-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v9, v8, 16
 ; LMULMAX2-RV32-NEXT:    vor.vv v8, v8, v9
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 32
+; LMULMAX2-RV32-NEXT:    li a1, 32
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v9, v8, a1
 ; LMULMAX2-RV32-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -635,7 +635,7 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX2-RV32-NEXT:    vmv.v.x v9, a1
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmul.vv v8, v8, v9
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV32-NEXT:    li a1, 56
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX2-RV32-NEXT:    vse64.v v8, (a0)
 ; LMULMAX2-RV32-NEXT:    ret
@@ -654,7 +654,7 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v9, v8, 16
 ; LMULMAX2-RV64-NEXT:    vor.vv v8, v8, v9
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 32
+; LMULMAX2-RV64-NEXT:    li a1, 32
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v9, v8, a1
 ; LMULMAX2-RV64-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
@@ -699,7 +699,7 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    slli a1, a1, 16
 ; LMULMAX2-RV64-NEXT:    addi a1, a1, 257
 ; LMULMAX2-RV64-NEXT:    vmul.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV64-NEXT:    li a1, 56
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX2-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX2-RV64-NEXT:    ret
@@ -718,7 +718,7 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v9, v8, 16
 ; LMULMAX1-RV32-NEXT:    vor.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 32
+; LMULMAX1-RV32-NEXT:    li a1, 32
 ; LMULMAX1-RV32-NEXT:    vsrl.vx v9, v8, a1
 ; LMULMAX1-RV32-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -756,7 +756,7 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vmv.v.x v9, a1
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmul.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX1-RV32-NEXT:    li a1, 56
 ; LMULMAX1-RV32-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX1-RV32-NEXT:    vse64.v v8, (a0)
 ; LMULMAX1-RV32-NEXT:    ret
@@ -775,7 +775,7 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v9, v8, 16
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v9
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 32
+; LMULMAX1-RV64-NEXT:    li a1, 32
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v9, v8, a1
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
@@ -820,7 +820,7 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    slli a1, a1, 16
 ; LMULMAX1-RV64-NEXT:    addi a1, a1, 257
 ; LMULMAX1-RV64-NEXT:    vmul.vx v8, v8, a1
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX1-RV64-NEXT:    li a1, 56
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX1-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
@@ -839,7 +839,7 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX8-RV32-NEXT:    vsrl.vi v9, v8, 16
 ; LMULMAX8-RV32-NEXT:    vor.vv v8, v8, v9
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 32
+; LMULMAX8-RV32-NEXT:    li a1, 32
 ; LMULMAX8-RV32-NEXT:    vsrl.vx v9, v8, a1
 ; LMULMAX8-RV32-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX8-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -877,7 +877,7 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vmv.v.x v9, a1
 ; LMULMAX8-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX8-RV32-NEXT:    vmul.vv v8, v8, v9
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX8-RV32-NEXT:    li a1, 56
 ; LMULMAX8-RV32-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX8-RV32-NEXT:    vse64.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    ret
@@ -896,7 +896,7 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX8-RV64-NEXT:    vsrl.vi v9, v8, 16
 ; LMULMAX8-RV64-NEXT:    vor.vv v8, v8, v9
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 32
+; LMULMAX8-RV64-NEXT:    li a1, 32
 ; LMULMAX8-RV64-NEXT:    vsrl.vx v9, v8, a1
 ; LMULMAX8-RV64-NEXT:    vor.vv v8, v8, v9
 ; LMULMAX8-RV64-NEXT:    vxor.vi v8, v8, -1
@@ -941,7 +941,7 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    slli a1, a1, 16
 ; LMULMAX8-RV64-NEXT:    addi a1, a1, 257
 ; LMULMAX8-RV64-NEXT:    vmul.vx v8, v8, a1
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX8-RV64-NEXT:    li a1, 56
 ; LMULMAX8-RV64-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX8-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    ret
@@ -956,7 +956,7 @@ declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1)
 define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX2-RV32-LABEL: ctlz_v32i8:
 ; LMULMAX2-RV32:       # %bb.0:
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 32
+; LMULMAX2-RV32-NEXT:    li a1, 32
 ; LMULMAX2-RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v10, v8, 1
@@ -967,10 +967,10 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX2-RV32-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX2-RV32-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v10, v8, 1
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 85
+; LMULMAX2-RV32-NEXT:    li a1, 85
 ; LMULMAX2-RV32-NEXT:    vand.vx v10, v10, a1
 ; LMULMAX2-RV32-NEXT:    vsub.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 51
+; LMULMAX2-RV32-NEXT:    li a1, 51
 ; LMULMAX2-RV32-NEXT:    vand.vx v10, v8, a1
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX2-RV32-NEXT:    vand.vx v8, v8, a1
@@ -983,7 +983,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ;
 ; LMULMAX2-RV64-LABEL: ctlz_v32i8:
 ; LMULMAX2-RV64:       # %bb.0:
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 32
+; LMULMAX2-RV64-NEXT:    li a1, 32
 ; LMULMAX2-RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v10, v8, 1
@@ -994,10 +994,10 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v10, v8, 1
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 85
+; LMULMAX2-RV64-NEXT:    li a1, 85
 ; LMULMAX2-RV64-NEXT:    vand.vx v10, v10, a1
 ; LMULMAX2-RV64-NEXT:    vsub.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 51
+; LMULMAX2-RV64-NEXT:    li a1, 51
 ; LMULMAX2-RV64-NEXT:    vand.vx v10, v8, a1
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX2-RV64-NEXT:    vand.vx v8, v8, a1
@@ -1022,10 +1022,10 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX1-RV32-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v8, 1
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 85
+; LMULMAX1-RV32-NEXT:    li a2, 85
 ; LMULMAX1-RV32-NEXT:    vand.vx v10, v10, a2
 ; LMULMAX1-RV32-NEXT:    vsub.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT:    addi a3, zero, 51
+; LMULMAX1-RV32-NEXT:    li a3, 51
 ; LMULMAX1-RV32-NEXT:    vand.vx v10, v8, a3
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX1-RV32-NEXT:    vand.vx v8, v8, a3
@@ -1068,10 +1068,10 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v8, 1
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 85
+; LMULMAX1-RV64-NEXT:    li a2, 85
 ; LMULMAX1-RV64-NEXT:    vand.vx v10, v10, a2
 ; LMULMAX1-RV64-NEXT:    vsub.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT:    addi a3, zero, 51
+; LMULMAX1-RV64-NEXT:    li a3, 51
 ; LMULMAX1-RV64-NEXT:    vand.vx v10, v8, a3
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX1-RV64-NEXT:    vand.vx v8, v8, a3
@@ -1102,7 +1102,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ;
 ; LMULMAX8-RV32-LABEL: ctlz_v32i8:
 ; LMULMAX8-RV32:       # %bb.0:
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 32
+; LMULMAX8-RV32-NEXT:    li a1, 32
 ; LMULMAX8-RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX8-RV32-NEXT:    vle8.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
@@ -1112,7 +1112,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vnsrl.wi v12, v16, 23
 ; LMULMAX8-RV32-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; LMULMAX8-RV32-NEXT:    vnsrl.wi v10, v12, 0
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 134
+; LMULMAX8-RV32-NEXT:    li a1, 134
 ; LMULMAX8-RV32-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-RV32-NEXT:    vrsub.vx v8, v10, a1
 ; LMULMAX8-RV32-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -1121,7 +1121,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ;
 ; LMULMAX8-RV64-LABEL: ctlz_v32i8:
 ; LMULMAX8-RV64:       # %bb.0:
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 32
+; LMULMAX8-RV64-NEXT:    li a1, 32
 ; LMULMAX8-RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX8-RV64-NEXT:    vle8.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
@@ -1131,7 +1131,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vnsrl.wi v12, v16, 23
 ; LMULMAX8-RV64-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; LMULMAX8-RV64-NEXT:    vnsrl.wi v10, v12, 0
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 134
+; LMULMAX8-RV64-NEXT:    li a1, 134
 ; LMULMAX8-RV64-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-RV64-NEXT:    vrsub.vx v8, v10, a1
 ; LMULMAX8-RV64-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -1175,7 +1175,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX2-RV32-NEXT:    lui a1, 1
 ; LMULMAX2-RV32-NEXT:    addi a1, a1, -241
 ; LMULMAX2-RV32-NEXT:    vand.vx v8, v8, a1
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 257
+; LMULMAX2-RV32-NEXT:    li a1, 257
 ; LMULMAX2-RV32-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX2-RV32-NEXT:    vse16.v v8, (a0)
@@ -1210,7 +1210,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    lui a1, 1
 ; LMULMAX2-RV64-NEXT:    addiw a1, a1, -241
 ; LMULMAX2-RV64-NEXT:    vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 257
+; LMULMAX2-RV64-NEXT:    li a1, 257
 ; LMULMAX2-RV64-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX2-RV64-NEXT:    vse16.v v8, (a0)
@@ -1247,7 +1247,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    lui a4, 1
 ; LMULMAX1-RV32-NEXT:    addi a4, a4, -241
 ; LMULMAX1-RV32-NEXT:    vand.vx v8, v8, a4
-; LMULMAX1-RV32-NEXT:    addi a5, zero, 257
+; LMULMAX1-RV32-NEXT:    li a5, 257
 ; LMULMAX1-RV32-NEXT:    vmul.vx v8, v8, a5
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v9, 1
@@ -1306,7 +1306,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    lui a4, 1
 ; LMULMAX1-RV64-NEXT:    addiw a4, a4, -241
 ; LMULMAX1-RV64-NEXT:    vand.vx v8, v8, a4
-; LMULMAX1-RV64-NEXT:    addi a5, zero, 257
+; LMULMAX1-RV64-NEXT:    li a5, 257
 ; LMULMAX1-RV64-NEXT:    vmul.vx v8, v8, a5
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v9, 1
@@ -1340,10 +1340,10 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vle16.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    vfwcvt.f.xu.v v12, v8
 ; LMULMAX8-RV32-NEXT:    vnsrl.wi v10, v12, 23
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 142
+; LMULMAX8-RV32-NEXT:    li a1, 142
 ; LMULMAX8-RV32-NEXT:    vrsub.vx v10, v10, a1
 ; LMULMAX8-RV32-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 16
+; LMULMAX8-RV32-NEXT:    li a1, 16
 ; LMULMAX8-RV32-NEXT:    vmerge.vxm v8, v10, a1, v0
 ; LMULMAX8-RV32-NEXT:    vse16.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    ret
@@ -1354,10 +1354,10 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vle16.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    vfwcvt.f.xu.v v12, v8
 ; LMULMAX8-RV64-NEXT:    vnsrl.wi v10, v12, 23
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 142
+; LMULMAX8-RV64-NEXT:    li a1, 142
 ; LMULMAX8-RV64-NEXT:    vrsub.vx v10, v10, a1
 ; LMULMAX8-RV64-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 16
+; LMULMAX8-RV64-NEXT:    li a1, 16
 ; LMULMAX8-RV64-NEXT:    vmerge.vxm v8, v10, a1, v0
 ; LMULMAX8-RV64-NEXT:    vse16.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    ret
@@ -1579,12 +1579,12 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX8-RV32-NEXT:    vle32.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    vfwcvt.f.xu.v v12, v8
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 52
+; LMULMAX8-RV32-NEXT:    li a1, 52
 ; LMULMAX8-RV32-NEXT:    vnsrl.wx v10, v12, a1
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 1054
+; LMULMAX8-RV32-NEXT:    li a1, 1054
 ; LMULMAX8-RV32-NEXT:    vrsub.vx v10, v10, a1
 ; LMULMAX8-RV32-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 32
+; LMULMAX8-RV32-NEXT:    li a1, 32
 ; LMULMAX8-RV32-NEXT:    vmerge.vxm v8, v10, a1, v0
 ; LMULMAX8-RV32-NEXT:    vse32.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    ret
@@ -1594,12 +1594,12 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX8-RV64-NEXT:    vle32.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    vfwcvt.f.xu.v v12, v8
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 52
+; LMULMAX8-RV64-NEXT:    li a1, 52
 ; LMULMAX8-RV64-NEXT:    vnsrl.wx v10, v12, a1
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 1054
+; LMULMAX8-RV64-NEXT:    li a1, 1054
 ; LMULMAX8-RV64-NEXT:    vrsub.vx v10, v10, a1
 ; LMULMAX8-RV64-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 32
+; LMULMAX8-RV64-NEXT:    li a1, 32
 ; LMULMAX8-RV64-NEXT:    vmerge.vxm v8, v10, a1, v0
 ; LMULMAX8-RV64-NEXT:    vse32.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    ret
@@ -1626,7 +1626,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX2-RV32-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v10, v8, 16
 ; LMULMAX2-RV32-NEXT:    vor.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 32
+; LMULMAX2-RV32-NEXT:    li a1, 32
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v10, v8, a1
 ; LMULMAX2-RV32-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
@@ -1664,7 +1664,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX2-RV32-NEXT:    vmv.v.x v10, a1
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmul.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV32-NEXT:    li a1, 56
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX2-RV32-NEXT:    vse64.v v8, (a0)
 ; LMULMAX2-RV32-NEXT:    ret
@@ -1683,7 +1683,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v10, v8, 16
 ; LMULMAX2-RV64-NEXT:    vor.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 32
+; LMULMAX2-RV64-NEXT:    li a1, 32
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v10, v8, a1
 ; LMULMAX2-RV64-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
@@ -1728,7 +1728,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    slli a1, a1, 16
 ; LMULMAX2-RV64-NEXT:    addi a1, a1, 257
 ; LMULMAX2-RV64-NEXT:    vmul.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV64-NEXT:    li a1, 56
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX2-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX2-RV64-NEXT:    ret
@@ -1749,7 +1749,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v8, 16
 ; LMULMAX1-RV32-NEXT:    vor.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 32
+; LMULMAX1-RV32-NEXT:    li a2, 32
 ; LMULMAX1-RV32-NEXT:    vsrl.vx v10, v8, a2
 ; LMULMAX1-RV32-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -1787,7 +1787,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vmv.v.x v14, a3
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmul.vv v8, v8, v14
-; LMULMAX1-RV32-NEXT:    addi a3, zero, 56
+; LMULMAX1-RV32-NEXT:    li a3, 56
 ; LMULMAX1-RV32-NEXT:    vsrl.vx v8, v8, a3
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v15, v9, 1
 ; LMULMAX1-RV32-NEXT:    vor.vv v9, v9, v15
@@ -1834,7 +1834,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v8, 16
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT:    addi a6, zero, 32
+; LMULMAX1-RV64-NEXT:    li a6, 32
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v10, v8, a6
 ; LMULMAX1-RV64-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
@@ -1879,7 +1879,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    slli a2, a2, 16
 ; LMULMAX1-RV64-NEXT:    addi a2, a2, 257
 ; LMULMAX1-RV64-NEXT:    vmul.vx v8, v8, a2
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX1-RV64-NEXT:    li a1, 56
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v9, 1
 ; LMULMAX1-RV64-NEXT:    vor.vv v9, v9, v10
@@ -1924,7 +1924,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX8-RV32-NEXT:    vsrl.vi v10, v8, 16
 ; LMULMAX8-RV32-NEXT:    vor.vv v8, v8, v10
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 32
+; LMULMAX8-RV32-NEXT:    li a1, 32
 ; LMULMAX8-RV32-NEXT:    vsrl.vx v10, v8, a1
 ; LMULMAX8-RV32-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX8-RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
@@ -1962,7 +1962,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vmv.v.x v10, a1
 ; LMULMAX8-RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX8-RV32-NEXT:    vmul.vv v8, v8, v10
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX8-RV32-NEXT:    li a1, 56
 ; LMULMAX8-RV32-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX8-RV32-NEXT:    vse64.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    ret
@@ -1981,7 +1981,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX8-RV64-NEXT:    vsrl.vi v10, v8, 16
 ; LMULMAX8-RV64-NEXT:    vor.vv v8, v8, v10
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 32
+; LMULMAX8-RV64-NEXT:    li a1, 32
 ; LMULMAX8-RV64-NEXT:    vsrl.vx v10, v8, a1
 ; LMULMAX8-RV64-NEXT:    vor.vv v8, v8, v10
 ; LMULMAX8-RV64-NEXT:    vxor.vi v8, v8, -1
@@ -2026,7 +2026,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    slli a1, a1, 16
 ; LMULMAX8-RV64-NEXT:    addi a1, a1, 257
 ; LMULMAX8-RV64-NEXT:    vmul.vx v8, v8, a1
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX8-RV64-NEXT:    li a1, 56
 ; LMULMAX8-RV64-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX8-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll
index 3a337724d35c..d3af39b15585 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll
@@ -10,10 +10,10 @@ define void @ctpop_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsrl.vi v9, v8, 1
-; CHECK-NEXT:    addi a1, zero, 85
+; CHECK-NEXT:    li a1, 85
 ; CHECK-NEXT:    vand.vx v9, v9, a1
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
-; CHECK-NEXT:    addi a1, zero, 51
+; CHECK-NEXT:    li a1, 51
 ; CHECK-NEXT:    vand.vx v9, v8, a1
 ; CHECK-NEXT:    vsrl.vi v8, v8, 2
 ; CHECK-NEXT:    vand.vx v8, v8, a1
@@ -52,7 +52,7 @@ define void @ctpop_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX2-RV32-NEXT:    lui a1, 1
 ; LMULMAX2-RV32-NEXT:    addi a1, a1, -241
 ; LMULMAX2-RV32-NEXT:    vand.vx v8, v8, a1
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 257
+; LMULMAX2-RV32-NEXT:    li a1, 257
 ; LMULMAX2-RV32-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX2-RV32-NEXT:    vse16.v v8, (a0)
@@ -78,7 +78,7 @@ define void @ctpop_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    lui a1, 1
 ; LMULMAX2-RV64-NEXT:    addiw a1, a1, -241
 ; LMULMAX2-RV64-NEXT:    vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 257
+; LMULMAX2-RV64-NEXT:    li a1, 257
 ; LMULMAX2-RV64-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX2-RV64-NEXT:    vse16.v v8, (a0)
@@ -104,7 +104,7 @@ define void @ctpop_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX1-RV32-NEXT:    lui a1, 1
 ; LMULMAX1-RV32-NEXT:    addi a1, a1, -241
 ; LMULMAX1-RV32-NEXT:    vand.vx v8, v8, a1
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 257
+; LMULMAX1-RV32-NEXT:    li a1, 257
 ; LMULMAX1-RV32-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX1-RV32-NEXT:    vse16.v v8, (a0)
@@ -130,7 +130,7 @@ define void @ctpop_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    lui a1, 1
 ; LMULMAX1-RV64-NEXT:    addiw a1, a1, -241
 ; LMULMAX1-RV64-NEXT:    vand.vx v8, v8, a1
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 257
+; LMULMAX1-RV64-NEXT:    li a1, 257
 ; LMULMAX1-RV64-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX1-RV64-NEXT:    vse16.v v8, (a0)
@@ -295,7 +295,7 @@ define void @ctpop_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV32-NEXT:    vmv.v.x v9, a1
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmul.vv v8, v8, v9
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV32-NEXT:    li a1, 56
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX2-RV32-NEXT:    vse64.v v8, (a0)
 ; LMULMAX2-RV32-NEXT:    ret
@@ -345,7 +345,7 @@ define void @ctpop_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX2-RV64-NEXT:    slli a1, a1, 16
 ; LMULMAX2-RV64-NEXT:    addi a1, a1, 257
 ; LMULMAX2-RV64-NEXT:    vmul.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV64-NEXT:    li a1, 56
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX2-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX2-RV64-NEXT:    ret
@@ -385,7 +385,7 @@ define void @ctpop_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV32-NEXT:    vmv.v.x v9, a1
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmul.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX1-RV32-NEXT:    li a1, 56
 ; LMULMAX1-RV32-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX1-RV32-NEXT:    vse64.v v8, (a0)
 ; LMULMAX1-RV32-NEXT:    ret
@@ -435,7 +435,7 @@ define void @ctpop_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    slli a1, a1, 16
 ; LMULMAX1-RV64-NEXT:    addi a1, a1, 257
 ; LMULMAX1-RV64-NEXT:    vmul.vx v8, v8, a1
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX1-RV64-NEXT:    li a1, 56
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX1-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
@@ -450,14 +450,14 @@ declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
 define void @ctpop_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: ctpop_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 32
+; LMULMAX2-NEXT:    li a1, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vsrl.vi v10, v8, 1
-; LMULMAX2-NEXT:    addi a1, zero, 85
+; LMULMAX2-NEXT:    li a1, 85
 ; LMULMAX2-NEXT:    vand.vx v10, v10, a1
 ; LMULMAX2-NEXT:    vsub.vv v8, v8, v10
-; LMULMAX2-NEXT:    addi a1, zero, 51
+; LMULMAX2-NEXT:    li a1, 51
 ; LMULMAX2-NEXT:    vand.vx v10, v8, a1
 ; LMULMAX2-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX2-NEXT:    vand.vx v8, v8, a1
@@ -475,10 +475,10 @@ define void @ctpop_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-NEXT:    vle8.v v8, (a1)
 ; LMULMAX1-NEXT:    vle8.v v9, (a0)
 ; LMULMAX1-NEXT:    vsrl.vi v10, v8, 1
-; LMULMAX1-NEXT:    addi a2, zero, 85
+; LMULMAX1-NEXT:    li a2, 85
 ; LMULMAX1-NEXT:    vand.vx v10, v10, a2
 ; LMULMAX1-NEXT:    vsub.vv v8, v8, v10
-; LMULMAX1-NEXT:    addi a3, zero, 51
+; LMULMAX1-NEXT:    li a3, 51
 ; LMULMAX1-NEXT:    vand.vx v10, v8, a3
 ; LMULMAX1-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX1-NEXT:    vand.vx v8, v8, a3
@@ -528,7 +528,7 @@ define void @ctpop_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV32-NEXT:    lui a1, 1
 ; LMULMAX2-RV32-NEXT:    addi a1, a1, -241
 ; LMULMAX2-RV32-NEXT:    vand.vx v8, v8, a1
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 257
+; LMULMAX2-RV32-NEXT:    li a1, 257
 ; LMULMAX2-RV32-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX2-RV32-NEXT:    vse16.v v8, (a0)
@@ -554,7 +554,7 @@ define void @ctpop_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    lui a1, 1
 ; LMULMAX2-RV64-NEXT:    addiw a1, a1, -241
 ; LMULMAX2-RV64-NEXT:    vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 257
+; LMULMAX2-RV64-NEXT:    li a1, 257
 ; LMULMAX2-RV64-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX2-RV64-NEXT:    vse16.v v8, (a0)
@@ -582,7 +582,7 @@ define void @ctpop_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV32-NEXT:    lui a4, 1
 ; LMULMAX1-RV32-NEXT:    addi a4, a4, -241
 ; LMULMAX1-RV32-NEXT:    vand.vx v8, v8, a4
-; LMULMAX1-RV32-NEXT:    addi a5, zero, 257
+; LMULMAX1-RV32-NEXT:    li a5, 257
 ; LMULMAX1-RV32-NEXT:    vmul.vx v8, v8, a5
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v9, 1
@@ -623,7 +623,7 @@ define void @ctpop_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    lui a4, 1
 ; LMULMAX1-RV64-NEXT:    addiw a4, a4, -241
 ; LMULMAX1-RV64-NEXT:    vand.vx v8, v8, a4
-; LMULMAX1-RV64-NEXT:    addi a5, zero, 257
+; LMULMAX1-RV64-NEXT:    li a5, 257
 ; LMULMAX1-RV64-NEXT:    vmul.vx v8, v8, a5
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v9, 1
@@ -831,7 +831,7 @@ define void @ctpop_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-RV32-NEXT:    vmv.v.x v10, a1
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmul.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV32-NEXT:    li a1, 56
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX2-RV32-NEXT:    vse64.v v8, (a0)
 ; LMULMAX2-RV32-NEXT:    ret
@@ -881,7 +881,7 @@ define void @ctpop_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX2-RV64-NEXT:    slli a1, a1, 16
 ; LMULMAX2-RV64-NEXT:    addi a1, a1, 257
 ; LMULMAX2-RV64-NEXT:    vmul.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV64-NEXT:    li a1, 56
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX2-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX2-RV64-NEXT:    ret
@@ -923,7 +923,7 @@ define void @ctpop_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV32-NEXT:    vmv.v.x v13, a2
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmul.vv v8, v8, v13
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 56
+; LMULMAX1-RV32-NEXT:    li a2, 56
 ; LMULMAX1-RV32-NEXT:    vsrl.vx v8, v8, a2
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v14, v9, 1
 ; LMULMAX1-RV32-NEXT:    vand.vv v11, v14, v11
@@ -988,7 +988,7 @@ define void @ctpop_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 ; LMULMAX1-RV64-NEXT:    slli a5, a5, 16
 ; LMULMAX1-RV64-NEXT:    addi a5, a5, 257
 ; LMULMAX1-RV64-NEXT:    vmul.vx v8, v8, a5
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX1-RV64-NEXT:    li a1, 56
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v9, 1
 ; LMULMAX1-RV64-NEXT:    vand.vx v10, v10, a2

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
index 3fcf539bd58c..a59223515fb6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
@@ -15,15 +15,15 @@ define void @cttz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX2-RV32:       # %bb.0:
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX2-RV32-NEXT:    vle8.v v8, (a0)
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 1
+; LMULMAX2-RV32-NEXT:    li a1, 1
 ; LMULMAX2-RV32-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX2-RV32-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV32-NEXT:    vand.vv v8, v8, v9
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v9, v8, 1
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 85
+; LMULMAX2-RV32-NEXT:    li a1, 85
 ; LMULMAX2-RV32-NEXT:    vand.vx v9, v9, a1
 ; LMULMAX2-RV32-NEXT:    vsub.vv v8, v8, v9
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 51
+; LMULMAX2-RV32-NEXT:    li a1, 51
 ; LMULMAX2-RV32-NEXT:    vand.vx v9, v8, a1
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX2-RV32-NEXT:    vand.vx v8, v8, a1
@@ -38,15 +38,15 @@ define void @cttz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX2-RV64:       # %bb.0:
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX2-RV64-NEXT:    vle8.v v8, (a0)
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 1
+; LMULMAX2-RV64-NEXT:    li a1, 1
 ; LMULMAX2-RV64-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV64-NEXT:    vand.vv v8, v8, v9
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v9, v8, 1
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 85
+; LMULMAX2-RV64-NEXT:    li a1, 85
 ; LMULMAX2-RV64-NEXT:    vand.vx v9, v9, a1
 ; LMULMAX2-RV64-NEXT:    vsub.vv v8, v8, v9
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 51
+; LMULMAX2-RV64-NEXT:    li a1, 51
 ; LMULMAX2-RV64-NEXT:    vand.vx v9, v8, a1
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX2-RV64-NEXT:    vand.vx v8, v8, a1
@@ -61,15 +61,15 @@ define void @cttz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX1-RV32:       # %bb.0:
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vle8.v v8, (a0)
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 1
+; LMULMAX1-RV32-NEXT:    li a1, 1
 ; LMULMAX1-RV32-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX1-RV32-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV32-NEXT:    vand.vv v8, v8, v9
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v9, v8, 1
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 85
+; LMULMAX1-RV32-NEXT:    li a1, 85
 ; LMULMAX1-RV32-NEXT:    vand.vx v9, v9, a1
 ; LMULMAX1-RV32-NEXT:    vsub.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 51
+; LMULMAX1-RV32-NEXT:    li a1, 51
 ; LMULMAX1-RV32-NEXT:    vand.vx v9, v8, a1
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX1-RV32-NEXT:    vand.vx v8, v8, a1
@@ -84,15 +84,15 @@ define void @cttz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX1-RV64:       # %bb.0:
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vle8.v v8, (a0)
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 1
+; LMULMAX1-RV64-NEXT:    li a1, 1
 ; LMULMAX1-RV64-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV64-NEXT:    vand.vv v8, v8, v9
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v9, v8, 1
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 85
+; LMULMAX1-RV64-NEXT:    li a1, 85
 ; LMULMAX1-RV64-NEXT:    vand.vx v9, v9, a1
 ; LMULMAX1-RV64-NEXT:    vsub.vv v8, v8, v9
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 51
+; LMULMAX1-RV64-NEXT:    li a1, 51
 ; LMULMAX1-RV64-NEXT:    vand.vx v9, v8, a1
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX1-RV64-NEXT:    vand.vx v8, v8, a1
@@ -116,7 +116,7 @@ define void @cttz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vnsrl.wi v10, v12, 23
 ; LMULMAX8-RV32-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; LMULMAX8-RV32-NEXT:    vnsrl.wi v9, v10, 0
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 127
+; LMULMAX8-RV32-NEXT:    li a1, 127
 ; LMULMAX8-RV32-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-RV32-NEXT:    vsub.vx v8, v9, a1
 ; LMULMAX8-RV32-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -136,7 +136,7 @@ define void @cttz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vnsrl.wi v10, v12, 23
 ; LMULMAX8-RV64-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; LMULMAX8-RV64-NEXT:    vnsrl.wi v9, v10, 0
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 127
+; LMULMAX8-RV64-NEXT:    li a1, 127
 ; LMULMAX8-RV64-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-RV64-NEXT:    vsub.vx v8, v9, a1
 ; LMULMAX8-RV64-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -155,7 +155,7 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX2-RV32I:       # %bb.0:
 ; LMULMAX2-RV32I-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX2-RV32I-NEXT:    vle16.v v8, (a0)
-; LMULMAX2-RV32I-NEXT:    addi a1, zero, 1
+; LMULMAX2-RV32I-NEXT:    li a1, 1
 ; LMULMAX2-RV32I-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX2-RV32I-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV32I-NEXT:    vand.vv v8, v8, v9
@@ -175,7 +175,7 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX2-RV32I-NEXT:    lui a1, 1
 ; LMULMAX2-RV32I-NEXT:    addi a1, a1, -241
 ; LMULMAX2-RV32I-NEXT:    vand.vx v8, v8, a1
-; LMULMAX2-RV32I-NEXT:    addi a1, zero, 257
+; LMULMAX2-RV32I-NEXT:    li a1, 257
 ; LMULMAX2-RV32I-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX2-RV32I-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX2-RV32I-NEXT:    vse16.v v8, (a0)
@@ -185,7 +185,7 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX2-RV64I:       # %bb.0:
 ; LMULMAX2-RV64I-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX2-RV64I-NEXT:    vle16.v v8, (a0)
-; LMULMAX2-RV64I-NEXT:    addi a1, zero, 1
+; LMULMAX2-RV64I-NEXT:    li a1, 1
 ; LMULMAX2-RV64I-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX2-RV64I-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV64I-NEXT:    vand.vv v8, v8, v9
@@ -205,7 +205,7 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX2-RV64I-NEXT:    lui a1, 1
 ; LMULMAX2-RV64I-NEXT:    addiw a1, a1, -241
 ; LMULMAX2-RV64I-NEXT:    vand.vx v8, v8, a1
-; LMULMAX2-RV64I-NEXT:    addi a1, zero, 257
+; LMULMAX2-RV64I-NEXT:    li a1, 257
 ; LMULMAX2-RV64I-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX2-RV64I-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX2-RV64I-NEXT:    vse16.v v8, (a0)
@@ -215,7 +215,7 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX1-RV32:       # %bb.0:
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vle16.v v8, (a0)
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 1
+; LMULMAX1-RV32-NEXT:    li a1, 1
 ; LMULMAX1-RV32-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX1-RV32-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV32-NEXT:    vand.vv v8, v8, v9
@@ -235,7 +235,7 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    lui a1, 1
 ; LMULMAX1-RV32-NEXT:    addi a1, a1, -241
 ; LMULMAX1-RV32-NEXT:    vand.vx v8, v8, a1
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 257
+; LMULMAX1-RV32-NEXT:    li a1, 257
 ; LMULMAX1-RV32-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX1-RV32-NEXT:    vse16.v v8, (a0)
@@ -245,7 +245,7 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX1-RV64:       # %bb.0:
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vle16.v v8, (a0)
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 1
+; LMULMAX1-RV64-NEXT:    li a1, 1
 ; LMULMAX1-RV64-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV64-NEXT:    vand.vv v8, v8, v9
@@ -265,7 +265,7 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    lui a1, 1
 ; LMULMAX1-RV64-NEXT:    addiw a1, a1, -241
 ; LMULMAX1-RV64-NEXT:    vand.vx v8, v8, a1
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 257
+; LMULMAX1-RV64-NEXT:    li a1, 257
 ; LMULMAX1-RV64-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX1-RV64-NEXT:    vse16.v v8, (a0)
@@ -279,10 +279,10 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX2-RV32D-NEXT:    vand.vv v9, v8, v9
 ; LMULMAX2-RV32D-NEXT:    vfwcvt.f.xu.v v10, v9
 ; LMULMAX2-RV32D-NEXT:    vnsrl.wi v9, v10, 23
-; LMULMAX2-RV32D-NEXT:    addi a1, zero, 127
+; LMULMAX2-RV32D-NEXT:    li a1, 127
 ; LMULMAX2-RV32D-NEXT:    vsub.vx v9, v9, a1
 ; LMULMAX2-RV32D-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX2-RV32D-NEXT:    addi a1, zero, 16
+; LMULMAX2-RV32D-NEXT:    li a1, 16
 ; LMULMAX2-RV32D-NEXT:    vmerge.vxm v8, v9, a1, v0
 ; LMULMAX2-RV32D-NEXT:    vse16.v v8, (a0)
 ; LMULMAX2-RV32D-NEXT:    ret
@@ -295,10 +295,10 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX2-RV64D-NEXT:    vand.vv v9, v8, v9
 ; LMULMAX2-RV64D-NEXT:    vfwcvt.f.xu.v v10, v9
 ; LMULMAX2-RV64D-NEXT:    vnsrl.wi v9, v10, 23
-; LMULMAX2-RV64D-NEXT:    addi a1, zero, 127
+; LMULMAX2-RV64D-NEXT:    li a1, 127
 ; LMULMAX2-RV64D-NEXT:    vsub.vx v9, v9, a1
 ; LMULMAX2-RV64D-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX2-RV64D-NEXT:    addi a1, zero, 16
+; LMULMAX2-RV64D-NEXT:    li a1, 16
 ; LMULMAX2-RV64D-NEXT:    vmerge.vxm v8, v9, a1, v0
 ; LMULMAX2-RV64D-NEXT:    vse16.v v8, (a0)
 ; LMULMAX2-RV64D-NEXT:    ret
@@ -311,10 +311,10 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vand.vv v9, v8, v9
 ; LMULMAX8-RV32-NEXT:    vfwcvt.f.xu.v v10, v9
 ; LMULMAX8-RV32-NEXT:    vnsrl.wi v9, v10, 23
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 127
+; LMULMAX8-RV32-NEXT:    li a1, 127
 ; LMULMAX8-RV32-NEXT:    vsub.vx v9, v9, a1
 ; LMULMAX8-RV32-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 16
+; LMULMAX8-RV32-NEXT:    li a1, 16
 ; LMULMAX8-RV32-NEXT:    vmerge.vxm v8, v9, a1, v0
 ; LMULMAX8-RV32-NEXT:    vse16.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    ret
@@ -327,10 +327,10 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vand.vv v9, v8, v9
 ; LMULMAX8-RV64-NEXT:    vfwcvt.f.xu.v v10, v9
 ; LMULMAX8-RV64-NEXT:    vnsrl.wi v9, v10, 23
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 127
+; LMULMAX8-RV64-NEXT:    li a1, 127
 ; LMULMAX8-RV64-NEXT:    vsub.vx v9, v9, a1
 ; LMULMAX8-RV64-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 16
+; LMULMAX8-RV64-NEXT:    li a1, 16
 ; LMULMAX8-RV64-NEXT:    vmerge.vxm v8, v9, a1, v0
 ; LMULMAX8-RV64-NEXT:    vse16.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    ret
@@ -347,7 +347,7 @@ define void @cttz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX2-RV32I:       # %bb.0:
 ; LMULMAX2-RV32I-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX2-RV32I-NEXT:    vle32.v v8, (a0)
-; LMULMAX2-RV32I-NEXT:    addi a1, zero, 1
+; LMULMAX2-RV32I-NEXT:    li a1, 1
 ; LMULMAX2-RV32I-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX2-RV32I-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV32I-NEXT:    vand.vv v8, v8, v9
@@ -378,7 +378,7 @@ define void @cttz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX2-RV64I:       # %bb.0:
 ; LMULMAX2-RV64I-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX2-RV64I-NEXT:    vle32.v v8, (a0)
-; LMULMAX2-RV64I-NEXT:    addi a1, zero, 1
+; LMULMAX2-RV64I-NEXT:    li a1, 1
 ; LMULMAX2-RV64I-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX2-RV64I-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV64I-NEXT:    vand.vv v8, v8, v9
@@ -409,7 +409,7 @@ define void @cttz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX1-RV32:       # %bb.0:
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vle32.v v8, (a0)
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 1
+; LMULMAX1-RV32-NEXT:    li a1, 1
 ; LMULMAX1-RV32-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX1-RV32-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV32-NEXT:    vand.vv v8, v8, v9
@@ -440,7 +440,7 @@ define void @cttz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX1-RV64:       # %bb.0:
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vle32.v v8, (a0)
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 1
+; LMULMAX1-RV64-NEXT:    li a1, 1
 ; LMULMAX1-RV64-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV64-NEXT:    vand.vv v8, v8, v9
@@ -474,12 +474,12 @@ define void @cttz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX2-RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; LMULMAX2-RV32D-NEXT:    vand.vv v9, v8, v9
 ; LMULMAX2-RV32D-NEXT:    vfwcvt.f.xu.v v10, v9
-; LMULMAX2-RV32D-NEXT:    addi a1, zero, 52
+; LMULMAX2-RV32D-NEXT:    li a1, 52
 ; LMULMAX2-RV32D-NEXT:    vnsrl.wx v9, v10, a1
-; LMULMAX2-RV32D-NEXT:    addi a1, zero, 1023
+; LMULMAX2-RV32D-NEXT:    li a1, 1023
 ; LMULMAX2-RV32D-NEXT:    vsub.vx v9, v9, a1
 ; LMULMAX2-RV32D-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX2-RV32D-NEXT:    addi a1, zero, 32
+; LMULMAX2-RV32D-NEXT:    li a1, 32
 ; LMULMAX2-RV32D-NEXT:    vmerge.vxm v8, v9, a1, v0
 ; LMULMAX2-RV32D-NEXT:    vse32.v v8, (a0)
 ; LMULMAX2-RV32D-NEXT:    ret
@@ -491,12 +491,12 @@ define void @cttz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX2-RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; LMULMAX2-RV64D-NEXT:    vand.vv v9, v8, v9
 ; LMULMAX2-RV64D-NEXT:    vfwcvt.f.xu.v v10, v9
-; LMULMAX2-RV64D-NEXT:    addi a1, zero, 52
+; LMULMAX2-RV64D-NEXT:    li a1, 52
 ; LMULMAX2-RV64D-NEXT:    vnsrl.wx v9, v10, a1
-; LMULMAX2-RV64D-NEXT:    addi a1, zero, 1023
+; LMULMAX2-RV64D-NEXT:    li a1, 1023
 ; LMULMAX2-RV64D-NEXT:    vsub.vx v9, v9, a1
 ; LMULMAX2-RV64D-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX2-RV64D-NEXT:    addi a1, zero, 32
+; LMULMAX2-RV64D-NEXT:    li a1, 32
 ; LMULMAX2-RV64D-NEXT:    vmerge.vxm v8, v9, a1, v0
 ; LMULMAX2-RV64D-NEXT:    vse32.v v8, (a0)
 ; LMULMAX2-RV64D-NEXT:    ret
@@ -508,12 +508,12 @@ define void @cttz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vrsub.vi v9, v8, 0
 ; LMULMAX8-RV32-NEXT:    vand.vv v9, v8, v9
 ; LMULMAX8-RV32-NEXT:    vfwcvt.f.xu.v v10, v9
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 52
+; LMULMAX8-RV32-NEXT:    li a1, 52
 ; LMULMAX8-RV32-NEXT:    vnsrl.wx v9, v10, a1
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 1023
+; LMULMAX8-RV32-NEXT:    li a1, 1023
 ; LMULMAX8-RV32-NEXT:    vsub.vx v9, v9, a1
 ; LMULMAX8-RV32-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 32
+; LMULMAX8-RV32-NEXT:    li a1, 32
 ; LMULMAX8-RV32-NEXT:    vmerge.vxm v8, v9, a1, v0
 ; LMULMAX8-RV32-NEXT:    vse32.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    ret
@@ -525,12 +525,12 @@ define void @cttz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vrsub.vi v9, v8, 0
 ; LMULMAX8-RV64-NEXT:    vand.vv v9, v8, v9
 ; LMULMAX8-RV64-NEXT:    vfwcvt.f.xu.v v10, v9
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 52
+; LMULMAX8-RV64-NEXT:    li a1, 52
 ; LMULMAX8-RV64-NEXT:    vnsrl.wx v9, v10, a1
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 1023
+; LMULMAX8-RV64-NEXT:    li a1, 1023
 ; LMULMAX8-RV64-NEXT:    vsub.vx v9, v9, a1
 ; LMULMAX8-RV64-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 32
+; LMULMAX8-RV64-NEXT:    li a1, 32
 ; LMULMAX8-RV64-NEXT:    vmerge.vxm v8, v9, a1, v0
 ; LMULMAX8-RV64-NEXT:    vse32.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    ret
@@ -547,7 +547,7 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX2-RV32:       # %bb.0:
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX2-RV32-NEXT:    vle64.v v8, (a0)
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 1
+; LMULMAX2-RV32-NEXT:    li a1, 1
 ; LMULMAX2-RV32-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.v.i v10, -1
@@ -585,7 +585,7 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX2-RV32-NEXT:    vmv.v.x v9, a1
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmul.vv v8, v8, v9
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV32-NEXT:    li a1, 56
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX2-RV32-NEXT:    vse64.v v8, (a0)
 ; LMULMAX2-RV32-NEXT:    ret
@@ -594,7 +594,7 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX2-RV64:       # %bb.0:
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX2-RV64-NEXT:    vle64.v v8, (a0)
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 1
+; LMULMAX2-RV64-NEXT:    li a1, 1
 ; LMULMAX2-RV64-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV64-NEXT:    vand.vv v8, v8, v9
@@ -639,7 +639,7 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    slli a1, a1, 16
 ; LMULMAX2-RV64-NEXT:    addi a1, a1, 257
 ; LMULMAX2-RV64-NEXT:    vmul.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV64-NEXT:    li a1, 56
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX2-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX2-RV64-NEXT:    ret
@@ -648,7 +648,7 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX1-RV32:       # %bb.0:
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vle64.v v8, (a0)
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 1
+; LMULMAX1-RV32-NEXT:    li a1, 1
 ; LMULMAX1-RV32-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmv.v.i v10, -1
@@ -686,7 +686,7 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vmv.v.x v9, a1
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmul.vv v8, v8, v9
-; LMULMAX1-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX1-RV32-NEXT:    li a1, 56
 ; LMULMAX1-RV32-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX1-RV32-NEXT:    vse64.v v8, (a0)
 ; LMULMAX1-RV32-NEXT:    ret
@@ -695,7 +695,7 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX1-RV64:       # %bb.0:
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 1
+; LMULMAX1-RV64-NEXT:    li a1, 1
 ; LMULMAX1-RV64-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV64-NEXT:    vand.vv v8, v8, v9
@@ -740,7 +740,7 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    slli a1, a1, 16
 ; LMULMAX1-RV64-NEXT:    addi a1, a1, 257
 ; LMULMAX1-RV64-NEXT:    vmul.vx v8, v8, a1
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX1-RV64-NEXT:    li a1, 56
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX1-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
@@ -749,7 +749,7 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX8-RV32:       # %bb.0:
 ; LMULMAX8-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX8-RV32-NEXT:    vle64.v v8, (a0)
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 1
+; LMULMAX8-RV32-NEXT:    li a1, 1
 ; LMULMAX8-RV32-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX8-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX8-RV32-NEXT:    vmv.v.i v10, -1
@@ -787,7 +787,7 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vmv.v.x v9, a1
 ; LMULMAX8-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX8-RV32-NEXT:    vmul.vv v8, v8, v9
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX8-RV32-NEXT:    li a1, 56
 ; LMULMAX8-RV32-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX8-RV32-NEXT:    vse64.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    ret
@@ -796,7 +796,7 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX8-RV64:       # %bb.0:
 ; LMULMAX8-RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX8-RV64-NEXT:    vle64.v v8, (a0)
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 1
+; LMULMAX8-RV64-NEXT:    li a1, 1
 ; LMULMAX8-RV64-NEXT:    vsub.vx v9, v8, a1
 ; LMULMAX8-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX8-RV64-NEXT:    vand.vv v8, v8, v9
@@ -841,7 +841,7 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    slli a1, a1, 16
 ; LMULMAX8-RV64-NEXT:    addi a1, a1, 257
 ; LMULMAX8-RV64-NEXT:    vmul.vx v8, v8, a1
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX8-RV64-NEXT:    li a1, 56
 ; LMULMAX8-RV64-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX8-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    ret
@@ -856,18 +856,18 @@ declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1)
 define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX2-RV32-LABEL: cttz_v32i8:
 ; LMULMAX2-RV32:       # %bb.0:
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 32
+; LMULMAX2-RV32-NEXT:    li a1, 32
 ; LMULMAX2-RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vle8.v v8, (a0)
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 1
+; LMULMAX2-RV32-NEXT:    li a1, 1
 ; LMULMAX2-RV32-NEXT:    vsub.vx v10, v8, a1
 ; LMULMAX2-RV32-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV32-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v10, v8, 1
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 85
+; LMULMAX2-RV32-NEXT:    li a1, 85
 ; LMULMAX2-RV32-NEXT:    vand.vx v10, v10, a1
 ; LMULMAX2-RV32-NEXT:    vsub.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 51
+; LMULMAX2-RV32-NEXT:    li a1, 51
 ; LMULMAX2-RV32-NEXT:    vand.vx v10, v8, a1
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX2-RV32-NEXT:    vand.vx v8, v8, a1
@@ -880,18 +880,18 @@ define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ;
 ; LMULMAX2-RV64-LABEL: cttz_v32i8:
 ; LMULMAX2-RV64:       # %bb.0:
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 32
+; LMULMAX2-RV64-NEXT:    li a1, 32
 ; LMULMAX2-RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vle8.v v8, (a0)
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 1
+; LMULMAX2-RV64-NEXT:    li a1, 1
 ; LMULMAX2-RV64-NEXT:    vsub.vx v10, v8, a1
 ; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV64-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v10, v8, 1
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 85
+; LMULMAX2-RV64-NEXT:    li a1, 85
 ; LMULMAX2-RV64-NEXT:    vand.vx v10, v10, a1
 ; LMULMAX2-RV64-NEXT:    vsub.vv v8, v8, v10
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 51
+; LMULMAX2-RV64-NEXT:    li a1, 51
 ; LMULMAX2-RV64-NEXT:    vand.vx v10, v8, a1
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX2-RV64-NEXT:    vand.vx v8, v8, a1
@@ -908,15 +908,15 @@ define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle8.v v8, (a1)
 ; LMULMAX1-RV32-NEXT:    vle8.v v9, (a0)
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 1
+; LMULMAX1-RV32-NEXT:    li a2, 1
 ; LMULMAX1-RV32-NEXT:    vsub.vx v10, v8, a2
 ; LMULMAX1-RV32-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV32-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v10, v8, 1
-; LMULMAX1-RV32-NEXT:    addi a3, zero, 85
+; LMULMAX1-RV32-NEXT:    li a3, 85
 ; LMULMAX1-RV32-NEXT:    vand.vx v10, v10, a3
 ; LMULMAX1-RV32-NEXT:    vsub.vv v8, v8, v10
-; LMULMAX1-RV32-NEXT:    addi a4, zero, 51
+; LMULMAX1-RV32-NEXT:    li a4, 51
 ; LMULMAX1-RV32-NEXT:    vand.vx v10, v8, a4
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX1-RV32-NEXT:    vand.vx v8, v8, a4
@@ -947,15 +947,15 @@ define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV64-NEXT:    vle8.v v8, (a1)
 ; LMULMAX1-RV64-NEXT:    vle8.v v9, (a0)
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 1
+; LMULMAX1-RV64-NEXT:    li a2, 1
 ; LMULMAX1-RV64-NEXT:    vsub.vx v10, v8, a2
 ; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV64-NEXT:    vand.vv v8, v8, v10
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v10, v8, 1
-; LMULMAX1-RV64-NEXT:    addi a3, zero, 85
+; LMULMAX1-RV64-NEXT:    li a3, 85
 ; LMULMAX1-RV64-NEXT:    vand.vx v10, v10, a3
 ; LMULMAX1-RV64-NEXT:    vsub.vv v8, v8, v10
-; LMULMAX1-RV64-NEXT:    addi a4, zero, 51
+; LMULMAX1-RV64-NEXT:    li a4, 51
 ; LMULMAX1-RV64-NEXT:    vand.vx v10, v8, a4
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v8, v8, 2
 ; LMULMAX1-RV64-NEXT:    vand.vx v8, v8, a4
@@ -982,7 +982,7 @@ define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ;
 ; LMULMAX8-RV32-LABEL: cttz_v32i8:
 ; LMULMAX8-RV32:       # %bb.0:
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 32
+; LMULMAX8-RV32-NEXT:    li a1, 32
 ; LMULMAX8-RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX8-RV32-NEXT:    vle8.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    vrsub.vi v10, v8, 0
@@ -994,7 +994,7 @@ define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vnsrl.wi v12, v16, 23
 ; LMULMAX8-RV32-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; LMULMAX8-RV32-NEXT:    vnsrl.wi v10, v12, 0
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 127
+; LMULMAX8-RV32-NEXT:    li a1, 127
 ; LMULMAX8-RV32-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-RV32-NEXT:    vsub.vx v8, v10, a1
 ; LMULMAX8-RV32-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -1003,7 +1003,7 @@ define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ;
 ; LMULMAX8-RV64-LABEL: cttz_v32i8:
 ; LMULMAX8-RV64:       # %bb.0:
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 32
+; LMULMAX8-RV64-NEXT:    li a1, 32
 ; LMULMAX8-RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX8-RV64-NEXT:    vle8.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    vrsub.vi v10, v8, 0
@@ -1015,7 +1015,7 @@ define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vnsrl.wi v12, v16, 23
 ; LMULMAX8-RV64-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; LMULMAX8-RV64-NEXT:    vnsrl.wi v10, v12, 0
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 127
+; LMULMAX8-RV64-NEXT:    li a1, 127
 ; LMULMAX8-RV64-NEXT:    vmseq.vi v0, v8, 0
 ; LMULMAX8-RV64-NEXT:    vsub.vx v8, v10, a1
 ; LMULMAX8-RV64-NEXT:    vmerge.vim v8, v8, 8, v0
@@ -1034,7 +1034,7 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX2-RV32:       # %bb.0:
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vle16.v v8, (a0)
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 1
+; LMULMAX2-RV32-NEXT:    li a1, 1
 ; LMULMAX2-RV32-NEXT:    vsub.vx v10, v8, a1
 ; LMULMAX2-RV32-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV32-NEXT:    vand.vv v8, v8, v10
@@ -1054,7 +1054,7 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX2-RV32-NEXT:    lui a1, 1
 ; LMULMAX2-RV32-NEXT:    addi a1, a1, -241
 ; LMULMAX2-RV32-NEXT:    vand.vx v8, v8, a1
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 257
+; LMULMAX2-RV32-NEXT:    li a1, 257
 ; LMULMAX2-RV32-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX2-RV32-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX2-RV32-NEXT:    vse16.v v8, (a0)
@@ -1064,7 +1064,7 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX2-RV64:       # %bb.0:
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vle16.v v8, (a0)
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 1
+; LMULMAX2-RV64-NEXT:    li a1, 1
 ; LMULMAX2-RV64-NEXT:    vsub.vx v10, v8, a1
 ; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV64-NEXT:    vand.vv v8, v8, v10
@@ -1084,7 +1084,7 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    lui a1, 1
 ; LMULMAX2-RV64-NEXT:    addiw a1, a1, -241
 ; LMULMAX2-RV64-NEXT:    vand.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 257
+; LMULMAX2-RV64-NEXT:    li a1, 257
 ; LMULMAX2-RV64-NEXT:    vmul.vx v8, v8, a1
 ; LMULMAX2-RV64-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX2-RV64-NEXT:    vse16.v v8, (a0)
@@ -1096,7 +1096,7 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle16.v v8, (a1)
 ; LMULMAX1-RV32-NEXT:    vle16.v v9, (a0)
-; LMULMAX1-RV32-NEXT:    addi a6, zero, 1
+; LMULMAX1-RV32-NEXT:    li a6, 1
 ; LMULMAX1-RV32-NEXT:    vsub.vx v10, v8, a6
 ; LMULMAX1-RV32-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV32-NEXT:    vand.vv v8, v8, v10
@@ -1116,7 +1116,7 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    lui a5, 1
 ; LMULMAX1-RV32-NEXT:    addi a5, a5, -241
 ; LMULMAX1-RV32-NEXT:    vand.vx v8, v8, a5
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 257
+; LMULMAX1-RV32-NEXT:    li a2, 257
 ; LMULMAX1-RV32-NEXT:    vmul.vx v8, v8, a2
 ; LMULMAX1-RV32-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX1-RV32-NEXT:    vsub.vx v10, v9, a6
@@ -1144,7 +1144,7 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV64-NEXT:    vle16.v v8, (a1)
 ; LMULMAX1-RV64-NEXT:    vle16.v v9, (a0)
-; LMULMAX1-RV64-NEXT:    addi a6, zero, 1
+; LMULMAX1-RV64-NEXT:    li a6, 1
 ; LMULMAX1-RV64-NEXT:    vsub.vx v10, v8, a6
 ; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV64-NEXT:    vand.vv v8, v8, v10
@@ -1164,7 +1164,7 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    lui a5, 1
 ; LMULMAX1-RV64-NEXT:    addiw a5, a5, -241
 ; LMULMAX1-RV64-NEXT:    vand.vx v8, v8, a5
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 257
+; LMULMAX1-RV64-NEXT:    li a2, 257
 ; LMULMAX1-RV64-NEXT:    vmul.vx v8, v8, a2
 ; LMULMAX1-RV64-NEXT:    vsrl.vi v8, v8, 8
 ; LMULMAX1-RV64-NEXT:    vsub.vx v10, v9, a6
@@ -1194,10 +1194,10 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vand.vv v10, v8, v10
 ; LMULMAX8-RV32-NEXT:    vfwcvt.f.xu.v v12, v10
 ; LMULMAX8-RV32-NEXT:    vnsrl.wi v10, v12, 23
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 127
+; LMULMAX8-RV32-NEXT:    li a1, 127
 ; LMULMAX8-RV32-NEXT:    vsub.vx v10, v10, a1
 ; LMULMAX8-RV32-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 16
+; LMULMAX8-RV32-NEXT:    li a1, 16
 ; LMULMAX8-RV32-NEXT:    vmerge.vxm v8, v10, a1, v0
 ; LMULMAX8-RV32-NEXT:    vse16.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    ret
@@ -1210,10 +1210,10 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vand.vv v10, v8, v10
 ; LMULMAX8-RV64-NEXT:    vfwcvt.f.xu.v v12, v10
 ; LMULMAX8-RV64-NEXT:    vnsrl.wi v10, v12, 23
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 127
+; LMULMAX8-RV64-NEXT:    li a1, 127
 ; LMULMAX8-RV64-NEXT:    vsub.vx v10, v10, a1
 ; LMULMAX8-RV64-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 16
+; LMULMAX8-RV64-NEXT:    li a1, 16
 ; LMULMAX8-RV64-NEXT:    vmerge.vxm v8, v10, a1, v0
 ; LMULMAX8-RV64-NEXT:    vse16.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    ret
@@ -1230,7 +1230,7 @@ define void @cttz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX2-RV32:       # %bb.0:
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vle32.v v8, (a0)
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 1
+; LMULMAX2-RV32-NEXT:    li a1, 1
 ; LMULMAX2-RV32-NEXT:    vsub.vx v10, v8, a1
 ; LMULMAX2-RV32-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV32-NEXT:    vand.vv v8, v8, v10
@@ -1261,7 +1261,7 @@ define void @cttz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX2-RV64:       # %bb.0:
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vle32.v v8, (a0)
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 1
+; LMULMAX2-RV64-NEXT:    li a1, 1
 ; LMULMAX2-RV64-NEXT:    vsub.vx v10, v8, a1
 ; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV64-NEXT:    vand.vv v8, v8, v10
@@ -1294,7 +1294,7 @@ define void @cttz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v8, (a1)
 ; LMULMAX1-RV32-NEXT:    vle32.v v9, (a0)
-; LMULMAX1-RV32-NEXT:    addi a6, zero, 1
+; LMULMAX1-RV32-NEXT:    li a6, 1
 ; LMULMAX1-RV32-NEXT:    vsub.vx v10, v8, a6
 ; LMULMAX1-RV32-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV32-NEXT:    vand.vv v8, v8, v10
@@ -1343,7 +1343,7 @@ define void @cttz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v8, (a1)
 ; LMULMAX1-RV64-NEXT:    vle32.v v9, (a0)
-; LMULMAX1-RV64-NEXT:    addi a6, zero, 1
+; LMULMAX1-RV64-NEXT:    li a6, 1
 ; LMULMAX1-RV64-NEXT:    vsub.vx v10, v8, a6
 ; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV64-NEXT:    vand.vv v8, v8, v10
@@ -1393,12 +1393,12 @@ define void @cttz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vrsub.vi v10, v8, 0
 ; LMULMAX8-RV32-NEXT:    vand.vv v10, v8, v10
 ; LMULMAX8-RV32-NEXT:    vfwcvt.f.xu.v v12, v10
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 52
+; LMULMAX8-RV32-NEXT:    li a1, 52
 ; LMULMAX8-RV32-NEXT:    vnsrl.wx v10, v12, a1
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 1023
+; LMULMAX8-RV32-NEXT:    li a1, 1023
 ; LMULMAX8-RV32-NEXT:    vsub.vx v10, v10, a1
 ; LMULMAX8-RV32-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 32
+; LMULMAX8-RV32-NEXT:    li a1, 32
 ; LMULMAX8-RV32-NEXT:    vmerge.vxm v8, v10, a1, v0
 ; LMULMAX8-RV32-NEXT:    vse32.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    ret
@@ -1410,12 +1410,12 @@ define void @cttz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    vrsub.vi v10, v8, 0
 ; LMULMAX8-RV64-NEXT:    vand.vv v10, v8, v10
 ; LMULMAX8-RV64-NEXT:    vfwcvt.f.xu.v v12, v10
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 52
+; LMULMAX8-RV64-NEXT:    li a1, 52
 ; LMULMAX8-RV64-NEXT:    vnsrl.wx v10, v12, a1
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 1023
+; LMULMAX8-RV64-NEXT:    li a1, 1023
 ; LMULMAX8-RV64-NEXT:    vsub.vx v10, v10, a1
 ; LMULMAX8-RV64-NEXT:    vmseq.vi v0, v8, 0
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 32
+; LMULMAX8-RV64-NEXT:    li a1, 32
 ; LMULMAX8-RV64-NEXT:    vmerge.vxm v8, v10, a1, v0
 ; LMULMAX8-RV64-NEXT:    vse32.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    ret
@@ -1432,7 +1432,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX2-RV32:       # %bb.0:
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vle64.v v8, (a0)
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 1
+; LMULMAX2-RV32-NEXT:    li a1, 1
 ; LMULMAX2-RV32-NEXT:    vsub.vx v10, v8, a1
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.v.i v12, -1
@@ -1470,7 +1470,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX2-RV32-NEXT:    vmv.v.x v10, a1
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmul.vv v8, v8, v10
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV32-NEXT:    li a1, 56
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX2-RV32-NEXT:    vse64.v v8, (a0)
 ; LMULMAX2-RV32-NEXT:    ret
@@ -1479,7 +1479,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX2-RV64:       # %bb.0:
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vle64.v v8, (a0)
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 1
+; LMULMAX2-RV64-NEXT:    li a1, 1
 ; LMULMAX2-RV64-NEXT:    vsub.vx v10, v8, a1
 ; LMULMAX2-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX2-RV64-NEXT:    vand.vv v8, v8, v10
@@ -1524,7 +1524,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX2-RV64-NEXT:    slli a1, a1, 16
 ; LMULMAX2-RV64-NEXT:    addi a1, a1, 257
 ; LMULMAX2-RV64-NEXT:    vmul.vx v8, v8, a1
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX2-RV64-NEXT:    li a1, 56
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX2-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX2-RV64-NEXT:    ret
@@ -1535,7 +1535,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v8, (a1)
 ; LMULMAX1-RV32-NEXT:    vle64.v v9, (a0)
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 1
+; LMULMAX1-RV32-NEXT:    li a2, 1
 ; LMULMAX1-RV32-NEXT:    vsub.vx v10, v8, a2
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmv.v.i v11, -1
@@ -1573,7 +1573,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX1-RV32-NEXT:    vmv.v.x v14, a3
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmul.vv v8, v8, v14
-; LMULMAX1-RV32-NEXT:    addi a3, zero, 56
+; LMULMAX1-RV32-NEXT:    li a3, 56
 ; LMULMAX1-RV32-NEXT:    vsrl.vx v8, v8, a3
 ; LMULMAX1-RV32-NEXT:    vsub.vx v15, v9, a2
 ; LMULMAX1-RV32-NEXT:    vxor.vv v9, v9, v11
@@ -1600,7 +1600,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    addi a7, a0, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v8, (a7)
 ; LMULMAX1-RV64-NEXT:    vle64.v v9, (a0)
-; LMULMAX1-RV64-NEXT:    addi a6, zero, 1
+; LMULMAX1-RV64-NEXT:    li a6, 1
 ; LMULMAX1-RV64-NEXT:    vsub.vx v10, v8, a6
 ; LMULMAX1-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX1-RV64-NEXT:    vand.vv v8, v8, v10
@@ -1645,7 +1645,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX1-RV64-NEXT:    slli a2, a2, 16
 ; LMULMAX1-RV64-NEXT:    addi a2, a2, 257
 ; LMULMAX1-RV64-NEXT:    vmul.vx v8, v8, a2
-; LMULMAX1-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX1-RV64-NEXT:    li a1, 56
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX1-RV64-NEXT:    vsub.vx v10, v9, a6
 ; LMULMAX1-RV64-NEXT:    vxor.vi v9, v9, -1
@@ -1670,7 +1670,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX8-RV32:       # %bb.0:
 ; LMULMAX8-RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX8-RV32-NEXT:    vle64.v v8, (a0)
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 1
+; LMULMAX8-RV32-NEXT:    li a1, 1
 ; LMULMAX8-RV32-NEXT:    vsub.vx v10, v8, a1
 ; LMULMAX8-RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX8-RV32-NEXT:    vmv.v.i v12, -1
@@ -1708,7 +1708,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX8-RV32-NEXT:    vmv.v.x v10, a1
 ; LMULMAX8-RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX8-RV32-NEXT:    vmul.vv v8, v8, v10
-; LMULMAX8-RV32-NEXT:    addi a1, zero, 56
+; LMULMAX8-RV32-NEXT:    li a1, 56
 ; LMULMAX8-RV32-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX8-RV32-NEXT:    vse64.v v8, (a0)
 ; LMULMAX8-RV32-NEXT:    ret
@@ -1717,7 +1717,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX8-RV64:       # %bb.0:
 ; LMULMAX8-RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX8-RV64-NEXT:    vle64.v v8, (a0)
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 1
+; LMULMAX8-RV64-NEXT:    li a1, 1
 ; LMULMAX8-RV64-NEXT:    vsub.vx v10, v8, a1
 ; LMULMAX8-RV64-NEXT:    vxor.vi v8, v8, -1
 ; LMULMAX8-RV64-NEXT:    vand.vv v8, v8, v10
@@ -1762,7 +1762,7 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind {
 ; LMULMAX8-RV64-NEXT:    slli a1, a1, 16
 ; LMULMAX8-RV64-NEXT:    addi a1, a1, 257
 ; LMULMAX8-RV64-NEXT:    vmul.vx v8, v8, a1
-; LMULMAX8-RV64-NEXT:    addi a1, zero, 56
+; LMULMAX8-RV64-NEXT:    li a1, 56
 ; LMULMAX8-RV64-NEXT:    vsrl.vx v8, v8, a1
 ; LMULMAX8-RV64-NEXT:    vse64.v v8, (a0)
 ; LMULMAX8-RV64-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
index 341a06c03613..f68b8f9049b0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
@@ -94,7 +94,7 @@ define i1 @extractelt_v16i1(<16 x i8>* %x, i64 %idx) nounwind {
 define i1 @extractelt_v32i1(<32 x i8>* %x, i64 %idx) nounwind {
 ; CHECK-LABEL: extractelt_v32i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
@@ -113,7 +113,7 @@ define i1 @extractelt_v32i1(<32 x i8>* %x, i64 %idx) nounwind {
 define i1 @extractelt_v64i1(<64 x i8>* %x, i64 %idx) nounwind {
 ; CHECK-LABEL: extractelt_v64i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
@@ -132,7 +132,7 @@ define i1 @extractelt_v64i1(<64 x i8>* %x, i64 %idx) nounwind {
 define i1 @extractelt_v128i1(<128 x i8>* %x, i64 %idx) nounwind {
 ; CHECK-LABEL: extractelt_v128i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 128
+; CHECK-NEXT:    li a2, 128
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
@@ -158,7 +158,7 @@ define i1 @extractelt_v256i1(<256 x i8>* %x, i64 %idx) nounwind {
 ; RV32-NEXT:    andi sp, sp, -128
 ; RV32-NEXT:    andi a1, a1, 255
 ; RV32-NEXT:    addi a2, a0, 128
-; RV32-NEXT:    addi a3, zero, 128
+; RV32-NEXT:    li a3, 128
 ; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
 ; RV32-NEXT:    vle8.v v16, (a0)
 ; RV32-NEXT:    vle8.v v24, (a2)
@@ -190,7 +190,7 @@ define i1 @extractelt_v256i1(<256 x i8>* %x, i64 %idx) nounwind {
 ; RV64-NEXT:    andi sp, sp, -128
 ; RV64-NEXT:    andi a1, a1, 255
 ; RV64-NEXT:    addi a2, a0, 128
-; RV64-NEXT:    addi a3, zero, 128
+; RV64-NEXT:    li a3, 128
 ; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
 ; RV64-NEXT:    vle8.v v16, (a0)
 ; RV64-NEXT:    vle8.v v24, (a2)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
index 690da3979a00..633251e96362 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
@@ -212,7 +212,7 @@ define void @extract_v8i32_nxv16i32_8(<vscale x 16 x i32> %x, <8 x i32>* %y) {
 define void @extract_v8i1_v64i1_0(<64 x i1>* %x, <8 x i1>* %y) {
 ; LMULMAX2-LABEL: extract_v8i1_v64i1_0:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vlm.v v8, (a0)
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
@@ -235,7 +235,7 @@ define void @extract_v8i1_v64i1_0(<64 x i1>* %x, <8 x i1>* %y) {
 define void @extract_v8i1_v64i1_8(<64 x i1>* %x, <8 x i1>* %y) {
 ; LMULMAX2-LABEL: extract_v8i1_v64i1_8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vlm.v v8, (a0)
 ; LMULMAX2-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
@@ -263,7 +263,7 @@ define void @extract_v8i1_v64i1_48(<64 x i1>* %x, <8 x i1>* %y) {
 ; LMULMAX2-LABEL: extract_v8i1_v64i1_48:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    addi a0, a0, 4
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vlm.v v8, (a0)
 ; LMULMAX2-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
@@ -338,7 +338,7 @@ define void @extract_v8i1_nxv64i1_48(<vscale x 64 x i1> %x, <8 x i1>* %y) {
 define void @extract_v2i1_v64i1_0(<64 x i1>* %x, <2 x i1>* %y) {
 ; LMULMAX2-LABEL: extract_v2i1_v64i1_0:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vlm.v v0, (a0)
 ; LMULMAX2-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
@@ -377,7 +377,7 @@ define void @extract_v2i1_v64i1_0(<64 x i1>* %x, <2 x i1>* %y) {
 define void @extract_v2i1_v64i1_2(<64 x i1>* %x, <2 x i1>* %y) {
 ; LMULMAX2-LABEL: extract_v2i1_v64i1_2:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vlm.v v0, (a0)
 ; LMULMAX2-NEXT:    vmv.v.i v8, 0
@@ -427,7 +427,7 @@ define void @extract_v2i1_v64i1_42(<64 x i1>* %x, <2 x i1>* %y) {
 ; LMULMAX2-LABEL: extract_v2i1_v64i1_42:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    addi a0, a0, 4
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vlm.v v0, (a0)
 ; LMULMAX2-NEXT:    vmv.v.i v8, 0
@@ -568,7 +568,7 @@ define void @extract_v2i1_nxv64i1_42(<vscale x 64 x i1> %x, <2 x i1>* %y) {
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    addi a1, zero, 42
+; CHECK-NEXT:    li a1, 42
 ; CHECK-NEXT:    vsetivli zero, 2, e8, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a1
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
index 37e1f229e203..3ac59d78f773 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -49,7 +49,7 @@ define i64 @extractelt_v2i64(<2 x i64>* %x) nounwind {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
@@ -110,7 +110,7 @@ define double @extractelt_v2f64(<2 x double>* %x) nounwind {
 define i8 @extractelt_v32i8(<32 x i8>* %x) nounwind {
 ; CHECK-LABEL: extractelt_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m2, ta, mu
@@ -158,7 +158,7 @@ define i64 @extractelt_v4i64(<4 x i64>* %x) nounwind {
 ; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, mu
 ; RV32-NEXT:    vslidedown.vi v8, v8, 3
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    ret
@@ -298,7 +298,7 @@ define i64 @extractelt_v2i64_idx(<2 x i64>* %x, i32 signext %idx) nounwind {
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vslidedown.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    ret
@@ -369,7 +369,7 @@ define double @extractelt_v2f64_idx(<2 x double>* %x, i32 signext %idx) nounwind
 define i8 @extractelt_v32i8_idx(<32 x i8>* %x, i32 signext %idx) nounwind {
 ; CHECK-LABEL: extractelt_v32i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m2, ta, mu
@@ -420,7 +420,7 @@ define i64 @extractelt_v4i64_idx(<4 x i64>* %x, i32 signext %idx) nounwind {
 ; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, mu
 ; RV32-NEXT:    vslidedown.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    ret
@@ -575,7 +575,7 @@ define void @store_extractelt_v4i64(<2 x i64>* %x, i64* %p) nounwind {
 ; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a0, v9
 ; RV32-NEXT:    vmv.x.s a2, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
index d1525447c40d..51bdb54fe3de 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
@@ -80,7 +80,7 @@ define float @bitcast_v1f32_f32(<1 x float> %a) {
 define i64 @bitcast_v4f16_i64(<4 x half> %a) {
 ; RV32-FP-LABEL: bitcast_v4f16_i64:
 ; RV32-FP:       # %bb.0:
-; RV32-FP-NEXT:    addi a0, zero, 32
+; RV32-FP-NEXT:    li a0, 32
 ; RV32-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-FP-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-FP-NEXT:    vmv.x.s a1, v9
@@ -99,7 +99,7 @@ define i64 @bitcast_v4f16_i64(<4 x half> %a) {
 define i64 @bitcast_v2f32_i64(<2 x float> %a) {
 ; RV32-FP-LABEL: bitcast_v2f32_i64:
 ; RV32-FP:       # %bb.0:
-; RV32-FP-NEXT:    addi a0, zero, 32
+; RV32-FP-NEXT:    li a0, 32
 ; RV32-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-FP-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-FP-NEXT:    vmv.x.s a1, v9
@@ -118,7 +118,7 @@ define i64 @bitcast_v2f32_i64(<2 x float> %a) {
 define i64 @bitcast_v1f64_i64(<1 x double> %a) {
 ; RV32-FP-LABEL: bitcast_v1f64_i64:
 ; RV32-FP:       # %bb.0:
-; RV32-FP-NEXT:    addi a0, zero, 32
+; RV32-FP-NEXT:    li a0, 32
 ; RV32-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-FP-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-FP-NEXT:    vmv.x.s a1, v9

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
index eba7f00807f6..717824031e17 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
@@ -57,19 +57,19 @@ define <4 x float> @hang_when_merging_stores_after_legalization(<8 x float> %x,
 ;
 ; LMULMAX2-LABEL: hang_when_merging_stores_after_legalization:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a0, zero, 2
+; LMULMAX2-NEXT:    li a0, 2
 ; LMULMAX2-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX2-NEXT:    vmv.s.x v0, a0
 ; LMULMAX2-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX2-NEXT:    vrgather.vi v12, v8, 0
 ; LMULMAX2-NEXT:    vrgather.vi v12, v9, 3, v0.t
-; LMULMAX2-NEXT:    addi a0, zero, 8
+; LMULMAX2-NEXT:    li a0, 8
 ; LMULMAX2-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX2-NEXT:    vmv.s.x v0, a0
 ; LMULMAX2-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX2-NEXT:    vrgather.vi v8, v10, 0
 ; LMULMAX2-NEXT:    vrgather.vi v8, v11, 3, v0.t
-; LMULMAX2-NEXT:    addi a0, zero, 3
+; LMULMAX2-NEXT:    li a0, 3
 ; LMULMAX2-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX2-NEXT:    vmv.s.x v0, a0
 ; LMULMAX2-NEXT:    vsetivli zero, 4, e32, m1, ta, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
index 9999efbe3380..881511728225 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
@@ -251,7 +251,7 @@ define void @fcmp_ole_vv_v4f64_nonans(<4 x double>* %x, <4 x double>* %y, <4 x i
 define void @fcmp_ule_vv_v32f16(<32 x half>* %x, <32 x half>* %y, <32 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ule_vv_v32f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vle16.v v12, (a1)
@@ -269,7 +269,7 @@ define void @fcmp_ule_vv_v32f16(<32 x half>* %x, <32 x half>* %y, <32 x i1>* %z)
 define void @fcmp_ule_vv_v32f16_nonans(<32 x half>* %x, <32 x half>* %y, <32 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ule_vv_v32f16_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vle16.v v12, (a1)
@@ -352,7 +352,7 @@ define void @fcmp_ult_vv_v8f64_nonans(<8 x double>* %x, <8 x double>* %y, <8 x i
 define void @fcmp_ugt_vv_v64f16(<64 x half>* %x, <64 x half>* %y, <64 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ugt_vv_v64f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 64
+; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vle16.v v16, (a1)
@@ -370,7 +370,7 @@ define void @fcmp_ugt_vv_v64f16(<64 x half>* %x, <64 x half>* %y, <64 x i1>* %z)
 define void @fcmp_ugt_vv_v64f16_nonans(<64 x half>* %x, <64 x half>* %y, <64 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ugt_vv_v64f16_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 64
+; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vle16.v v16, (a1)
@@ -387,7 +387,7 @@ define void @fcmp_ugt_vv_v64f16_nonans(<64 x half>* %x, <64 x half>* %y, <64 x i
 define void @fcmp_ueq_vv_v32f32(<32 x float>* %x, <32 x float>* %y, <32 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ueq_vv_v32f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vle32.v v16, (a1)
@@ -406,7 +406,7 @@ define void @fcmp_ueq_vv_v32f32(<32 x float>* %x, <32 x float>* %y, <32 x i1>* %
 define void @fcmp_ueq_vv_v32f32_nonans(<32 x float>* %x, <32 x float>* %y, <32 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ueq_vv_v32f32_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vle32.v v16, (a1)
@@ -757,7 +757,7 @@ define void @fcmp_ole_vf_v4f64_nonans(<4 x double>* %x, double %y, <4 x i1>* %z)
 define void @fcmp_ule_vf_v32f16(<32 x half>* %x, half %y, <32 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ule_vf_v32f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmfgt.vf v12, v8, fa0
@@ -775,7 +775,7 @@ define void @fcmp_ule_vf_v32f16(<32 x half>* %x, half %y, <32 x i1>* %z) {
 define void @fcmp_ule_vf_v32f16_nonans(<32 x half>* %x, half %y, <32 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ule_vf_v32f16_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmfle.vf v12, v8, fa0
@@ -858,7 +858,7 @@ define void @fcmp_ult_vf_v8f64_nonans(<8 x double>* %x, double %y, <8 x i1>* %z)
 define void @fcmp_ugt_vf_v64f16(<64 x half>* %x, half %y, <64 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ugt_vf_v64f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmfle.vf v16, v8, fa0
@@ -876,7 +876,7 @@ define void @fcmp_ugt_vf_v64f16(<64 x half>* %x, half %y, <64 x i1>* %z) {
 define void @fcmp_ugt_vf_v64f16_nonans(<64 x half>* %x, half %y, <64 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ugt_vf_v64f16_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmfgt.vf v16, v8, fa0
@@ -893,7 +893,7 @@ define void @fcmp_ugt_vf_v64f16_nonans(<64 x half>* %x, half %y, <64 x i1>* %z)
 define void @fcmp_ueq_vf_v32f32(<32 x float>* %x, float %y, <32 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ueq_vf_v32f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vmflt.vf v16, v8, fa0
@@ -912,7 +912,7 @@ define void @fcmp_ueq_vf_v32f32(<32 x float>* %x, float %y, <32 x i1>* %z) {
 define void @fcmp_ueq_vf_v32f32_nonans(<32 x float>* %x, float %y, <32 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ueq_vf_v32f32_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vmfeq.vf v16, v8, fa0
@@ -1265,7 +1265,7 @@ define void @fcmp_ole_fv_v4f64_nonans(<4 x double>* %x, double %y, <4 x i1>* %z)
 define void @fcmp_ule_fv_v32f16(<32 x half>* %x, half %y, <32 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ule_fv_v32f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmflt.vf v12, v8, fa0
@@ -1283,7 +1283,7 @@ define void @fcmp_ule_fv_v32f16(<32 x half>* %x, half %y, <32 x i1>* %z) {
 define void @fcmp_ule_fv_v32f16_nonans(<32 x half>* %x, half %y, <32 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ule_fv_v32f16_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmfge.vf v12, v8, fa0
@@ -1366,7 +1366,7 @@ define void @fcmp_ult_fv_v8f64_nonans(<8 x double>* %x, double %y, <8 x i1>* %z)
 define void @fcmp_ugt_fv_v64f16(<64 x half>* %x, half %y, <64 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ugt_fv_v64f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmfge.vf v16, v8, fa0
@@ -1384,7 +1384,7 @@ define void @fcmp_ugt_fv_v64f16(<64 x half>* %x, half %y, <64 x i1>* %z) {
 define void @fcmp_ugt_fv_v64f16_nonans(<64 x half>* %x, half %y, <64 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ugt_fv_v64f16_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmflt.vf v16, v8, fa0
@@ -1401,7 +1401,7 @@ define void @fcmp_ugt_fv_v64f16_nonans(<64 x half>* %x, half %y, <64 x i1>* %z)
 define void @fcmp_ueq_fv_v32f32(<32 x float>* %x, float %y, <32 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ueq_fv_v32f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vmfgt.vf v16, v8, fa0
@@ -1420,7 +1420,7 @@ define void @fcmp_ueq_fv_v32f32(<32 x float>* %x, float %y, <32 x i1>* %z) {
 define void @fcmp_ueq_fv_v32f32_nonans(<32 x float>* %x, float %y, <32 x i1>* %z) {
 ; CHECK-LABEL: fcmp_ueq_fv_v32f32_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vmfeq.vf v16, v8, fa0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
index dfbae3f9c9ee..3118a88818ce 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
@@ -5,7 +5,7 @@
 define <4 x half> @shuffle_v4f16(<4 x half> %x, <4 x half> %y) {
 ; CHECK-LABEL: shuffle_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 11
+; CHECK-NEXT:    li a0, 11
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
@@ -18,7 +18,7 @@ define <4 x half> @shuffle_v4f16(<4 x half> %x, <4 x half> %y) {
 define <8 x float> @shuffle_v8f32(<8 x float> %x, <8 x float> %y) {
 ; CHECK-LABEL: shuffle_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 236
+; CHECK-NEXT:    li a0, 236
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
@@ -31,7 +31,7 @@ define <8 x float> @shuffle_v8f32(<8 x float> %x, <8 x float> %y) {
 define <4 x double> @shuffle_fv_v4f64(<4 x double> %x) {
 ; RV32-LABEL: shuffle_fv_v4f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 9
+; RV32-NEXT:    li a0, 9
 ; RV32-NEXT:    lui a1, %hi(.LCPI2_0)
 ; RV32-NEXT:    fld ft0, %lo(.LCPI2_0)(a1)
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
@@ -44,7 +44,7 @@ define <4 x double> @shuffle_fv_v4f64(<4 x double> %x) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI2_0)
 ; RV64-NEXT:    fld ft0, %lo(.LCPI2_0)(a0)
-; RV64-NEXT:    addi a0, zero, 9
+; RV64-NEXT:    li a0, 9
 ; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV64-NEXT:    vmv.s.x v0, a0
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
@@ -57,7 +57,7 @@ define <4 x double> @shuffle_fv_v4f64(<4 x double> %x) {
 define <4 x double> @shuffle_vf_v4f64(<4 x double> %x) {
 ; RV32-LABEL: shuffle_vf_v4f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 6
+; RV32-NEXT:    li a0, 6
 ; RV32-NEXT:    lui a1, %hi(.LCPI3_0)
 ; RV32-NEXT:    fld ft0, %lo(.LCPI3_0)(a1)
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
@@ -70,7 +70,7 @@ define <4 x double> @shuffle_vf_v4f64(<4 x double> %x) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a0, %hi(.LCPI3_0)
 ; RV64-NEXT:    fld ft0, %lo(.LCPI3_0)(a0)
-; RV64-NEXT:    addi a0, zero, 6
+; RV64-NEXT:    li a0, 6
 ; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV64-NEXT:    vmv.s.x v0, a0
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
@@ -139,7 +139,7 @@ define <4 x double> @vrgather_shuffle_vv_v4f64(<4 x double> %x, <4 x double> %y)
 ; RV32-NEXT:    vle16.v v14, (a0)
 ; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32-NEXT:    vrgatherei16.vv v12, v8, v14
-; RV32-NEXT:    addi a0, zero, 8
+; RV32-NEXT:    li a0, 8
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32-NEXT:    vmv.s.x v0, a0
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
@@ -154,7 +154,7 @@ define <4 x double> @vrgather_shuffle_vv_v4f64(<4 x double> %x, <4 x double> %y)
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT:    vle64.v v14, (a0)
 ; RV64-NEXT:    vrgather.vv v12, v8, v14
-; RV64-NEXT:    addi a0, zero, 8
+; RV64-NEXT:    li a0, 8
 ; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV64-NEXT:    vmv.s.x v0, a0
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
@@ -168,7 +168,7 @@ define <4 x double> @vrgather_shuffle_vv_v4f64(<4 x double> %x, <4 x double> %y)
 define <4 x double> @vrgather_shuffle_xv_v4f64(<4 x double> %x) {
 ; RV32-LABEL: vrgather_shuffle_xv_v4f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 12
+; RV32-NEXT:    li a0, 12
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32-NEXT:    vmv.s.x v0, a0
 ; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
@@ -184,7 +184,7 @@ define <4 x double> @vrgather_shuffle_xv_v4f64(<4 x double> %x) {
 ;
 ; RV64-LABEL: vrgather_shuffle_xv_v4f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 12
+; RV64-NEXT:    li a0, 12
 ; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV64-NEXT:    vmv.s.x v0, a0
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
@@ -205,7 +205,7 @@ define <4 x double> @vrgather_shuffle_vx_v4f64(<4 x double> %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; RV32-NEXT:    vid.v v10
-; RV32-NEXT:    addi a0, zero, 3
+; RV32-NEXT:    li a0, 3
 ; RV32-NEXT:    vmul.vx v12, v10, a0
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32-NEXT:    vmv.s.x v0, a0
@@ -221,7 +221,7 @@ define <4 x double> @vrgather_shuffle_vx_v4f64(<4 x double> %x) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT:    vid.v v10
-; RV64-NEXT:    addi a0, zero, 3
+; RV64-NEXT:    li a0, 3
 ; RV64-NEXT:    vmul.vx v12, v10, a0
 ; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV64-NEXT:    vmv.s.x v0, a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
index eab91b5fcca4..2c2b0d4b1a13 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
@@ -55,7 +55,7 @@ define void @gather_const_v64f16(<64 x half>* %x) {
 ; LMULMAX8-LABEL: gather_const_v64f16:
 ; LMULMAX8:       # %bb.0:
 ; LMULMAX8-NEXT:    addi a1, a0, 94
-; LMULMAX8-NEXT:    addi a2, zero, 64
+; LMULMAX8-NEXT:    li a2, 64
 ; LMULMAX8-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
 ; LMULMAX8-NEXT:    vlse16.v v8, (a1), zero
 ; LMULMAX8-NEXT:    vse16.v v8, (a0)
@@ -94,7 +94,7 @@ define void @gather_const_v32f32(<32 x float>* %x) {
 ; LMULMAX8-LABEL: gather_const_v32f32:
 ; LMULMAX8:       # %bb.0:
 ; LMULMAX8-NEXT:    addi a1, a0, 68
-; LMULMAX8-NEXT:    addi a2, zero, 32
+; LMULMAX8-NEXT:    li a2, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; LMULMAX8-NEXT:    vlse32.v v8, (a1), zero
 ; LMULMAX8-NEXT:    vse32.v v8, (a0)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
index 613433c3cd26..b37572025643 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
@@ -156,7 +156,7 @@ define <8 x i1> @insertelt_idx_v8i1(<8 x i1> %x, i1 %elt, i32 zeroext %idx) noun
 define <64 x i1> @insertelt_v64i1(<64 x i1> %x, i1 %elt) nounwind {
 ; CHECK-LABEL: insertelt_v64i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vmv.v.i v12, 0
@@ -174,7 +174,7 @@ define <64 x i1> @insertelt_v64i1(<64 x i1> %x, i1 %elt) nounwind {
 define <64 x i1> @insertelt_idx_v64i1(<64 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind {
 ; RV32-LABEL: insertelt_idx_v64i1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a2, zero, 64
+; RV32-NEXT:    li a2, 64
 ; RV32-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
 ; RV32-NEXT:    vmv.s.x v8, a0
 ; RV32-NEXT:    vmv.v.i v12, 0
@@ -189,7 +189,7 @@ define <64 x i1> @insertelt_idx_v64i1(<64 x i1> %x, i1 %elt, i32 zeroext %idx) n
 ;
 ; RV64-LABEL: insertelt_idx_v64i1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a2, zero, 64
+; RV64-NEXT:    li a2, 64
 ; RV64-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
 ; RV64-NEXT:    vmv.s.x v8, a0
 ; RV64-NEXT:    vmv.v.i v12, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index ef304c050dc7..b1bf02d22a38 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -320,7 +320,7 @@ define void @insert_v4i16_v2i16_2(<4 x i16>* %vp, <2 x i16>* %svp) {
 define void @insert_v32i1_v8i1_0(<32 x i1>* %vp, <8 x i1>* %svp) {
 ; LMULMAX2-LABEL: insert_v32i1_v8i1_0:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vlm.v v8, (a0)
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
@@ -352,7 +352,7 @@ define void @insert_v32i1_v8i1_0(<32 x i1>* %vp, <8 x i1>* %svp) {
 define void @insert_v32i1_v8i1_16(<32 x i1>* %vp, <8 x i1>* %svp) {
 ; LMULMAX2-LABEL: insert_v32i1_v8i1_16:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vlm.v v8, (a0)
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
index 73b0c63764d5..9c9565fceefe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -97,7 +97,7 @@ define void @insertelt_v16i8(<16 x i8>* %x, i8 %y) {
 define void @insertelt_v32i16(<32 x i16>* %x, i16 %y, i32 %idx) {
 ; RV32-LABEL: insertelt_v32i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a3, zero, 32
+; RV32-NEXT:    li a3, 32
 ; RV32-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
 ; RV32-NEXT:    vle16.v v8, (a0)
 ; RV32-NEXT:    vmv.s.x v12, a1
@@ -110,7 +110,7 @@ define void @insertelt_v32i16(<32 x i16>* %x, i16 %y, i32 %idx) {
 ;
 ; RV64-LABEL: insertelt_v32i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a3, zero, 32
+; RV64-NEXT:    li a3, 32
 ; RV64-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
 ; RV64-NEXT:    vle16.v v8, (a0)
 ; RV64-NEXT:    vmv.s.x v12, a1
@@ -163,7 +163,7 @@ define void @insertelt_v8i64_0(<8 x i64>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    addi a1, zero, -1
+; CHECK-NEXT:    li a1, -1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
 ; CHECK-NEXT:    vmv.s.x v8, a1
 ; CHECK-NEXT:    vse64.v v8, (a0)
@@ -179,7 +179,7 @@ define void @insertelt_v8i64(<8 x i64>* %x, i32 %idx) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    addi a2, zero, -1
+; RV32-NEXT:    li a2, -1
 ; RV32-NEXT:    vmv.s.x v12, a2
 ; RV32-NEXT:    addi a2, a1, 1
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
@@ -192,7 +192,7 @@ define void @insertelt_v8i64(<8 x i64>* %x, i32 %idx) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    addi a2, zero, -1
+; RV64-NEXT:    li a2, -1
 ; RV64-NEXT:    vmv.s.x v12, a2
 ; RV64-NEXT:    sext.w a1, a1
 ; RV64-NEXT:    addi a2, a1, 1
@@ -212,7 +212,7 @@ define void @insertelt_c6_v8i64_0(<8 x i64>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    addi a1, zero, 6
+; CHECK-NEXT:    li a1, 6
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
 ; CHECK-NEXT:    vmv.s.x v8, a1
 ; CHECK-NEXT:    vse64.v v8, (a0)
@@ -228,7 +228,7 @@ define void @insertelt_c6_v8i64(<8 x i64>* %x, i32 %idx) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    addi a2, zero, 6
+; RV32-NEXT:    li a2, 6
 ; RV32-NEXT:    vmv.s.x v12, a2
 ; RV32-NEXT:    addi a2, a1, 1
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
@@ -241,7 +241,7 @@ define void @insertelt_c6_v8i64(<8 x i64>* %x, i32 %idx) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    addi a2, zero, 6
+; RV64-NEXT:    li a2, 6
 ; RV64-NEXT:    vmv.s.x v12, a2
 ; RV64-NEXT:    sext.w a1, a1
 ; RV64-NEXT:    addi a2, a1, 1
@@ -263,7 +263,7 @@ define void @insertelt_c6_v8i64_0_add(<8 x i64>* %x, <8 x i64>* %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    addi a2, zero, 6
+; CHECK-NEXT:    li a2, 6
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
 ; CHECK-NEXT:    vmv.s.x v8, a2
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index 75e5ac705ca1..83417365d729 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -55,7 +55,7 @@ define void @buildvec_vid_mpy_imm_v16i8(<16 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    addi a1, zero, 3
+; CHECK-NEXT:    li a1, 3
 ; CHECK-NEXT:    vmul.vx v8, v8, a1
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
@@ -160,7 +160,7 @@ define void @buildvec_vid_stepn3_add3_v4i8(<4 x i8>* %z0, <4 x i8>* %z1, <4 x i8
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmv.v.i v8, 3
 ; CHECK-NEXT:    vid.v v9
-; CHECK-NEXT:    addi a1, zero, -3
+; CHECK-NEXT:    li a1, -3
 ; CHECK-NEXT:    vmadd.vx v9, a1, v8
 ; CHECK-NEXT:    vse8.v v9, (a0)
 ; CHECK-NEXT:    ret
@@ -174,7 +174,7 @@ define void @buildvec_vid_stepn3_addn3_v4i32(<4 x i32>* %z0, <4 x i32>* %z1, <4
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v8, -3
 ; CHECK-NEXT:    vid.v v9
-; CHECK-NEXT:    addi a4, zero, -3
+; CHECK-NEXT:    li a4, -3
 ; CHECK-NEXT:    vmadd.vx v9, a4, v8
 ; CHECK-NEXT:    vse32.v v9, (a0)
 ; CHECK-NEXT:    vse32.v v9, (a1)
@@ -192,7 +192,7 @@ define void @buildvec_vid_stepn3_addn3_v4i32(<4 x i32>* %z0, <4 x i32>* %z1, <4
 define <4 x i64> @buildvec_vid_step1_add0_v4i64() {
 ; RV32-LABEL: buildvec_vid_step1_add0_v4i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v9, a0
 ; RV32-NEXT:    vmv.v.i v8, 0
@@ -216,7 +216,7 @@ define <4 x i64> @buildvec_vid_step1_add0_v4i64() {
 define <4 x i64> @buildvec_vid_step2_add0_v4i64() {
 ; RV32-LABEL: buildvec_vid_step2_add0_v4i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 2
+; RV32-NEXT:    li a0, 2
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v9, a0
 ; RV32-NEXT:    vmv.v.i v8, 0
@@ -256,7 +256,7 @@ define void @buildvec_no_vid_v4i8(<4 x i8>* %z0, <4 x i8>* %z1, <4 x i8>* %z2, <
 ; RV32-NEXT:    vmv.v.x v8, a0
 ; RV32-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; RV32-NEXT:    vse8.v v8, (a2)
-; RV32-NEXT:    addi a0, zero, 2047
+; RV32-NEXT:    li a0, 2047
 ; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; RV32-NEXT:    vmv.v.x v8, a0
 ; RV32-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
@@ -286,7 +286,7 @@ define void @buildvec_no_vid_v4i8(<4 x i8>* %z0, <4 x i8>* %z1, <4 x i8>* %z2, <
 ; RV64-NEXT:    vmv.v.x v8, a0
 ; RV64-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; RV64-NEXT:    vse8.v v8, (a2)
-; RV64-NEXT:    addi a0, zero, 2047
+; RV64-NEXT:    li a0, 2047
 ; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; RV64-NEXT:    vmv.v.x v8, a0
 ; RV64-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
@@ -419,7 +419,7 @@ define void @buildvec_dominant1_optsize_v2i32(<2 x i64>* %x) optsize {
 define void @buildvec_seq_v8i8_v4i16(<8 x i8>* %x) {
 ; CHECK-LABEL: buildvec_seq_v8i8_v4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 513
+; CHECK-NEXT:    li a1, 513
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.x v8, a1
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
@@ -507,15 +507,15 @@ define void @buildvec_seq2_v16i8_v2i64(<16 x i8>* %x) {
 define void @buildvec_seq_v9i8(<9 x i8>* %x) {
 ; RV32-LABEL: buildvec_seq_v9i8:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 3
+; RV32-NEXT:    li a1, 3
 ; RV32-NEXT:    sb a1, 8(a0)
-; RV32-NEXT:    addi a1, zero, 73
+; RV32-NEXT:    li a1, 73
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32-NEXT:    vmv.s.x v0, a1
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmv.v.i v8, 2
 ; RV32-NEXT:    vmerge.vim v8, v8, 1, v0
-; RV32-NEXT:    addi a1, zero, 36
+; RV32-NEXT:    li a1, 36
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32-NEXT:    vmv.s.x v0, a1
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
@@ -525,7 +525,7 @@ define void @buildvec_seq_v9i8(<9 x i8>* %x) {
 ;
 ; RV64-LABEL: buildvec_seq_v9i8:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, 3
+; RV64-NEXT:    li a1, 3
 ; RV64-NEXT:    sb a1, 8(a0)
 ; RV64-NEXT:    lui a1, 4104
 ; RV64-NEXT:    addiw a1, a1, 385
@@ -542,7 +542,7 @@ define void @buildvec_seq_v9i8(<9 x i8>* %x) {
 define void @buildvec_seq_v4i16_v2i32(<4 x i16>* %x) {
 ; CHECK-LABEL: buildvec_seq_v4i16_v2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, -127
+; CHECK-NEXT:    li a1, -127
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.x v8, a1
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
@@ -569,7 +569,7 @@ define void @buildvec_vid_step1o2_v4i32(<4 x i32>* %z0, <4 x i32>* %z1, <4 x i32
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vse32.v v9, (a5)
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
@@ -600,14 +600,14 @@ define void @buildvec_vid_step1o2_add3_v4i16(<4 x i16>* %z0, <4 x i16>* %z1, <4
 ; CHECK-NEXT:    vse16.v v8, (a2)
 ; CHECK-NEXT:    vse16.v v8, (a3)
 ; CHECK-NEXT:    vse16.v v8, (a4)
-; CHECK-NEXT:    addi a0, zero, 3
+; CHECK-NEXT:    li a0, 3
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vmv.v.i v9, 4
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, tu, mu
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vse16.v v9, (a5)
-; CHECK-NEXT:    addi a0, zero, 4
+; CHECK-NEXT:    li a0, 4
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vmv.v.i v9, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
index 04a146748091..c71cabbf5c0b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
@@ -77,7 +77,7 @@ define void @sext_v8i8_v8i32(<8 x i8>* %x, <8 x i32>* %z) {
 define void @sext_v32i8_v32i32(<32 x i8>* %x, <32 x i32>* %z) {
 ; LMULMAX8-LABEL: sext_v32i8_v32i32:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a2, zero, 32
+; LMULMAX8-NEXT:    li a2, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX8-NEXT:    vle8.v v8, (a0)
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
@@ -87,7 +87,7 @@ define void @sext_v32i8_v32i32(<32 x i8>* %x, <32 x i32>* %z) {
 ;
 ; LMULMAX2-LABEL: sext_v32i8_v32i32:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e8, m1, ta, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
index 254ff627d054..40479b0fb343 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
@@ -28,7 +28,7 @@ define void @seteq_vv_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 define void @setne_vv_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; CHECK-LABEL: setne_vv_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vle8.v v10, (a1)
@@ -48,7 +48,7 @@ define void @setne_vv_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 define void @setgt_vv_v64i8(<64 x i8>* %x, <64 x i8>* %y, <64 x i1>* %z) {
 ; CHECK-LABEL: setgt_vv_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 64
+; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vle8.v v12, (a1)
@@ -65,7 +65,7 @@ define void @setgt_vv_v64i8(<64 x i8>* %x, <64 x i8>* %y, <64 x i1>* %z) {
 define void @setlt_vv_v128i8(<128 x i8>* %x, <128 x i8>* %y, <128 x i1>* %z) {
 ; CHECK-LABEL: setlt_vv_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 128
+; CHECK-NEXT:    li a3, 128
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vle8.v v16, (a1)
@@ -114,7 +114,7 @@ define void @setle_vv_v16i8(<16 x i8>* %x, <16 x i8>* %y, <16 x i1>* %z) {
 define void @setugt_vv_v32i8(<32 x i8>* %x, <32 x i8>* %y, <32 x i1>* %z) {
 ; CHECK-LABEL: setugt_vv_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vle8.v v10, (a1)
@@ -131,7 +131,7 @@ define void @setugt_vv_v32i8(<32 x i8>* %x, <32 x i8>* %y, <32 x i1>* %z) {
 define void @setult_vv_v64i8(<64 x i8>* %x, <64 x i8>* %y, <64 x i1>* %z) {
 ; CHECK-LABEL: setult_vv_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 64
+; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vle8.v v12, (a1)
@@ -148,7 +148,7 @@ define void @setult_vv_v64i8(<64 x i8>* %x, <64 x i8>* %y, <64 x i1>* %z) {
 define void @setuge_vv_v128i8(<128 x i8>* %x, <128 x i8>* %y, <128 x i1>* %z) {
 ; CHECK-LABEL: setuge_vv_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 128
+; CHECK-NEXT:    li a3, 128
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vle8.v v16, (a1)
@@ -197,7 +197,7 @@ define void @seteq_vx_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
 define void @setne_vx_v32i8(<32 x i8>* %x, i8 %y, <32 x i1>* %z) {
 ; CHECK-LABEL: setne_vx_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmsne.vx v10, v8, a1
@@ -214,7 +214,7 @@ define void @setne_vx_v32i8(<32 x i8>* %x, i8 %y, <32 x i1>* %z) {
 define void @setgt_vx_v64i8(<64 x i8>* %x, i8 %y, <64 x i1>* %z) {
 ; CHECK-LABEL: setgt_vx_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 64
+; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmsgt.vx v12, v8, a1
@@ -231,7 +231,7 @@ define void @setgt_vx_v64i8(<64 x i8>* %x, i8 %y, <64 x i1>* %z) {
 define void @setlt_vx_v128i8(<128 x i8>* %x, i8 %y, <128 x i1>* %z) {
 ; CHECK-LABEL: setlt_vx_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 128
+; CHECK-NEXT:    li a3, 128
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmslt.vx v16, v8, a1
@@ -281,7 +281,7 @@ define void @setle_vx_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
 define void @setugt_vx_v32i8(<32 x i8>* %x, i8 %y, <32 x i1>* %z) {
 ; CHECK-LABEL: setugt_vx_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmsgtu.vx v10, v8, a1
@@ -298,7 +298,7 @@ define void @setugt_vx_v32i8(<32 x i8>* %x, i8 %y, <32 x i1>* %z) {
 define void @setult_vx_v64i8(<64 x i8>* %x, i8 %y, <64 x i1>* %z) {
 ; CHECK-LABEL: setult_vx_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 64
+; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmsltu.vx v12, v8, a1
@@ -315,7 +315,7 @@ define void @setult_vx_v64i8(<64 x i8>* %x, i8 %y, <64 x i1>* %z) {
 define void @setuge_vx_v128i8(<128 x i8>* %x, i8 %y, <128 x i1>* %z) {
 ; CHECK-LABEL: setuge_vx_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 128
+; CHECK-NEXT:    li a3, 128
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmv.v.x v16, a1
@@ -365,7 +365,7 @@ define void @seteq_xv_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
 define void @setne_xv_v32i8(<32 x i8>* %x, i8 %y, <32 x i1>* %z) {
 ; CHECK-LABEL: setne_xv_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmsne.vx v10, v8, a1
@@ -382,7 +382,7 @@ define void @setne_xv_v32i8(<32 x i8>* %x, i8 %y, <32 x i1>* %z) {
 define void @setgt_xv_v64i8(<64 x i8>* %x, i8 %y, <64 x i1>* %z) {
 ; CHECK-LABEL: setgt_xv_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 64
+; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmslt.vx v12, v8, a1
@@ -399,7 +399,7 @@ define void @setgt_xv_v64i8(<64 x i8>* %x, i8 %y, <64 x i1>* %z) {
 define void @setlt_xv_v128i8(<128 x i8>* %x, i8 %y, <128 x i1>* %z) {
 ; CHECK-LABEL: setlt_xv_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 128
+; CHECK-NEXT:    li a3, 128
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmsgt.vx v16, v8, a1
@@ -449,7 +449,7 @@ define void @setle_xv_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
 define void @setugt_xv_v32i8(<32 x i8>* %x, i8 %y, <32 x i1>* %z) {
 ; CHECK-LABEL: setugt_xv_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmsltu.vx v10, v8, a1
@@ -466,7 +466,7 @@ define void @setugt_xv_v32i8(<32 x i8>* %x, i8 %y, <32 x i1>* %z) {
 define void @setult_xv_v64i8(<64 x i8>* %x, i8 %y, <64 x i1>* %z) {
 ; CHECK-LABEL: setult_xv_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 64
+; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmsgtu.vx v12, v8, a1
@@ -483,7 +483,7 @@ define void @setult_xv_v64i8(<64 x i8>* %x, i8 %y, <64 x i1>* %z) {
 define void @setuge_xv_v128i8(<128 x i8>* %x, i8 %y, <128 x i1>* %z) {
 ; CHECK-LABEL: setuge_xv_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 128
+; CHECK-NEXT:    li a3, 128
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmsleu.vx v16, v8, a1
@@ -533,7 +533,7 @@ define void @seteq_vi_v16i8(<16 x i8>* %x, <16 x i1>* %z) {
 define void @setne_vi_v32i8(<32 x i8>* %x, <32 x i1>* %z) {
 ; CHECK-LABEL: setne_vi_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmsne.vi v10, v8, 0
@@ -550,7 +550,7 @@ define void @setne_vi_v32i8(<32 x i8>* %x, <32 x i1>* %z) {
 define void @setgt_vi_v64i8(<64 x i8>* %x, <64 x i1>* %z) {
 ; CHECK-LABEL: setgt_vi_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmsgt.vx v12, v8, zero
@@ -567,7 +567,7 @@ define void @setgt_vi_v64i8(<64 x i8>* %x, <64 x i1>* %z) {
 define void @setlt_vi_v128i8(<128 x i8>* %x, <128 x i1>* %z) {
 ; CHECK-LABEL: setlt_vi_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 128
+; CHECK-NEXT:    li a2, 128
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmsle.vi v16, v8, -1
@@ -616,10 +616,10 @@ define void @setle_vi_v16i8(<16 x i8>* %x, <16 x i1>* %z) {
 define void @setugt_vi_v32i8(<32 x i8>* %x, <32 x i1>* %z) {
 ; CHECK-LABEL: setugt_vi_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a0, zero, 5
+; CHECK-NEXT:    li a0, 5
 ; CHECK-NEXT:    vmsgtu.vx v10, v8, a0
 ; CHECK-NEXT:    vsm.v v10, (a1)
 ; CHECK-NEXT:    ret
@@ -634,7 +634,7 @@ define void @setugt_vi_v32i8(<32 x i8>* %x, <32 x i1>* %z) {
 define void @setult_vi_v64i8(<64 x i8>* %x, <64 x i1>* %z) {
 ; CHECK-LABEL: setult_vi_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmsleu.vi v12, v8, 4
@@ -651,7 +651,7 @@ define void @setult_vi_v64i8(<64 x i8>* %x, <64 x i1>* %z) {
 define void @setuge_vi_v128i8(<128 x i8>* %x, <128 x i1>* %z) {
 ; CHECK-LABEL: setuge_vi_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 128
+; CHECK-NEXT:    li a2, 128
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmsgtu.vi v16, v8, 4

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
index 13f5644bdf7b..378239a2d745 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -5,7 +5,7 @@
 define <4 x i16> @shuffle_v4i16(<4 x i16> %x, <4 x i16> %y) {
 ; CHECK-LABEL: shuffle_v4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 11
+; CHECK-NEXT:    li a0, 11
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
@@ -18,7 +18,7 @@ define <4 x i16> @shuffle_v4i16(<4 x i16> %x, <4 x i16> %y) {
 define <8 x i32> @shuffle_v8i32(<8 x i32> %x, <8 x i32> %y) {
 ; CHECK-LABEL: shuffle_v8i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 203
+; CHECK-NEXT:    li a0, 203
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
@@ -31,7 +31,7 @@ define <8 x i32> @shuffle_v8i32(<8 x i32> %x, <8 x i32> %y) {
 define <4 x i16> @shuffle_xv_v4i16(<4 x i16> %x) {
 ; CHECK-LABEL: shuffle_xv_v4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 9
+; CHECK-NEXT:    li a0, 9
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
@@ -44,7 +44,7 @@ define <4 x i16> @shuffle_xv_v4i16(<4 x i16> %x) {
 define <4 x i16> @shuffle_vx_v4i16(<4 x i16> %x) {
 ; CHECK-LABEL: shuffle_vx_v4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 6
+; CHECK-NEXT:    li a0, 6
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
@@ -90,7 +90,7 @@ define <4 x i16> @vrgather_shuffle_vv_v4i16(<4 x i16> %x, <4 x i16> %y) {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v11, (a0)
 ; CHECK-NEXT:    vrgather.vv v10, v8, v11
-; CHECK-NEXT:    addi a0, zero, 8
+; CHECK-NEXT:    li a0, 8
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
@@ -104,7 +104,7 @@ define <4 x i16> @vrgather_shuffle_vv_v4i16(<4 x i16> %x, <4 x i16> %y) {
 define <4 x i16> @vrgather_shuffle_xv_v4i16(<4 x i16> %x) {
 ; CHECK-LABEL: vrgather_shuffle_xv_v4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 12
+; CHECK-NEXT:    li a0, 12
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
@@ -123,7 +123,7 @@ define <4 x i16> @vrgather_shuffle_vx_v4i16(<4 x i16> %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vid.v v9
-; CHECK-NEXT:    addi a0, zero, 3
+; CHECK-NEXT:    li a0, 3
 ; CHECK-NEXT:    vmul.vx v10, v9, a0
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
@@ -189,7 +189,7 @@ define <8 x i64> @vrgather_permute_shuffle_uv_v8i64(<8 x i64> %x) {
 define <8 x i64> @vrgather_shuffle_vv_v8i64(<8 x i64> %x, <8 x i64> %y) {
 ; RV32-LABEL: vrgather_shuffle_vv_v8i64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 5
+; RV32-NEXT:    li a0, 5
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v16, a0
 ; RV32-NEXT:    vmv.v.i v20, 2
@@ -201,7 +201,7 @@ define <8 x i64> @vrgather_shuffle_vv_v8i64(<8 x i64> %x, <8 x i64> %y) {
 ; RV32-NEXT:    vle16.v v21, (a0)
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vrgatherei16.vv v16, v8, v21
-; RV32-NEXT:    addi a0, zero, 164
+; RV32-NEXT:    li a0, 164
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32-NEXT:    vmv.s.x v0, a0
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
@@ -211,7 +211,7 @@ define <8 x i64> @vrgather_shuffle_vv_v8i64(<8 x i64> %x, <8 x i64> %y) {
 ;
 ; RV64-LABEL: vrgather_shuffle_vv_v8i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 5
+; RV64-NEXT:    li a0, 5
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vmv.s.x v16, a0
 ; RV64-NEXT:    vmv.v.i v20, 2
@@ -222,7 +222,7 @@ define <8 x i64> @vrgather_shuffle_vv_v8i64(<8 x i64> %x, <8 x i64> %y) {
 ; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64-NEXT:    vle64.v v24, (a0)
 ; RV64-NEXT:    vrgather.vv v16, v8, v24
-; RV64-NEXT:    addi a0, zero, 164
+; RV64-NEXT:    li a0, 164
 ; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV64-NEXT:    vmv.s.x v0, a0
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
@@ -243,7 +243,7 @@ define <8 x i64> @vrgather_shuffle_xv_v8i64(<8 x i64> %x) {
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vmv.v.i v20, -1
 ; RV32-NEXT:    vrgatherei16.vv v12, v20, v16
-; RV32-NEXT:    addi a0, zero, 113
+; RV32-NEXT:    li a0, 113
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32-NEXT:    vmv.s.x v0, a0
 ; RV32-NEXT:    lui a0, %hi(.LCPI12_1)
@@ -257,7 +257,7 @@ define <8 x i64> @vrgather_shuffle_xv_v8i64(<8 x i64> %x) {
 ;
 ; RV64-LABEL: vrgather_shuffle_xv_v8i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 113
+; RV64-NEXT:    li a0, 113
 ; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV64-NEXT:    vmv.s.x v0, a0
 ; RV64-NEXT:    lui a0, %hi(.LCPI12_0)
@@ -282,7 +282,7 @@ define <8 x i64> @vrgather_shuffle_vx_v8i64(<8 x i64> %x) {
 ; RV32-NEXT:    vmv4r.v v12, v8
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vrgatherei16.vv v8, v12, v16
-; RV32-NEXT:    addi a0, zero, 140
+; RV32-NEXT:    li a0, 140
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32-NEXT:    vmv.s.x v0, a0
 ; RV32-NEXT:    lui a0, %hi(.LCPI13_1)
@@ -296,7 +296,7 @@ define <8 x i64> @vrgather_shuffle_vx_v8i64(<8 x i64> %x) {
 ;
 ; RV64-LABEL: vrgather_shuffle_vx_v8i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 115
+; RV64-NEXT:    li a0, 115
 ; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV64-NEXT:    vmv.s.x v0, a0
 ; RV64-NEXT:    lui a0, %hi(.LCPI13_0)
@@ -318,7 +318,7 @@ define <4 x i8> @interleave_shuffles(<4 x i8> %x) {
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vrgather.vi v9, v8, 1
-; CHECK-NEXT:    addi a1, zero, 10
+; CHECK-NEXT:    li a1, 10
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a1
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
@@ -349,7 +349,7 @@ define <8 x i8> @splat_ve4_ins_i0ve2(<8 x i8> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v10, 4
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
@@ -363,7 +363,7 @@ define <8 x i8> @splat_ve4_ins_i0ve2(<8 x i8> %v) {
 define <8 x i8> @splat_ve4_ins_i1ve3(<8 x i8> %v) {
 ; CHECK-LABEL: splat_ve4_ins_i1ve3:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 3
+; CHECK-NEXT:    li a0, 3
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vmv.v.i v10, 4
@@ -380,7 +380,7 @@ define <8 x i8> @splat_ve4_ins_i1ve3(<8 x i8> %v) {
 define <8 x i8> @splat_ve2_we0(<8 x i8> %v, <8 x i8> %w) {
 ; CHECK-LABEL: splat_ve2_we0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 66
+; CHECK-NEXT:    li a0, 66
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
@@ -397,12 +397,12 @@ define <8 x i8> @splat_ve2_we0_ins_i0ve4(<8 x i8> %v, <8 x i8> %w) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.i v11, 2
-; CHECK-NEXT:    addi a0, zero, 4
+; CHECK-NEXT:    li a0, 4
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, mu
 ; CHECK-NEXT:    vmv.s.x v11, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vrgather.vv v10, v8, v11
-; CHECK-NEXT:    addi a0, zero, 66
+; CHECK-NEXT:    li a0, 66
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
@@ -416,7 +416,7 @@ define <8 x i8> @splat_ve2_we0_ins_i0ve4(<8 x i8> %v, <8 x i8> %w) {
 define <8 x i8> @splat_ve2_we0_ins_i0we4(<8 x i8> %v, <8 x i8> %w) {
 ; CHECK-LABEL: splat_ve2_we0_ins_i0we4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 67
+; CHECK-NEXT:    li a0, 67
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
@@ -440,7 +440,7 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4(<8 x i8> %v, <8 x i8> %w) {
 ; RV32-NEXT:    vmv.v.x v11, a0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vrgather.vv v10, v8, v11
-; RV32-NEXT:    addi a0, zero, 66
+; RV32-NEXT:    li a0, 66
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32-NEXT:    vmv.s.x v0, a0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
@@ -456,7 +456,7 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4(<8 x i8> %v, <8 x i8> %w) {
 ; RV64-NEXT:    vmv.v.x v11, a0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vrgather.vv v10, v8, v11
-; RV64-NEXT:    addi a0, zero, 66
+; RV64-NEXT:    li a0, 66
 ; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV64-NEXT:    vmv.s.x v0, a0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
@@ -470,13 +470,13 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4(<8 x i8> %v, <8 x i8> %w) {
 define <8 x i8> @splat_ve2_we0_ins_i2we4(<8 x i8> %v, <8 x i8> %w) {
 ; CHECK-LABEL: splat_ve2_we0_ins_i2we4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 4
+; CHECK-NEXT:    li a0, 4
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    vmv.v.i v11, 0
 ; CHECK-NEXT:    vsetivli zero, 3, e8, mf2, tu, mu
 ; CHECK-NEXT:    vslideup.vi v11, v10, 2
-; CHECK-NEXT:    addi a0, zero, 70
+; CHECK-NEXT:    li a0, 70
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
@@ -491,7 +491,7 @@ define <8 x i8> @splat_ve2_we0_ins_i2we4(<8 x i8> %v, <8 x i8> %w) {
 define <8 x i8> @splat_ve2_we0_ins_i2ve4_i5we6(<8 x i8> %v, <8 x i8> %w) {
 ; RV32-LABEL: splat_ve2_we0_ins_i2ve4_i5we6:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 6
+; RV32-NEXT:    li a0, 6
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vmv.s.x v10, a0
 ; RV32-NEXT:    vmv.v.i v11, 0
@@ -503,7 +503,7 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4_i5we6(<8 x i8> %v, <8 x i8> %w) {
 ; RV32-NEXT:    vmv.v.x v12, a0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT:    vrgather.vv v10, v8, v12
-; RV32-NEXT:    addi a0, zero, 98
+; RV32-NEXT:    li a0, 98
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32-NEXT:    vmv.s.x v0, a0
 ; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
@@ -513,7 +513,7 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4_i5we6(<8 x i8> %v, <8 x i8> %w) {
 ;
 ; RV64-LABEL: splat_ve2_we0_ins_i2ve4_i5we6:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 6
+; RV64-NEXT:    li a0, 6
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vmv.s.x v10, a0
 ; RV64-NEXT:    vmv.v.i v11, 0
@@ -525,7 +525,7 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4_i5we6(<8 x i8> %v, <8 x i8> %w) {
 ; RV64-NEXT:    vmv.v.x v12, a0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT:    vrgather.vv v10, v8, v12
-; RV64-NEXT:    addi a0, zero, 98
+; RV64-NEXT:    li a0, 98
 ; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV64-NEXT:    vmv.s.x v0, a0
 ; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
index 024725bd1a5a..9e3c5ff0e485 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
@@ -114,7 +114,7 @@ define void @splat_v2i64(<2 x i64>* %x, i64 %y) {
 define void @splat_v32i8(<32 x i8>* %x, i8 %y) {
 ; LMULMAX8-LABEL: splat_v32i8:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a2, zero, 32
+; LMULMAX8-NEXT:    li a2, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX8-NEXT:    vmv.v.x v8, a1
 ; LMULMAX8-NEXT:    vse8.v v8, (a0)
@@ -122,7 +122,7 @@ define void @splat_v32i8(<32 x i8>* %x, i8 %y) {
 ;
 ; LMULMAX2-LABEL: splat_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vmv.v.x v8, a1
 ; LMULMAX2-NEXT:    vse8.v v8, (a0)
@@ -229,7 +229,7 @@ define void @splat_v4i64(<4 x i64>* %x, i64 %y) {
 ;
 ; LMULMAX1-RV32-LABEL: splat_v4i64:
 ; LMULMAX1-RV32:       # %bb.0:
-; LMULMAX1-RV32-NEXT:    addi a3, zero, 5
+; LMULMAX1-RV32-NEXT:    li a3, 5
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmv.s.x v0, a3
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -323,7 +323,7 @@ define void @splat_zero_v2i64(<2 x i64>* %x) {
 define void @splat_zero_v32i8(<32 x i8>* %x) {
 ; LMULMAX8-LABEL: splat_zero_v32i8:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a1, zero, 32
+; LMULMAX8-NEXT:    li a1, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX8-NEXT:    vmv.v.i v8, 0
 ; LMULMAX8-NEXT:    vse8.v v8, (a0)
@@ -331,7 +331,7 @@ define void @splat_zero_v32i8(<32 x i8>* %x) {
 ;
 ; LMULMAX2-LABEL: splat_zero_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 32
+; LMULMAX2-NEXT:    li a1, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vmv.v.i v8, 0
 ; LMULMAX2-NEXT:    vse8.v v8, (a0)
@@ -502,7 +502,7 @@ define void @splat_allones_v2i64(<2 x i64>* %x) {
 define void @splat_allones_v32i8(<32 x i8>* %x) {
 ; LMULMAX8-LABEL: splat_allones_v32i8:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a1, zero, 32
+; LMULMAX8-NEXT:    li a1, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX8-NEXT:    vmv.v.i v8, -1
 ; LMULMAX8-NEXT:    vse8.v v8, (a0)
@@ -510,7 +510,7 @@ define void @splat_allones_v32i8(<32 x i8>* %x) {
 ;
 ; LMULMAX2-LABEL: splat_allones_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 32
+; LMULMAX2-NEXT:    li a1, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vmv.v.i v8, -1
 ; LMULMAX2-NEXT:    vse8.v v8, (a0)
@@ -709,7 +709,7 @@ define void @vadd_vx_v16i64(<16 x i64>* %a, i64 %b, <16 x i64>* %c) {
 ; LMULMAX2-RV32-NEXT:    vle64.v v12, (a0)
 ; LMULMAX2-RV32-NEXT:    addi a0, a0, 32
 ; LMULMAX2-RV32-NEXT:    vle64.v v14, (a0)
-; LMULMAX2-RV32-NEXT:    addi a0, zero, 85
+; LMULMAX2-RV32-NEXT:    li a0, 85
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.s.x v0, a0
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
@@ -747,7 +747,7 @@ define void @vadd_vx_v16i64(<16 x i64>* %a, i64 %b, <16 x i64>* %c) {
 ; LMULMAX1-RV32-NEXT:    vle64.v v14, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a0, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle64.v v15, (a0)
-; LMULMAX1-RV32-NEXT:    addi a0, zero, 5
+; LMULMAX1-RV32-NEXT:    li a0, 5
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmv.s.x v0, a0
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
index aff029fe1df3..7cd9807bff72 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
@@ -72,7 +72,7 @@ define void @gather_const_v64i8(<64 x i8>* %x) {
 ; LMULMAX4-LABEL: gather_const_v64i8:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    addi a1, a0, 32
-; LMULMAX4-NEXT:    addi a2, zero, 64
+; LMULMAX4-NEXT:    li a2, 64
 ; LMULMAX4-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
 ; LMULMAX4-NEXT:    vlse8.v v8, (a1), zero
 ; LMULMAX4-NEXT:    vse8.v v8, (a0)
@@ -102,7 +102,7 @@ define void @gather_const_v16i16(<32 x i16>* %x) {
 ; LMULMAX4-LABEL: gather_const_v16i16:
 ; LMULMAX4:       # %bb.0:
 ; LMULMAX4-NEXT:    addi a1, a0, 50
-; LMULMAX4-NEXT:    addi a2, zero, 32
+; LMULMAX4-NEXT:    li a2, 32
 ; LMULMAX4-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; LMULMAX4-NEXT:    vlse16.v v8, (a1), zero
 ; LMULMAX4-NEXT:    vse16.v v8, (a0)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index 4122a1f90148..cf068032e9e4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -841,7 +841,7 @@ define void @mulhu_v16i8(<16 x i8>* %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; RV32-NEXT:    vle8.v v8, (a0)
-; RV32-NEXT:    addi a1, zero, 513
+; RV32-NEXT:    li a1, 513
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; RV32-NEXT:    vmv.s.x v0, a1
 ; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
@@ -865,7 +865,7 @@ define void @mulhu_v16i8(<16 x i8>* %x) {
 ; RV32-NEXT:    vmv.s.x v0, a2
 ; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; RV32-NEXT:    vmv.v.i v10, 0
-; RV32-NEXT:    addi a2, zero, -128
+; RV32-NEXT:    li a2, -128
 ; RV32-NEXT:    vmerge.vxm v11, v10, a2, v0
 ; RV32-NEXT:    addi a1, a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
@@ -888,7 +888,7 @@ define void @mulhu_v16i8(<16 x i8>* %x) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; RV64-NEXT:    vle8.v v8, (a0)
-; RV64-NEXT:    addi a1, zero, 513
+; RV64-NEXT:    li a1, 513
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; RV64-NEXT:    vmv.s.x v0, a1
 ; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
@@ -912,7 +912,7 @@ define void @mulhu_v16i8(<16 x i8>* %x) {
 ; RV64-NEXT:    vmv.s.x v0, a2
 ; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; RV64-NEXT:    vmv.v.i v10, 0
-; RV64-NEXT:    addi a2, zero, -128
+; RV64-NEXT:    li a2, -128
 ; RV64-NEXT:    vmerge.vxm v11, v10, a2, v0
 ; RV64-NEXT:    addiw a1, a1, 32
 ; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
@@ -941,9 +941,9 @@ define void @mulhu_v8i16(<8 x i16>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    addi a1, zero, 1
+; CHECK-NEXT:    li a1, 1
 ; CHECK-NEXT:    vmv.s.x v9, a1
-; CHECK-NEXT:    addi a1, zero, 33
+; CHECK-NEXT:    li a1, 33
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a1
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
@@ -995,7 +995,7 @@ define void @mulhu_v4i32(<4 x i32>* %x) {
 ; CHECK-NEXT:    vsub.vv v8, v8, v9
 ; CHECK-NEXT:    vmulhu.vv v8, v8, v10
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    addi a1, zero, 1
+; CHECK-NEXT:    li a1, 1
 ; CHECK-NEXT:    vmv.s.x v9, a1
 ; CHECK-NEXT:    vmv.v.i v10, 2
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
@@ -1078,9 +1078,9 @@ define void @mulhs_v16i8(<16 x i8>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; RV32-NEXT:    vmv.v.i v9, 7
 ; RV32-NEXT:    vmerge.vim v9, v9, 1, v0
-; RV32-NEXT:    addi a1, zero, -123
+; RV32-NEXT:    li a1, -123
 ; RV32-NEXT:    vmv.v.x v10, a1
-; RV32-NEXT:    addi a1, zero, 57
+; RV32-NEXT:    li a1, 57
 ; RV32-NEXT:    vmerge.vxm v10, v10, a1, v0
 ; RV32-NEXT:    vmulhu.vv v8, v8, v10
 ; RV32-NEXT:    vsrl.vv v8, v8, v9
@@ -1098,9 +1098,9 @@ define void @mulhs_v16i8(<16 x i8>* %x) {
 ; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; RV64-NEXT:    vmv.v.i v9, 7
 ; RV64-NEXT:    vmerge.vim v9, v9, 1, v0
-; RV64-NEXT:    addi a1, zero, -123
+; RV64-NEXT:    li a1, -123
 ; RV64-NEXT:    vmv.v.x v10, a1
-; RV64-NEXT:    addi a1, zero, 57
+; RV64-NEXT:    li a1, 57
 ; RV64-NEXT:    vmerge.vxm v10, v10, a1, v0
 ; RV64-NEXT:    vmulhu.vv v8, v8, v10
 ; RV64-NEXT:    vsrl.vv v8, v8, v9
@@ -1117,7 +1117,7 @@ define void @mulhs_v8i16(<8 x i16>* %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; RV32-NEXT:    vle16.v v8, (a0)
-; RV32-NEXT:    addi a1, zero, 105
+; RV32-NEXT:    li a1, 105
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32-NEXT:    vmv.s.x v0, a1
 ; RV32-NEXT:    lui a1, 5
@@ -1138,7 +1138,7 @@ define void @mulhs_v8i16(<8 x i16>* %x) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; RV64-NEXT:    vle16.v v8, (a0)
-; RV64-NEXT:    addi a1, zero, 105
+; RV64-NEXT:    li a1, 105
 ; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV64-NEXT:    vmv.s.x v0, a1
 ; RV64-NEXT:    lui a1, 5
@@ -1165,7 +1165,7 @@ define void @mulhs_v4i32(<4 x i32>* %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    addi a1, zero, 5
+; RV32-NEXT:    li a1, 5
 ; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32-NEXT:    vmv.s.x v0, a1
 ; RV32-NEXT:    lui a1, 419430
@@ -1229,7 +1229,7 @@ define void @mulhs_v2i64(<2 x i64>* %x) {
 ; RV32-NEXT:    vrsub.vi v10, v10, 0
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vmadd.vv v10, v8, v9
-; RV32-NEXT:    addi a1, zero, 1
+; RV32-NEXT:    li a1, 1
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v8, a1
 ; RV32-NEXT:    vmv.v.i v9, 0
@@ -1237,7 +1237,7 @@ define void @mulhs_v2i64(<2 x i64>* %x) {
 ; RV32-NEXT:    vslideup.vi v9, v8, 2
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vsra.vv v8, v10, v9
-; RV32-NEXT:    addi a1, zero, 63
+; RV32-NEXT:    li a1, 63
 ; RV32-NEXT:    vsrl.vx v9, v10, a1
 ; RV32-NEXT:    vadd.vv v8, v8, v9
 ; RV32-NEXT:    vse64.v v8, (a0)
@@ -1264,7 +1264,7 @@ define void @mulhs_v2i64(<2 x i64>* %x) {
 ; RV64-NEXT:    vid.v v10
 ; RV64-NEXT:    vrsub.vi v11, v10, 0
 ; RV64-NEXT:    vmadd.vv v11, v8, v9
-; RV64-NEXT:    addi a1, zero, 63
+; RV64-NEXT:    li a1, 63
 ; RV64-NEXT:    vsrl.vx v8, v11, a1
 ; RV64-NEXT:    vsra.vv v9, v11, v10
 ; RV64-NEXT:    vadd.vv v8, v9, v8
@@ -1551,7 +1551,7 @@ define void @umax_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
 define void @add_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: add_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -1736,7 +1736,7 @@ define void @add_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @sub_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: sub_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -1921,7 +1921,7 @@ define void @sub_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @mul_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: mul_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -2106,7 +2106,7 @@ define void @mul_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @and_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: and_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -2291,7 +2291,7 @@ define void @and_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @or_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: or_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -2476,7 +2476,7 @@ define void @or_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @xor_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: xor_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -2661,7 +2661,7 @@ define void @xor_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @lshr_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: lshr_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -2846,7 +2846,7 @@ define void @lshr_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @ashr_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: ashr_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -3031,7 +3031,7 @@ define void @ashr_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @shl_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: shl_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -3216,7 +3216,7 @@ define void @shl_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @sdiv_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: sdiv_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -3401,7 +3401,7 @@ define void @sdiv_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @srem_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: srem_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -3586,7 +3586,7 @@ define void @srem_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @udiv_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: udiv_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -3771,7 +3771,7 @@ define void @udiv_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @urem_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: urem_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -3989,7 +3989,7 @@ define void @extract_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @mulhu_v32i8(<32 x i8>* %x) {
 ; LMULMAX2-RV32-LABEL: mulhu_v32i8:
 ; LMULMAX2-RV32:       # %bb.0:
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 32
+; LMULMAX2-RV32-NEXT:    li a1, 32
 ; LMULMAX2-RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-RV32-NEXT:    lui a2, 66049
@@ -4009,7 +4009,7 @@ define void @mulhu_v32i8(<32 x i8>* %x) {
 ; LMULMAX2-RV32-NEXT:    addi a2, a2, -2044
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.s.x v0, a2
-; LMULMAX2-RV32-NEXT:    addi a2, zero, -128
+; LMULMAX2-RV32-NEXT:    li a2, -128
 ; LMULMAX2-RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmerge.vxm v12, v12, a2, v0
 ; LMULMAX2-RV32-NEXT:    vmulhu.vv v8, v8, v12
@@ -4039,7 +4039,7 @@ define void @mulhu_v32i8(<32 x i8>* %x) {
 ;
 ; LMULMAX2-RV64-LABEL: mulhu_v32i8:
 ; LMULMAX2-RV64:       # %bb.0:
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 32
+; LMULMAX2-RV64-NEXT:    li a1, 32
 ; LMULMAX2-RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-RV64-NEXT:    lui a2, 66049
@@ -4059,7 +4059,7 @@ define void @mulhu_v32i8(<32 x i8>* %x) {
 ; LMULMAX2-RV64-NEXT:    addiw a2, a2, -2044
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vmv.s.x v0, a2
-; LMULMAX2-RV64-NEXT:    addi a2, zero, -128
+; LMULMAX2-RV64-NEXT:    li a2, -128
 ; LMULMAX2-RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vmerge.vxm v12, v12, a2, v0
 ; LMULMAX2-RV64-NEXT:    vmulhu.vv v8, v8, v12
@@ -4126,7 +4126,7 @@ define void @mulhu_v16i16(<16 x i16>* %x) {
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv1r.v v0, v8
 ; LMULMAX2-RV32-NEXT:    vmerge.vim v12, v12, 1, v0
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 257
+; LMULMAX2-RV32-NEXT:    li a1, 257
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.s.x v0, a1
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
@@ -4165,7 +4165,7 @@ define void @mulhu_v16i16(<16 x i16>* %x) {
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vmv1r.v v0, v8
 ; LMULMAX2-RV64-NEXT:    vmerge.vim v12, v12, 1, v0
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 257
+; LMULMAX2-RV64-NEXT:    li a1, 257
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; LMULMAX2-RV64-NEXT:    vmv.s.x v0, a1
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
@@ -4211,7 +4211,7 @@ define void @mulhu_v8i32(<8 x i32>* %x) {
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vle32.v v8, (a0)
-; LMULMAX2-NEXT:    addi a1, zero, 68
+; LMULMAX2-NEXT:    li a1, 68
 ; LMULMAX2-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX2-NEXT:    vmv.s.x v0, a1
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
@@ -4225,7 +4225,7 @@ define void @mulhu_v8i32(<8 x i32>* %x) {
 ; LMULMAX2-NEXT:    vsub.vv v8, v8, v10
 ; LMULMAX2-NEXT:    vmulhu.vv v8, v8, v12
 ; LMULMAX2-NEXT:    vadd.vv v8, v8, v10
-; LMULMAX2-NEXT:    addi a1, zero, 136
+; LMULMAX2-NEXT:    li a1, 136
 ; LMULMAX2-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX2-NEXT:    vmv.s.x v0, a1
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
@@ -4254,7 +4254,7 @@ define void @mulhu_v8i32(<8 x i32>* %x) {
 ; LMULMAX1-RV32-NEXT:    vsub.vv v9, v9, v12
 ; LMULMAX1-RV32-NEXT:    vmulhu.vv v9, v9, v11
 ; LMULMAX1-RV32-NEXT:    vadd.vv v9, v9, v12
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 1
+; LMULMAX1-RV32-NEXT:    li a2, 1
 ; LMULMAX1-RV32-NEXT:    vmv.s.x v12, a2
 ; LMULMAX1-RV32-NEXT:    vmv.v.i v13, 2
 ; LMULMAX1-RV32-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
@@ -4324,7 +4324,7 @@ define void @mulhu_v4i64(<4 x i64>* %x) {
 ; LMULMAX2-RV64:       # %bb.0:
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vle64.v v8, (a0)
-; LMULMAX2-RV64-NEXT:    addi a1, zero, -1
+; LMULMAX2-RV64-NEXT:    li a1, -1
 ; LMULMAX2-RV64-NEXT:    slli a1, a1, 63
 ; LMULMAX2-RV64-NEXT:    vmv.s.x v10, a1
 ; LMULMAX2-RV64-NEXT:    vmv.v.i v12, 0
@@ -4374,7 +4374,7 @@ define void @mulhu_v4i64(<4 x i64>* %x) {
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV64-NEXT:    vle64.v v9, (a1)
 ; LMULMAX1-RV64-NEXT:    vmv.v.i v10, 0
-; LMULMAX1-RV64-NEXT:    addi a2, zero, -1
+; LMULMAX1-RV64-NEXT:    li a2, -1
 ; LMULMAX1-RV64-NEXT:    slli a2, a2, 63
 ; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v10, a2
@@ -4441,16 +4441,16 @@ define void @mulhu_v4i64(<4 x i64>* %x) {
 define void @mulhs_v32i8(<32 x i8>* %x) {
 ; LMULMAX2-RV32-LABEL: mulhs_v32i8:
 ; LMULMAX2-RV32:       # %bb.0:
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 32
+; LMULMAX2-RV32-NEXT:    li a1, 32
 ; LMULMAX2-RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vle8.v v8, (a0)
-; LMULMAX2-RV32-NEXT:    addi a2, zero, -123
+; LMULMAX2-RV32-NEXT:    li a2, -123
 ; LMULMAX2-RV32-NEXT:    vmv.v.x v10, a2
 ; LMULMAX2-RV32-NEXT:    lui a2, 304453
 ; LMULMAX2-RV32-NEXT:    addi a2, a2, -1452
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.s.x v0, a2
-; LMULMAX2-RV32-NEXT:    addi a2, zero, 57
+; LMULMAX2-RV32-NEXT:    li a2, 57
 ; LMULMAX2-RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmerge.vxm v10, v10, a2, v0
 ; LMULMAX2-RV32-NEXT:    vmulhu.vv v8, v8, v10
@@ -4462,16 +4462,16 @@ define void @mulhs_v32i8(<32 x i8>* %x) {
 ;
 ; LMULMAX2-RV64-LABEL: mulhs_v32i8:
 ; LMULMAX2-RV64:       # %bb.0:
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 32
+; LMULMAX2-RV64-NEXT:    li a1, 32
 ; LMULMAX2-RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vle8.v v8, (a0)
-; LMULMAX2-RV64-NEXT:    addi a2, zero, -123
+; LMULMAX2-RV64-NEXT:    li a2, -123
 ; LMULMAX2-RV64-NEXT:    vmv.v.x v10, a2
 ; LMULMAX2-RV64-NEXT:    lui a2, 304453
 ; LMULMAX2-RV64-NEXT:    addiw a2, a2, -1452
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vmv.s.x v0, a2
-; LMULMAX2-RV64-NEXT:    addi a2, zero, 57
+; LMULMAX2-RV64-NEXT:    li a2, 57
 ; LMULMAX2-RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vmerge.vxm v10, v10, a2, v0
 ; LMULMAX2-RV64-NEXT:    vmulhu.vv v8, v8, v10
@@ -4575,7 +4575,7 @@ define void @mulhs_v16i16(<16 x i16>* %x) {
 ; LMULMAX1-NEXT:    vle16.v v8, (a0)
 ; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vle16.v v9, (a1)
-; LMULMAX1-NEXT:    addi a2, zero, 105
+; LMULMAX1-NEXT:    li a2, 105
 ; LMULMAX1-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX1-NEXT:    vmv.s.x v0, a2
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
@@ -4597,7 +4597,7 @@ define void @mulhs_v8i32(<8 x i32>* %x) {
 ; LMULMAX2-RV32:       # %bb.0:
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vle32.v v8, (a0)
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 85
+; LMULMAX2-RV32-NEXT:    li a1, 85
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.s.x v0, a1
 ; LMULMAX2-RV32-NEXT:    lui a1, 419430
@@ -4642,7 +4642,7 @@ define void @mulhs_v8i32(<8 x i32>* %x) {
 ; LMULMAX1-RV32-NEXT:    vle32.v v8, (a0)
 ; LMULMAX1-RV32-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV32-NEXT:    vle32.v v9, (a1)
-; LMULMAX1-RV32-NEXT:    addi a2, zero, 5
+; LMULMAX1-RV32-NEXT:    li a2, 5
 ; LMULMAX1-RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX1-RV32-NEXT:    vmv.s.x v0, a2
 ; LMULMAX1-RV32-NEXT:    lui a2, 419430
@@ -4670,7 +4670,7 @@ define void @mulhs_v8i32(<8 x i32>* %x) {
 ; LMULMAX1-RV64-NEXT:    vle32.v v8, (a0)
 ; LMULMAX1-RV64-NEXT:    addi a1, a0, 16
 ; LMULMAX1-RV64-NEXT:    vle32.v v9, (a1)
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 3
+; LMULMAX1-RV64-NEXT:    li a2, 3
 ; LMULMAX1-RV64-NEXT:    slli a2, a2, 33
 ; LMULMAX1-RV64-NEXT:    addi a2, a2, -5
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
@@ -4692,7 +4692,7 @@ define void @mulhs_v4i64(<4 x i64>* %x) {
 ; LMULMAX2-RV32:       # %bb.0:
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vle64.v v8, (a0)
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 17
+; LMULMAX2-RV32-NEXT:    li a1, 17
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.s.x v0, a1
 ; LMULMAX2-RV32-NEXT:    lui a1, 349525
@@ -4703,7 +4703,7 @@ define void @mulhs_v4i64(<4 x i64>* %x) {
 ; LMULMAX2-RV32-NEXT:    vmerge.vxm v10, v10, a1, v0
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmulh.vv v10, v8, v10
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 51
+; LMULMAX2-RV32-NEXT:    li a1, 51
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.s.x v0, a1
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
@@ -4711,9 +4711,9 @@ define void @mulhs_v4i64(<4 x i64>* %x) {
 ; LMULMAX2-RV32-NEXT:    vmerge.vim v12, v12, 0, v0
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmadd.vv v12, v8, v10
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 63
+; LMULMAX2-RV32-NEXT:    li a1, 63
 ; LMULMAX2-RV32-NEXT:    vsrl.vx v8, v12, a1
-; LMULMAX2-RV32-NEXT:    addi a1, zero, 68
+; LMULMAX2-RV32-NEXT:    li a1, 68
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX2-RV32-NEXT:    vmv.s.x v0, a1
 ; LMULMAX2-RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
@@ -4729,7 +4729,7 @@ define void @mulhs_v4i64(<4 x i64>* %x) {
 ; LMULMAX2-RV64:       # %bb.0:
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vle64.v v8, (a0)
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 5
+; LMULMAX2-RV64-NEXT:    li a1, 5
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; LMULMAX2-RV64-NEXT:    vmv.s.x v0, a1
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
@@ -4748,7 +4748,7 @@ define void @mulhs_v4i64(<4 x i64>* %x) {
 ; LMULMAX2-RV64-NEXT:    vmerge.vxm v12, v12, a1, v0
 ; LMULMAX2-RV64-NEXT:    vmulh.vv v12, v8, v12
 ; LMULMAX2-RV64-NEXT:    vmacc.vv v12, v8, v10
-; LMULMAX2-RV64-NEXT:    addi a1, zero, 63
+; LMULMAX2-RV64-NEXT:    li a1, 63
 ; LMULMAX2-RV64-NEXT:    vsrl.vx v8, v12, a1
 ; LMULMAX2-RV64-NEXT:    vmv.v.i v10, 1
 ; LMULMAX2-RV64-NEXT:    vmerge.vim v10, v10, 0, v0
@@ -4797,7 +4797,7 @@ define void @mulhs_v4i64(<4 x i64>* %x) {
 ; LMULMAX1-RV64-NEXT:    vid.v v12
 ; LMULMAX1-RV64-NEXT:    vrsub.vi v13, v12, 0
 ; LMULMAX1-RV64-NEXT:    vmacc.vv v11, v13, v9
-; LMULMAX1-RV64-NEXT:    addi a2, zero, 63
+; LMULMAX1-RV64-NEXT:    li a2, 63
 ; LMULMAX1-RV64-NEXT:    vsrl.vx v9, v11, a2
 ; LMULMAX1-RV64-NEXT:    vsra.vv v11, v11, v12
 ; LMULMAX1-RV64-NEXT:    vadd.vv v9, v11, v9
@@ -4818,7 +4818,7 @@ define void @mulhs_v4i64(<4 x i64>* %x) {
 define void @smin_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: smin_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -5007,7 +5007,7 @@ define void @smin_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @smax_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: smax_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -5196,7 +5196,7 @@ define void @smax_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @umin_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: umin_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -5385,7 +5385,7 @@ define void @umin_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
 define void @umax_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-LABEL: umax_v32i8:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vle8.v v8, (a0)
 ; LMULMAX2-NEXT:    vle8.v v10, (a1)
@@ -5800,7 +5800,7 @@ define void @sub_vi_v16i8(<16 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a1, zero, -1
+; CHECK-NEXT:    li a1, -1
 ; CHECK-NEXT:    vsub.vx v8, v8, a1
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
@@ -5817,7 +5817,7 @@ define void @sub_vi_v8i16(<8 x i16>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    addi a1, zero, -1
+; CHECK-NEXT:    li a1, -1
 ; CHECK-NEXT:    vsub.vx v8, v8, a1
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
@@ -5834,7 +5834,7 @@ define void @sub_vi_v4i32(<4 x i32>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    addi a1, zero, -1
+; CHECK-NEXT:    li a1, -1
 ; CHECK-NEXT:    vsub.vx v8, v8, a1
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
@@ -5851,7 +5851,7 @@ define void @sub_vi_v2i64(<2 x i64>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    addi a1, zero, -1
+; CHECK-NEXT:    li a1, -1
 ; CHECK-NEXT:    vsub.vx v8, v8, a1
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
@@ -7324,7 +7324,7 @@ define void @mulhu_vx_v16i8(<16 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a1, zero, 57
+; CHECK-NEXT:    li a1, 57
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a1
 ; CHECK-NEXT:    vsrl.vi v8, v8, 1
 ; CHECK-NEXT:    vse8.v v8, (a0)
@@ -7444,7 +7444,7 @@ define void @mulhs_vx_v16i8(<16 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a1, zero, -123
+; CHECK-NEXT:    li a1, -123
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a1
 ; CHECK-NEXT:    vsrl.vi v8, v8, 7
 ; CHECK-NEXT:    vse8.v v8, (a0)
@@ -7534,7 +7534,7 @@ define void @mulhs_vx_v2i64(<2 x i64>* %x) {
 ; RV32-NEXT:    addi a1, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a1), zero
 ; RV32-NEXT:    vmulh.vv v8, v8, v9
-; RV32-NEXT:    addi a1, zero, 63
+; RV32-NEXT:    li a1, 63
 ; RV32-NEXT:    vsrl.vx v9, v8, a1
 ; RV32-NEXT:    vadd.vv v8, v8, v9
 ; RV32-NEXT:    vse64.v v8, (a0)
@@ -7554,7 +7554,7 @@ define void @mulhs_vx_v2i64(<2 x i64>* %x) {
 ; RV64-NEXT:    slli a1, a1, 12
 ; RV64-NEXT:    addi a1, a1, 1366
 ; RV64-NEXT:    vmulh.vx v8, v8, a1
-; RV64-NEXT:    addi a1, zero, 63
+; RV64-NEXT:    li a1, 63
 ; RV64-NEXT:    vsrl.vx v9, v8, a1
 ; RV64-NEXT:    vadd.vv v8, v8, v9
 ; RV64-NEXT:    vse64.v v8, (a0)

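Every hunk above makes the same mechanical substitution: an expected line of the form "addi rd, zero, imm" becomes "li rd, imm". A minimal sketch of what a regenerated check block now looks like, for illustration only (the function name and RUN configuration are hypothetical and assume a plain llc -mtriple=riscv64 -verify-machineinstrs run line; this block is not taken from any of the modified tests):

; Hypothetical illustrative test, not part of this patch.
; Assumes: llc -mtriple=riscv64 -verify-machineinstrs, default CHECK prefix.
define i64 @materialize_42() {
; CHECK-LABEL: materialize_42:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 42
; CHECK-NEXT:    ret
  ret i64 42
}
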
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
index 6b45283d3b84..e450a5e81548 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
@@ -71,7 +71,7 @@ define <2 x i1> @buildvec_mask_optsize_nonconst_v2i1(i1 %x, i1 %y) optsize {
 define <3 x i1> @buildvec_mask_v1i1() {
 ; CHECK-LABEL: buildvec_mask_v1i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    ret
@@ -81,7 +81,7 @@ define <3 x i1> @buildvec_mask_v1i1() {
 define <3 x i1> @buildvec_mask_optsize_v1i1() optsize {
 ; CHECK-LABEL: buildvec_mask_optsize_v1i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    ret
@@ -91,7 +91,7 @@ define <3 x i1> @buildvec_mask_optsize_v1i1() optsize {
 define <4 x i1> @buildvec_mask_v4i1() {
 ; CHECK-LABEL: buildvec_mask_v4i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 6
+; CHECK-NEXT:    li a0, 6
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    ret
@@ -101,7 +101,7 @@ define <4 x i1> @buildvec_mask_v4i1() {
 define <4 x i1> @buildvec_mask_nonconst_v4i1(i1 %x, i1 %y) {
 ; CHECK-LABEL: buildvec_mask_nonconst_v4i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 3
+; CHECK-NEXT:    li a2, 3
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a2
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
@@ -147,7 +147,7 @@ define <4 x i1> @buildvec_mask_nonconst_v4i1_2(i1 %x, i1 %y) {
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    sb a1, 15(sp)
-; CHECK-NEXT:    addi a1, zero, 1
+; CHECK-NEXT:    li a1, 1
 ; CHECK-NEXT:    sb a1, 14(sp)
 ; CHECK-NEXT:    sb a0, 13(sp)
 ; CHECK-NEXT:    sb zero, 12(sp)
@@ -168,7 +168,7 @@ define <4 x i1> @buildvec_mask_nonconst_v4i1_2(i1 %x, i1 %y) {
 define <8 x i1> @buildvec_mask_v8i1() {
 ; CHECK-LABEL: buildvec_mask_v8i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 182
+; CHECK-NEXT:    li a0, 182
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    ret
@@ -178,7 +178,7 @@ define <8 x i1> @buildvec_mask_v8i1() {
 define <8 x i1> @buildvec_mask_nonconst_v8i1(i1 %x, i1 %y) {
 ; CHECK-LABEL: buildvec_mask_nonconst_v8i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 19
+; CHECK-NEXT:    li a2, 19
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a2
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
@@ -208,7 +208,7 @@ define <8 x i1> @buildvec_mask_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %w) {
 ; CHECK-NEXT:    sb a3, 13(sp)
 ; CHECK-NEXT:    sb a0, 12(sp)
 ; CHECK-NEXT:    sb a1, 11(sp)
-; CHECK-NEXT:    addi a1, zero, 1
+; CHECK-NEXT:    li a1, 1
 ; CHECK-NEXT:    sb a1, 10(sp)
 ; CHECK-NEXT:    sb a0, 9(sp)
 ; CHECK-NEXT:    sb a0, 8(sp)
@@ -240,7 +240,7 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %
 ; CHECK-NEXT:    sb a3, 13(sp)
 ; CHECK-NEXT:    sb a0, 12(sp)
 ; CHECK-NEXT:    sb a1, 11(sp)
-; CHECK-NEXT:    addi a1, zero, 1
+; CHECK-NEXT:    li a1, 1
 ; CHECK-NEXT:    sb a1, 10(sp)
 ; CHECK-NEXT:    sb a0, 9(sp)
 ; CHECK-NEXT:    sb a0, 8(sp)
@@ -296,7 +296,7 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1(i1 %x, i1 %y) optsize {
 define <10 x i1> @buildvec_mask_v10i1() {
 ; CHECK-LABEL: buildvec_mask_v10i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 949
+; CHECK-NEXT:    li a0, 949
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    ret
@@ -325,7 +325,7 @@ define <16 x i1> @buildvec_mask_v16i1() {
 define <16 x i1> @buildvec_mask_v16i1_undefs() {
 ; CHECK-LABEL: buildvec_mask_v16i1_undefs:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1722
+; CHECK-NEXT:    li a0, 1722
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    ret
@@ -335,7 +335,7 @@ define <16 x i1> @buildvec_mask_v16i1_undefs() {
 define <32 x i1> @buildvec_mask_v32i1() {
 ; RV32-LMULMAX1-LABEL: buildvec_mask_v32i1:
 ; RV32-LMULMAX1:       # %bb.0:
-; RV32-LMULMAX1-NEXT:    addi a0, zero, 1776
+; RV32-LMULMAX1-NEXT:    li a0, 1776
 ; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; RV32-LMULMAX1-NEXT:    vmv.s.x v0, a0
 ; RV32-LMULMAX1-NEXT:    lui a0, 11
@@ -345,7 +345,7 @@ define <32 x i1> @buildvec_mask_v32i1() {
 ;
 ; RV64-LMULMAX1-LABEL: buildvec_mask_v32i1:
 ; RV64-LMULMAX1:       # %bb.0:
-; RV64-LMULMAX1-NEXT:    addi a0, zero, 1776
+; RV64-LMULMAX1-NEXT:    li a0, 1776
 ; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; RV64-LMULMAX1-NEXT:    vmv.s.x v0, a0
 ; RV64-LMULMAX1-NEXT:    lui a0, 11
@@ -406,7 +406,7 @@ define <32 x i1> @buildvec_mask_v32i1() {
 define <64 x i1> @buildvec_mask_v64i1() {
 ; RV32-LMULMAX1-LABEL: buildvec_mask_v64i1:
 ; RV32-LMULMAX1:       # %bb.0:
-; RV32-LMULMAX1-NEXT:    addi a0, zero, 1776
+; RV32-LMULMAX1-NEXT:    li a0, 1776
 ; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; RV32-LMULMAX1-NEXT:    vmv.s.x v0, a0
 ; RV32-LMULMAX1-NEXT:    lui a0, 4
@@ -420,7 +420,7 @@ define <64 x i1> @buildvec_mask_v64i1() {
 ;
 ; RV64-LMULMAX1-LABEL: buildvec_mask_v64i1:
 ; RV64-LMULMAX1:       # %bb.0:
-; RV64-LMULMAX1-NEXT:    addi a0, zero, 1776
+; RV64-LMULMAX1-NEXT:    li a0, 1776
 ; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; RV64-LMULMAX1-NEXT:    vmv.s.x v0, a0
 ; RV64-LMULMAX1-NEXT:    lui a0, 4
@@ -513,7 +513,7 @@ define <64 x i1> @buildvec_mask_v64i1() {
 define <128 x i1> @buildvec_mask_v128i1() {
 ; RV32-LMULMAX1-LABEL: buildvec_mask_v128i1:
 ; RV32-LMULMAX1:       # %bb.0:
-; RV32-LMULMAX1-NEXT:    addi a0, zero, 1776
+; RV32-LMULMAX1-NEXT:    li a0, 1776
 ; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; RV32-LMULMAX1-NEXT:    vmv.s.x v0, a0
 ; RV32-LMULMAX1-NEXT:    lui a0, 11
@@ -535,7 +535,7 @@ define <128 x i1> @buildvec_mask_v128i1() {
 ;
 ; RV64-LMULMAX1-LABEL: buildvec_mask_v128i1:
 ; RV64-LMULMAX1:       # %bb.0:
-; RV64-LMULMAX1-NEXT:    addi a0, zero, 1776
+; RV64-LMULMAX1-NEXT:    li a0, 1776
 ; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; RV64-LMULMAX1-NEXT:    vmv.s.x v0, a0
 ; RV64-LMULMAX1-NEXT:    lui a0, 11
@@ -685,7 +685,7 @@ define <128 x i1> @buildvec_mask_v128i1() {
 define <128 x i1> @buildvec_mask_optsize_v128i1() optsize {
 ; RV32-LMULMAX1-LABEL: buildvec_mask_optsize_v128i1:
 ; RV32-LMULMAX1:       # %bb.0:
-; RV32-LMULMAX1-NEXT:    addi a0, zero, 1776
+; RV32-LMULMAX1-NEXT:    li a0, 1776
 ; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; RV32-LMULMAX1-NEXT:    vmv.s.x v0, a0
 ; RV32-LMULMAX1-NEXT:    lui a0, 11
@@ -707,7 +707,7 @@ define <128 x i1> @buildvec_mask_optsize_v128i1() optsize {
 ;
 ; RV64-LMULMAX1-LABEL: buildvec_mask_optsize_v128i1:
 ; RV64-LMULMAX1:       # %bb.0:
-; RV64-LMULMAX1-NEXT:    addi a0, zero, 1776
+; RV64-LMULMAX1-NEXT:    li a0, 1776
 ; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; RV64-LMULMAX1-NEXT:    vmv.s.x v0, a0
 ; RV64-LMULMAX1-NEXT:    lui a0, 11
@@ -765,7 +765,7 @@ define <128 x i1> @buildvec_mask_optsize_v128i1() optsize {
 ; RV32-LMULMAX4:       # %bb.0:
 ; RV32-LMULMAX4-NEXT:    lui a0, %hi(.LCPI21_0)
 ; RV32-LMULMAX4-NEXT:    addi a0, a0, %lo(.LCPI21_0)
-; RV32-LMULMAX4-NEXT:    addi a1, zero, 64
+; RV32-LMULMAX4-NEXT:    li a1, 64
 ; RV32-LMULMAX4-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; RV32-LMULMAX4-NEXT:    vlm.v v0, (a0)
 ; RV32-LMULMAX4-NEXT:    lui a0, %hi(.LCPI21_1)
@@ -798,7 +798,7 @@ define <128 x i1> @buildvec_mask_optsize_v128i1() optsize {
 ; RV32-LMULMAX8:       # %bb.0:
 ; RV32-LMULMAX8-NEXT:    lui a0, %hi(.LCPI21_0)
 ; RV32-LMULMAX8-NEXT:    addi a0, a0, %lo(.LCPI21_0)
-; RV32-LMULMAX8-NEXT:    addi a1, zero, 128
+; RV32-LMULMAX8-NEXT:    li a1, 128
 ; RV32-LMULMAX8-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; RV32-LMULMAX8-NEXT:    vlm.v v0, (a0)
 ; RV32-LMULMAX8-NEXT:    ret
@@ -807,7 +807,7 @@ define <128 x i1> @buildvec_mask_optsize_v128i1() optsize {
 ; RV64-LMULMAX8:       # %bb.0:
 ; RV64-LMULMAX8-NEXT:    lui a0, %hi(.LCPI21_0)
 ; RV64-LMULMAX8-NEXT:    addi a0, a0, %lo(.LCPI21_0)
-; RV64-LMULMAX8-NEXT:    addi a1, zero, 128
+; RV64-LMULMAX8-NEXT:    li a1, 128
 ; RV64-LMULMAX8-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; RV64-LMULMAX8-NEXT:    vlm.v v0, (a0)
 ; RV64-LMULMAX8-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll
index 6b58ecd313b6..d7ff4203017c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll
@@ -91,7 +91,7 @@ define void @load_store_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
 define void @load_store_v32i1(<32 x i1>* %x, <32 x i1>* %y) {
 ; LMULMAX2-LABEL: load_store_v32i1:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vlm.v v8, (a0)
 ; LMULMAX2-NEXT:    vsm.v v8, (a1)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll
index 69c02c28ec20..73968f06a963 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll
@@ -39,7 +39,7 @@ define void @or_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
 define void @xor_v32i1(<32 x i1>* %x, <32 x i1>* %y) {
 ; CHECK-LABEL: xor_v32i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; CHECK-NEXT:    vlm.v v8, (a0)
 ; CHECK-NEXT:    vlm.v v9, (a1)
@@ -56,7 +56,7 @@ define void @xor_v32i1(<32 x i1>* %x, <32 x i1>* %y) {
 define void @not_v64i1(<64 x i1>* %x, <64 x i1>* %y) {
 ; CHECK-LABEL: not_v64i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vlm.v v8, (a0)
 ; CHECK-NEXT:    vmnand.mm v8, v8, v8
@@ -106,7 +106,7 @@ define void @ornot_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
 define void @xornot_v32i1(<32 x i1>* %x, <32 x i1>* %y) {
 ; CHECK-LABEL: xornot_v32i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; CHECK-NEXT:    vlm.v v8, (a0)
 ; CHECK-NEXT:    vlm.v v9, (a1)
@@ -158,7 +158,7 @@ define void @nor_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
 define void @xnor_v32i1(<32 x i1>* %x, <32 x i1>* %y) {
 ; CHECK-LABEL: xnor_v32i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; CHECK-NEXT:    vlm.v v8, (a0)
 ; CHECK-NEXT:    vlm.v v9, (a1)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
index 17300e9eb36c..644a39aada17 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
@@ -187,7 +187,7 @@ define void @splat_v16i1(<16 x i1>* %x, i1 %y) {
 define void @splat_zeros_v32i1(<32 x i1>* %x) {
 ; LMULMAX2-LABEL: splat_zeros_v32i1:
 ; LMULMAX2:       # %bb.0:
-; LMULMAX2-NEXT:    addi a1, zero, 32
+; LMULMAX2-NEXT:    li a1, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vmclr.m v8
 ; LMULMAX2-NEXT:    vsm.v v8, (a0)
@@ -218,7 +218,7 @@ define void @splat_v32i1(<32 x i1>* %x, i1 %y) {
 ; LMULMAX2-LABEL: splat_v32i1:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    andi a1, a1, 1
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vmv.v.x v8, a1
 ; LMULMAX2-NEXT:    vmsne.vi v10, v8, 0
@@ -256,7 +256,7 @@ define void @splat_ones_v64i1(<64 x i1>* %x) {
 ; LMULMAX2-LABEL: splat_ones_v64i1:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    addi a1, a0, 4
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vmset.m v8
 ; LMULMAX2-NEXT:    vsm.v v8, (a1)
@@ -296,7 +296,7 @@ define void @splat_v64i1(<64 x i1>* %x, i1 %y) {
 ; LMULMAX2-LABEL: splat_v64i1:
 ; LMULMAX2:       # %bb.0:
 ; LMULMAX2-NEXT:    andi a1, a1, 1
-; LMULMAX2-NEXT:    addi a2, zero, 32
+; LMULMAX2-NEXT:    li a2, 32
 ; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; LMULMAX2-NEXT:    vmv.v.x v8, a1
 ; LMULMAX2-NEXT:    vmsne.vi v10, v8, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index cbf9cf513ae4..91c7d1ac64a3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -2121,7 +2121,7 @@ declare <32 x i8> @llvm.masked.gather.v32i8.v32p0i8(<32 x i8*>, i32, <32 x i1>,
 define <32 x i8> @mgather_baseidx_v32i8(i8* %base, <32 x i8> %idxs, <32 x i1> %m, <32 x i8> %passthru) {
 ; RV32-LABEL: mgather_baseidx_v32i8:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; RV32-NEXT:    vsext.vf4 v16, v8
 ; RV32-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
@@ -2146,7 +2146,7 @@ define <32 x i8> @mgather_baseidx_v32i8(i8* %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV64-NEXT:    vmv1r.v v0, v12
 ; RV64-NEXT:    vluxei64.v v10, (a0), v16, v0.t
-; RV64-NEXT:    addi a0, zero, 32
+; RV64-NEXT:    li a0, 32
 ; RV64-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; RV64-NEXT:    vmv.v.i v8, 0
 ; RV64-NEXT:    vsetivli zero, 16, e8, m2, tu, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll
index 591f7a2bbc95..861896c5857a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll
@@ -325,7 +325,7 @@ declare <16 x double> @llvm.masked.load.v16f64(<16 x double>*, i32, <16 x i1>, <
 define void @masked_load_v32f16(<32 x half>* %a, <32 x half>* %m_ptr, <32 x half>* %res_ptr) nounwind {
 ; CHECK-LABEL: masked_load_v32f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, zero
@@ -344,7 +344,7 @@ declare <32 x half> @llvm.masked.load.v32f16(<32 x half>*, i32, <32 x i1>, <32 x
 define void @masked_load_v32f32(<32 x float>* %a, <32 x float>* %m_ptr, <32 x float>* %res_ptr) nounwind {
 ; CHECK-LABEL: masked_load_v32f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    fmv.w.x ft0, zero
@@ -407,7 +407,7 @@ declare <32 x double> @llvm.masked.load.v32f64(<32 x double>*, i32, <32 x i1>, <
 define void @masked_load_v64f16(<64 x half>* %a, <64 x half>* %m_ptr, <64 x half>* %res_ptr) nounwind {
 ; CHECK-LABEL: masked_load_v64f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 64
+; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a1)
 ; CHECK-NEXT:    fmv.h.x ft0, zero
@@ -427,7 +427,7 @@ define void @masked_load_v64f32(<64 x float>* %a, <64 x float>* %m_ptr, <64 x fl
 ; CHECK-LABEL: masked_load_v64f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a3, a1, 128
-; CHECK-NEXT:    addi a4, zero, 32
+; CHECK-NEXT:    li a4, 32
 ; CHECK-NEXT:    vsetvli zero, a4, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v16, (a1)
 ; CHECK-NEXT:    vle32.v v24, (a3)
@@ -454,7 +454,7 @@ define void @masked_load_v128f16(<128 x half>* %a, <128 x half>* %m_ptr, <128 x
 ; CHECK-LABEL: masked_load_v128f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a3, a1, 128
-; CHECK-NEXT:    addi a4, zero, 64
+; CHECK-NEXT:    li a4, 64
 ; CHECK-NEXT:    vsetvli zero, a4, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v16, (a1)
 ; CHECK-NEXT:    vle16.v v24, (a3)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
index c66e968de5c0..55edce865c5f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
@@ -345,7 +345,7 @@ declare <16 x i64> @llvm.masked.load.v16i64(<16 x i64>*, i32, <16 x i1>, <16 x i
 define void @masked_load_v32i8(<32 x i8>* %a, <32 x i8>* %m_ptr, <32 x i8>* %res_ptr) nounwind {
 ; CHECK-LABEL: masked_load_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a1)
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
@@ -363,7 +363,7 @@ declare <32 x i8> @llvm.masked.load.v32i8(<32 x i8>*, i32, <32 x i1>, <32 x i8>)
 define void @masked_load_v32i16(<32 x i16>* %a, <32 x i16>* %m_ptr, <32 x i16>* %res_ptr) nounwind {
 ; CHECK-LABEL: masked_load_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a1)
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
@@ -381,7 +381,7 @@ declare <32 x i16> @llvm.masked.load.v32i16(<32 x i16>*, i32, <32 x i1>, <32 x i
 define void @masked_load_v32i32(<32 x i32>* %a, <32 x i32>* %m_ptr, <32 x i32>* %res_ptr) nounwind {
 ; CHECK-LABEL: masked_load_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
@@ -403,7 +403,7 @@ define void @masked_load_v32i64(<32 x i64>* %a, <32 x i64>* %m_ptr, <32 x i64>*
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vle64.v v16, (a3)
 ; RV32-NEXT:    vle64.v v0, (a1)
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; RV32-NEXT:    vmv.v.i v24, 0
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
@@ -445,7 +445,7 @@ declare <32 x i64> @llvm.masked.load.v32i64(<32 x i64>*, i32, <32 x i1>, <32 x i
 define void @masked_load_v64i8(<64 x i8>* %a, <64 x i8>* %m_ptr, <64 x i8>* %res_ptr) nounwind {
 ; CHECK-LABEL: masked_load_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 64
+; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a1)
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
@@ -463,7 +463,7 @@ declare <64 x i8> @llvm.masked.load.v64i8(<64 x i8>*, i32, <64 x i1>, <64 x i8>)
 define void @masked_load_v64i16(<64 x i16>* %a, <64 x i16>* %m_ptr, <64 x i16>* %res_ptr) nounwind {
 ; CHECK-LABEL: masked_load_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 64
+; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a1)
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
@@ -482,7 +482,7 @@ define void @masked_load_v64i32(<64 x i32>* %a, <64 x i32>* %m_ptr, <64 x i32>*
 ; CHECK-LABEL: masked_load_v64i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a3, a1, 128
-; CHECK-NEXT:    addi a4, zero, 32
+; CHECK-NEXT:    li a4, 32
 ; CHECK-NEXT:    vsetvli zero, a4, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v16, (a1)
 ; CHECK-NEXT:    vle32.v v24, (a3)
@@ -507,7 +507,7 @@ declare <64 x i32> @llvm.masked.load.v64i32(<64 x i32>*, i32, <64 x i1>, <64 x i
 define void @masked_load_v128i8(<128 x i8>* %a, <128 x i8>* %m_ptr, <128 x i8>* %res_ptr) nounwind {
 ; CHECK-LABEL: masked_load_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 128
+; CHECK-NEXT:    li a3, 128
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a1)
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
@@ -526,7 +526,7 @@ define void @masked_load_v256i8(<256 x i8>* %a, <256 x i8>* %m_ptr, <256 x i8>*
 ; CHECK-LABEL: masked_load_v256i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a3, a1, 128
-; CHECK-NEXT:    addi a4, zero, 128
+; CHECK-NEXT:    li a4, 128
 ; CHECK-NEXT:    vsetvli zero, a4, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v16, (a1)
 ; CHECK-NEXT:    vle8.v v24, (a3)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index e738b95d03ff..8ab646ae11c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -1831,7 +1831,7 @@ declare void @llvm.masked.scatter.v32i8.v32p0i8(<32 x i8>, <32 x i8*>, i32, <32
 define void @mscatter_baseidx_v32i8(<32 x i8> %val, i8* %base, <32 x i8> %idxs, <32 x i1> %m) {
 ; RV32-LABEL: mscatter_baseidx_v32i8:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; RV32-NEXT:    vsext.vf4 v16, v10
 ; RV32-NEXT:    vsetvli zero, zero, e8, m2, ta, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll
index f9c105e6eddc..49a8221f79b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll
@@ -325,7 +325,7 @@ declare void @llvm.masked.store.v16f64.p0v16f64(<16 x double>, <16 x double>*, i
 define void @masked_store_v32f16(<32 x half>* %val_ptr, <32 x half>* %a, <32 x half>* %m_ptr) nounwind {
 ; CHECK-LABEL: masked_store_v32f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a2)
 ; CHECK-NEXT:    vle16.v v12, (a0)
@@ -344,7 +344,7 @@ declare void @llvm.masked.store.v32f16.p0v32f16(<32 x half>, <32 x half>*, i32,
 define void @masked_store_v32f32(<32 x float>* %val_ptr, <32 x float>* %a, <32 x float>* %m_ptr) nounwind {
 ; CHECK-LABEL: masked_store_v32f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a2)
 ; CHECK-NEXT:    vle32.v v16, (a0)
@@ -451,7 +451,7 @@ declare void @llvm.masked.store.v32f32.p0v32f64(<32 x double>, <32 x double>*, i
 define void @masked_store_v64f16(<64 x half>* %val_ptr, <64 x half>* %a, <64 x half>* %m_ptr) nounwind {
 ; CHECK-LABEL: masked_store_v64f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 64
+; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a2)
 ; CHECK-NEXT:    vle16.v v16, (a0)
@@ -474,7 +474,7 @@ define void @masked_store_v64f32(<64 x float>* %val_ptr, <64 x float>* %a, <64 x
 ; CHECK-NEXT:    csrr a3, vlenb
 ; CHECK-NEXT:    slli a3, a3, 4
 ; CHECK-NEXT:    sub sp, sp, a3
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a2)
 ; CHECK-NEXT:    addi a2, a2, 128
@@ -523,7 +523,7 @@ define void @masked_store_v128f16(<128 x half>* %val_ptr, <128 x half>* %a, <128
 ; CHECK-NEXT:    csrr a3, vlenb
 ; CHECK-NEXT:    slli a3, a3, 4
 ; CHECK-NEXT:    sub sp, sp, a3
-; CHECK-NEXT:    addi a3, zero, 64
+; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a2)
 ; CHECK-NEXT:    addi a2, a2, 128

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll
index 036510dc0f52..035f9021fd85 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll
@@ -345,7 +345,7 @@ declare void @llvm.masked.store.v16i64.p0v16i64(<16 x i64>, <16 x i64>*, i32, <1
 define void @masked_store_v32i8(<32 x i8>* %val_ptr, <32 x i8>* %a, <32 x i8>* %m_ptr) nounwind {
 ; CHECK-LABEL: masked_store_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a2)
 ; CHECK-NEXT:    vle8.v v10, (a0)
@@ -363,7 +363,7 @@ declare void @llvm.masked.store.v32i8.p0v32i8(<32 x i8>, <32 x i8>*, i32, <32 x
 define void @masked_store_v32i16(<32 x i16>* %val_ptr, <32 x i16>* %a, <32 x i16>* %m_ptr) nounwind {
 ; CHECK-LABEL: masked_store_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a2)
 ; CHECK-NEXT:    vle16.v v12, (a0)
@@ -381,7 +381,7 @@ declare void @llvm.masked.store.v32i16.p0v32i16(<32 x i16>, <32 x i16>*, i32, <3
 define void @masked_store_v32i32(<32 x i32>* %val_ptr, <32 x i32>* %a, <32 x i32>* %m_ptr) nounwind {
 ; CHECK-LABEL: masked_store_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a2)
 ; CHECK-NEXT:    vle32.v v16, (a0)
@@ -412,7 +412,7 @@ define void @masked_store_v32i64(<32 x i64>* %val_ptr, <32 x i64>* %a, <32 x i64
 ; RV32-NEXT:    addi a3, a3, 16
 ; RV32-NEXT:    vs8r.v v8, (a3) # Unknown-size Folded Spill
 ; RV32-NEXT:    vle64.v v24, (a2)
-; RV32-NEXT:    addi a2, zero, 32
+; RV32-NEXT:    li a2, 32
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; RV32-NEXT:    vmv.v.i v8, 0
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
@@ -489,7 +489,7 @@ declare void @llvm.masked.store.v32i64.p0v32i64(<32 x i64>, <32 x i64>*, i32, <3
 define void @masked_store_v64i8(<64 x i8>* %val_ptr, <64 x i8>* %a, <64 x i8>* %m_ptr) nounwind {
 ; CHECK-LABEL: masked_store_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 64
+; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a2)
 ; CHECK-NEXT:    vle8.v v12, (a0)
@@ -507,7 +507,7 @@ declare void @llvm.masked.store.v64i8.p0v64i8(<64 x i8>, <64 x i8>*, i32, <64 x
 define void @masked_store_v64i16(<64 x i16>* %val_ptr, <64 x i16>* %a, <64 x i16>* %m_ptr) nounwind {
 ; CHECK-LABEL: masked_store_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 64
+; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a2)
 ; CHECK-NEXT:    vle16.v v16, (a0)
@@ -529,7 +529,7 @@ define void @masked_store_v64i32(<64 x i32>* %val_ptr, <64 x i32>* %a, <64 x i32
 ; CHECK-NEXT:    csrr a3, vlenb
 ; CHECK-NEXT:    slli a3, a3, 4
 ; CHECK-NEXT:    sub sp, sp, a3
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a2)
 ; CHECK-NEXT:    addi a2, a2, 128
@@ -573,7 +573,7 @@ declare void @llvm.masked.store.v64i32.p0v64i32(<64 x i32>, <64 x i32>*, i32, <6
 define void @masked_store_v128i8(<128 x i8>* %val_ptr, <128 x i8>* %a, <128 x i8>* %m_ptr) nounwind {
 ; CHECK-LABEL: masked_store_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 128
+; CHECK-NEXT:    li a3, 128
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a2)
 ; CHECK-NEXT:    vle8.v v16, (a0)
@@ -595,7 +595,7 @@ define void @masked_store_v128i16(<128 x i16>* %val_ptr, <128 x i16>* %a, <128 x
 ; CHECK-NEXT:    csrr a3, vlenb
 ; CHECK-NEXT:    slli a3, a3, 4
 ; CHECK-NEXT:    sub sp, sp, a3
-; CHECK-NEXT:    addi a3, zero, 64
+; CHECK-NEXT:    li a3, 64
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a2)
 ; CHECK-NEXT:    addi a2, a2, 128
@@ -643,7 +643,7 @@ define void @masked_store_v256i8(<256 x i8>* %val_ptr, <256 x i8>* %a, <256 x i8
 ; CHECK-NEXT:    csrr a3, vlenb
 ; CHECK-NEXT:    slli a3, a3, 4
 ; CHECK-NEXT:    sub sp, sp, a3
-; CHECK-NEXT:    addi a3, zero, 128
+; CHECK-NEXT:    li a3, 128
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a2)
 ; CHECK-NEXT:    addi a2, a2, 128

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index ddef4af7b558..b4d6365b76bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -186,7 +186,7 @@ declare half @llvm.vector.reduce.fadd.v32f16(half, <32 x half>)
 define half @vreduce_fadd_v32f16(<32 x half>* %x, half %s) {
 ; RV32-LABEL: vreduce_fadd_v32f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    lui a2, %hi(.LCPI10_0)
 ; RV32-NEXT:    flh ft0, %lo(.LCPI10_0)(a2)
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -203,7 +203,7 @@ define half @vreduce_fadd_v32f16(<32 x half>* %x, half %s) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI10_0)
 ; RV64-NEXT:    flh ft0, %lo(.LCPI10_0)(a1)
-; RV64-NEXT:    addi a1, zero, 32
+; RV64-NEXT:    li a1, 32
 ; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; RV64-NEXT:    vle16.v v8, (a0)
 ; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
@@ -221,7 +221,7 @@ define half @vreduce_fadd_v32f16(<32 x half>* %x, half %s) {
 define half @vreduce_ord_fadd_v32f16(<32 x half>* %x, half %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_v32f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
@@ -240,7 +240,7 @@ declare half @llvm.vector.reduce.fadd.v64f16(half, <64 x half>)
 define half @vreduce_fadd_v64f16(<64 x half>* %x, half %s) {
 ; RV32-LABEL: vreduce_fadd_v64f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 64
+; RV32-NEXT:    li a1, 64
 ; RV32-NEXT:    lui a2, %hi(.LCPI12_0)
 ; RV32-NEXT:    flh ft0, %lo(.LCPI12_0)(a2)
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -257,7 +257,7 @@ define half @vreduce_fadd_v64f16(<64 x half>* %x, half %s) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI12_0)
 ; RV64-NEXT:    flh ft0, %lo(.LCPI12_0)(a1)
-; RV64-NEXT:    addi a1, zero, 64
+; RV64-NEXT:    li a1, 64
 ; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; RV64-NEXT:    vle16.v v8, (a0)
 ; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
@@ -275,7 +275,7 @@ define half @vreduce_fadd_v64f16(<64 x half>* %x, half %s) {
 define half @vreduce_ord_fadd_v64f16(<64 x half>* %x, half %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_v64f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
@@ -294,7 +294,7 @@ declare half @llvm.vector.reduce.fadd.v128f16(half, <128 x half>)
 define half @vreduce_fadd_v128f16(<128 x half>* %x, half %s) {
 ; CHECK-LABEL: vreduce_fadd_v128f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -318,7 +318,7 @@ define half @vreduce_ord_fadd_v128f16(<128 x half>* %x, half %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_v128f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a1, a0, 128
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a1)
 ; CHECK-NEXT:    vle16.v v16, (a0)
@@ -522,7 +522,7 @@ declare float @llvm.vector.reduce.fadd.v32f32(float, <32 x float>)
 define float @vreduce_fadd_v32f32(<32 x float>* %x, float %s) {
 ; RV32-LABEL: vreduce_fadd_v32f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    lui a2, %hi(.LCPI26_0)
 ; RV32-NEXT:    flw ft0, %lo(.LCPI26_0)(a2)
 ; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -539,7 +539,7 @@ define float @vreduce_fadd_v32f32(<32 x float>* %x, float %s) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    lui a1, %hi(.LCPI26_0)
 ; RV64-NEXT:    flw ft0, %lo(.LCPI26_0)(a1)
-; RV64-NEXT:    addi a1, zero, 32
+; RV64-NEXT:    li a1, 32
 ; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; RV64-NEXT:    vle32.v v8, (a0)
 ; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
@@ -557,7 +557,7 @@ define float @vreduce_fadd_v32f32(<32 x float>* %x, float %s) {
 define float @vreduce_ord_fadd_v32f32(<32 x float>* %x, float %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_v32f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
@@ -576,7 +576,7 @@ declare float @llvm.vector.reduce.fadd.v64f32(float, <64 x float>)
 define float @vreduce_fadd_v64f32(<64 x float>* %x, float %s) {
 ; CHECK-LABEL: vreduce_fadd_v64f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -600,7 +600,7 @@ define float @vreduce_ord_fadd_v64f32(<64 x float>* %x, float %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_v64f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a1, a0, 128
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    vle32.v v16, (a0)
@@ -927,7 +927,7 @@ declare half @llvm.vector.reduce.fmin.v128f16(<128 x half>)
 define half @vreduce_fmin_v128f16(<128 x half>* %x) {
 ; CHECK-LABEL: vreduce_fmin_v128f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -1027,7 +1027,7 @@ declare float @llvm.vector.reduce.fmin.v128f32(<128 x float>)
 define float @vreduce_fmin_v128f32(<128 x float>* %x) {
 ; CHECK-LABEL: vreduce_fmin_v128f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    addi a2, a0, 384
 ; CHECK-NEXT:    vle32.v v8, (a2)
@@ -1232,7 +1232,7 @@ declare half @llvm.vector.reduce.fmax.v128f16(<128 x half>)
 define half @vreduce_fmax_v128f16(<128 x half>* %x) {
 ; CHECK-LABEL: vreduce_fmax_v128f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -1332,7 +1332,7 @@ declare float @llvm.vector.reduce.fmax.v128f32(<128 x float>)
 define float @vreduce_fmax_v128f32(<128 x float>* %x) {
 ; CHECK-LABEL: vreduce_fmax_v128f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    addi a2, a0, 384
 ; CHECK-NEXT:    vle32.v v8, (a2)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
index 163e561f9ad8..4af3249382d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
@@ -847,7 +847,7 @@ define signext i64 @vpreduce_add_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -881,7 +881,7 @@ define signext i64 @vpreduce_umax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -915,7 +915,7 @@ define signext i64 @vpreduce_smax_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -949,7 +949,7 @@ define signext i64 @vpreduce_umin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -983,7 +983,7 @@ define signext i64 @vpreduce_smin_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1017,7 +1017,7 @@ define signext i64 @vpreduce_and_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1051,7 +1051,7 @@ define signext i64 @vpreduce_or_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1085,7 +1085,7 @@ define signext i64 @vpreduce_xor_v2i64(i64 signext %s, <2 x i64> %v, <2 x i1> %m
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1119,7 +1119,7 @@ define signext i64 @vpreduce_add_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vredsum.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1153,7 +1153,7 @@ define signext i64 @vpreduce_umax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vredmaxu.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1187,7 +1187,7 @@ define signext i64 @vpreduce_smax_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vredmax.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1221,7 +1221,7 @@ define signext i64 @vpreduce_umin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vredminu.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1255,7 +1255,7 @@ define signext i64 @vpreduce_smin_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vredmin.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1289,7 +1289,7 @@ define signext i64 @vpreduce_and_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vredand.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1323,7 +1323,7 @@ define signext i64 @vpreduce_or_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vredor.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1357,7 +1357,7 @@ define signext i64 @vpreduce_xor_v4i64(i64 signext %s, <4 x i64> %v, <4 x i1> %m
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vredxor.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
index 8602731a5731..4c8fcad97086 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
@@ -93,7 +93,7 @@ declare i8 @llvm.vector.reduce.add.v32i8(<32 x i8>)
 define i8 @vreduce_add_v32i8(<32 x i8>* %x) {
 ; CHECK-LABEL: vreduce_add_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -112,7 +112,7 @@ declare i8 @llvm.vector.reduce.add.v64i8(<64 x i8>)
 define i8 @vreduce_add_v64i8(<64 x i8>* %x) {
 ; CHECK-LABEL: vreduce_add_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -131,7 +131,7 @@ declare i8 @llvm.vector.reduce.add.v128i8(<128 x i8>)
 define i8 @vreduce_add_v128i8(<128 x i8>* %x) {
 ; CHECK-LABEL: vreduce_add_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -150,7 +150,7 @@ declare i8 @llvm.vector.reduce.add.v256i8(<256 x i8>)
 define i8 @vreduce_add_v256i8(<256 x i8>* %x) {
 ; CHECK-LABEL: vreduce_add_v256i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -258,7 +258,7 @@ declare i16 @llvm.vector.reduce.add.v32i16(<32 x i16>)
 define i16 @vreduce_add_v32i16(<32 x i16>* %x) {
 ; CHECK-LABEL: vreduce_add_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
@@ -277,7 +277,7 @@ declare i16 @llvm.vector.reduce.add.v64i16(<64 x i16>)
 define i16 @vreduce_add_v64i16(<64 x i16>* %x) {
 ; CHECK-LABEL: vreduce_add_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
@@ -296,7 +296,7 @@ declare i16 @llvm.vector.reduce.add.v128i16(<128 x i16>)
 define i16 @vreduce_add_v128i16(<128 x i16>* %x) {
 ; CHECK-LABEL: vreduce_add_v128i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -404,7 +404,7 @@ declare i32 @llvm.vector.reduce.add.v32i32(<32 x i32>)
 define i32 @vreduce_add_v32i32(<32 x i32>* %x) {
 ; CHECK-LABEL: vreduce_add_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
@@ -423,7 +423,7 @@ declare i32 @llvm.vector.reduce.add.v64i32(<64 x i32>)
 define i32 @vreduce_add_v64i32(<64 x i32>* %x) {
 ; CHECK-LABEL: vreduce_add_v64i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -447,7 +447,7 @@ define i64 @vreduce_add_v1i64(<1 x i64>* %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -476,7 +476,7 @@ define i64 @vreduce_add_v2i64(<2 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vredsum.vs v8, v8, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -509,7 +509,7 @@ define i64 @vreduce_add_v4i64(<4 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV32-NEXT:    vredsum.vs v8, v8, v10
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -542,7 +542,7 @@ define i64 @vreduce_add_v8i64(<8 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vredsum.vs v8, v8, v12
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -575,7 +575,7 @@ define i64 @vreduce_add_v16i64(<16 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredsum.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -611,7 +611,7 @@ define i64 @vreduce_add_v32i64(<32 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredsum.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -656,7 +656,7 @@ define i64 @vreduce_add_v64i64(<64 x i64>* %x) nounwind {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredsum.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -777,7 +777,7 @@ declare i8 @llvm.vector.reduce.and.v32i8(<32 x i8>)
 define i8 @vreduce_and_v32i8(<32 x i8>* %x) {
 ; CHECK-LABEL: vreduce_and_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -796,7 +796,7 @@ declare i8 @llvm.vector.reduce.and.v64i8(<64 x i8>)
 define i8 @vreduce_and_v64i8(<64 x i8>* %x) {
 ; CHECK-LABEL: vreduce_and_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -815,7 +815,7 @@ declare i8 @llvm.vector.reduce.and.v128i8(<128 x i8>)
 define i8 @vreduce_and_v128i8(<128 x i8>* %x) {
 ; CHECK-LABEL: vreduce_and_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -834,7 +834,7 @@ declare i8 @llvm.vector.reduce.and.v256i8(<256 x i8>)
 define i8 @vreduce_and_v256i8(<256 x i8>* %x) {
 ; CHECK-LABEL: vreduce_and_v256i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -942,7 +942,7 @@ declare i16 @llvm.vector.reduce.and.v32i16(<32 x i16>)
 define i16 @vreduce_and_v32i16(<32 x i16>* %x) {
 ; CHECK-LABEL: vreduce_and_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
@@ -961,7 +961,7 @@ declare i16 @llvm.vector.reduce.and.v64i16(<64 x i16>)
 define i16 @vreduce_and_v64i16(<64 x i16>* %x) {
 ; CHECK-LABEL: vreduce_and_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
@@ -980,7 +980,7 @@ declare i16 @llvm.vector.reduce.and.v128i16(<128 x i16>)
 define i16 @vreduce_and_v128i16(<128 x i16>* %x) {
 ; CHECK-LABEL: vreduce_and_v128i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -1088,7 +1088,7 @@ declare i32 @llvm.vector.reduce.and.v32i32(<32 x i32>)
 define i32 @vreduce_and_v32i32(<32 x i32>* %x) {
 ; CHECK-LABEL: vreduce_and_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
@@ -1107,7 +1107,7 @@ declare i32 @llvm.vector.reduce.and.v64i32(<64 x i32>)
 define i32 @vreduce_and_v64i32(<64 x i32>* %x) {
 ; CHECK-LABEL: vreduce_and_v64i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -1131,7 +1131,7 @@ define i64 @vreduce_and_v1i64(<1 x i64>* %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -1160,7 +1160,7 @@ define i64 @vreduce_and_v2i64(<2 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vredand.vs v8, v8, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1193,7 +1193,7 @@ define i64 @vreduce_and_v4i64(<4 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV32-NEXT:    vredand.vs v8, v8, v10
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1226,7 +1226,7 @@ define i64 @vreduce_and_v8i64(<8 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vredand.vs v8, v8, v12
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1259,7 +1259,7 @@ define i64 @vreduce_and_v16i64(<16 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredand.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1295,7 +1295,7 @@ define i64 @vreduce_and_v32i64(<32 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredand.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1340,7 +1340,7 @@ define i64 @vreduce_and_v64i64(<64 x i64>* %x) nounwind {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredand.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1461,7 +1461,7 @@ declare i8 @llvm.vector.reduce.or.v32i8(<32 x i8>)
 define i8 @vreduce_or_v32i8(<32 x i8>* %x) {
 ; CHECK-LABEL: vreduce_or_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -1480,7 +1480,7 @@ declare i8 @llvm.vector.reduce.or.v64i8(<64 x i8>)
 define i8 @vreduce_or_v64i8(<64 x i8>* %x) {
 ; CHECK-LABEL: vreduce_or_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -1499,7 +1499,7 @@ declare i8 @llvm.vector.reduce.or.v128i8(<128 x i8>)
 define i8 @vreduce_or_v128i8(<128 x i8>* %x) {
 ; CHECK-LABEL: vreduce_or_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -1518,7 +1518,7 @@ declare i8 @llvm.vector.reduce.or.v256i8(<256 x i8>)
 define i8 @vreduce_or_v256i8(<256 x i8>* %x) {
 ; CHECK-LABEL: vreduce_or_v256i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -1626,7 +1626,7 @@ declare i16 @llvm.vector.reduce.or.v32i16(<32 x i16>)
 define i16 @vreduce_or_v32i16(<32 x i16>* %x) {
 ; CHECK-LABEL: vreduce_or_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
@@ -1645,7 +1645,7 @@ declare i16 @llvm.vector.reduce.or.v64i16(<64 x i16>)
 define i16 @vreduce_or_v64i16(<64 x i16>* %x) {
 ; CHECK-LABEL: vreduce_or_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
@@ -1664,7 +1664,7 @@ declare i16 @llvm.vector.reduce.or.v128i16(<128 x i16>)
 define i16 @vreduce_or_v128i16(<128 x i16>* %x) {
 ; CHECK-LABEL: vreduce_or_v128i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -1772,7 +1772,7 @@ declare i32 @llvm.vector.reduce.or.v32i32(<32 x i32>)
 define i32 @vreduce_or_v32i32(<32 x i32>* %x) {
 ; CHECK-LABEL: vreduce_or_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
@@ -1791,7 +1791,7 @@ declare i32 @llvm.vector.reduce.or.v64i32(<64 x i32>)
 define i32 @vreduce_or_v64i32(<64 x i32>* %x) {
 ; CHECK-LABEL: vreduce_or_v64i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -1815,7 +1815,7 @@ define i64 @vreduce_or_v1i64(<1 x i64>* %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -1844,7 +1844,7 @@ define i64 @vreduce_or_v2i64(<2 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vredor.vs v8, v8, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1877,7 +1877,7 @@ define i64 @vreduce_or_v4i64(<4 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV32-NEXT:    vredor.vs v8, v8, v10
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1910,7 +1910,7 @@ define i64 @vreduce_or_v8i64(<8 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vredor.vs v8, v8, v12
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1943,7 +1943,7 @@ define i64 @vreduce_or_v16i64(<16 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredor.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1979,7 +1979,7 @@ define i64 @vreduce_or_v32i64(<32 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredor.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -2024,7 +2024,7 @@ define i64 @vreduce_or_v64i64(<64 x i64>* %x) nounwind {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredor.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -2145,7 +2145,7 @@ declare i8 @llvm.vector.reduce.xor.v32i8(<32 x i8>)
 define i8 @vreduce_xor_v32i8(<32 x i8>* %x) {
 ; CHECK-LABEL: vreduce_xor_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -2164,7 +2164,7 @@ declare i8 @llvm.vector.reduce.xor.v64i8(<64 x i8>)
 define i8 @vreduce_xor_v64i8(<64 x i8>* %x) {
 ; CHECK-LABEL: vreduce_xor_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -2183,7 +2183,7 @@ declare i8 @llvm.vector.reduce.xor.v128i8(<128 x i8>)
 define i8 @vreduce_xor_v128i8(<128 x i8>* %x) {
 ; CHECK-LABEL: vreduce_xor_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -2202,7 +2202,7 @@ declare i8 @llvm.vector.reduce.xor.v256i8(<256 x i8>)
 define i8 @vreduce_xor_v256i8(<256 x i8>* %x) {
 ; CHECK-LABEL: vreduce_xor_v256i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -2310,7 +2310,7 @@ declare i16 @llvm.vector.reduce.xor.v32i16(<32 x i16>)
 define i16 @vreduce_xor_v32i16(<32 x i16>* %x) {
 ; CHECK-LABEL: vreduce_xor_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
@@ -2329,7 +2329,7 @@ declare i16 @llvm.vector.reduce.xor.v64i16(<64 x i16>)
 define i16 @vreduce_xor_v64i16(<64 x i16>* %x) {
 ; CHECK-LABEL: vreduce_xor_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
@@ -2348,7 +2348,7 @@ declare i16 @llvm.vector.reduce.xor.v128i16(<128 x i16>)
 define i16 @vreduce_xor_v128i16(<128 x i16>* %x) {
 ; CHECK-LABEL: vreduce_xor_v128i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -2456,7 +2456,7 @@ declare i32 @llvm.vector.reduce.xor.v32i32(<32 x i32>)
 define i32 @vreduce_xor_v32i32(<32 x i32>* %x) {
 ; CHECK-LABEL: vreduce_xor_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
@@ -2475,7 +2475,7 @@ declare i32 @llvm.vector.reduce.xor.v64i32(<64 x i32>)
 define i32 @vreduce_xor_v64i32(<64 x i32>* %x) {
 ; CHECK-LABEL: vreduce_xor_v64i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -2499,7 +2499,7 @@ define i64 @vreduce_xor_v1i64(<1 x i64>* %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -2528,7 +2528,7 @@ define i64 @vreduce_xor_v2i64(<2 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vredxor.vs v8, v8, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -2561,7 +2561,7 @@ define i64 @vreduce_xor_v4i64(<4 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV32-NEXT:    vredxor.vs v8, v8, v10
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -2594,7 +2594,7 @@ define i64 @vreduce_xor_v8i64(<8 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vredxor.vs v8, v8, v12
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -2627,7 +2627,7 @@ define i64 @vreduce_xor_v16i64(<16 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredxor.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -2663,7 +2663,7 @@ define i64 @vreduce_xor_v32i64(<32 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredxor.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -2708,7 +2708,7 @@ define i64 @vreduce_xor_v64i64(<64 x i64>* %x) nounwind {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredxor.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -2759,7 +2759,7 @@ define i8 @vreduce_smin_v2i8(<2 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
@@ -2778,7 +2778,7 @@ define i8 @vreduce_smin_v4i8(<4 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
@@ -2797,7 +2797,7 @@ define i8 @vreduce_smin_v8i8(<8 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
@@ -2816,7 +2816,7 @@ define i8 @vreduce_smin_v16i8(<16 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
@@ -2833,10 +2833,10 @@ declare i8 @llvm.vector.reduce.smin.v32i8(<32 x i8>)
 define i8 @vreduce_smin_v32i8(<32 x i8>* %x) {
 ; CHECK-LABEL: vreduce_smin_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -2853,10 +2853,10 @@ declare i8 @llvm.vector.reduce.smin.v64i8(<64 x i8>)
 define i8 @vreduce_smin_v64i8(<64 x i8>* %x) {
 ; CHECK-LABEL: vreduce_smin_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v12, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -2873,10 +2873,10 @@ declare i8 @llvm.vector.reduce.smin.v128i8(<128 x i8>)
 define i8 @vreduce_smin_v128i8(<128 x i8>* %x) {
 ; CHECK-LABEL: vreduce_smin_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v16, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -2893,13 +2893,13 @@ declare i8 @llvm.vector.reduce.smin.v256i8(<256 x i8>)
 define i8 @vreduce_smin_v256i8(<256 x i8>* %x) {
 ; CHECK-LABEL: vreduce_smin_v256i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle8.v v16, (a0)
 ; CHECK-NEXT:    vmin.vv v8, v8, v16
-; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v16, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -3062,7 +3062,7 @@ declare i16 @llvm.vector.reduce.smin.v32i16(<32 x i16>)
 define i16 @vreduce_smin_v32i16(<32 x i16>* %x) {
 ; RV32-LABEL: vreduce_smin_v32i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; RV32-NEXT:    vle16.v v8, (a0)
 ; RV32-NEXT:    lui a0, 8
@@ -3076,7 +3076,7 @@ define i16 @vreduce_smin_v32i16(<32 x i16>* %x) {
 ;
 ; RV64-LABEL: vreduce_smin_v32i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, 32
+; RV64-NEXT:    li a1, 32
 ; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; RV64-NEXT:    vle16.v v8, (a0)
 ; RV64-NEXT:    lui a0, 8
@@ -3097,7 +3097,7 @@ declare i16 @llvm.vector.reduce.smin.v64i16(<64 x i16>)
 define i16 @vreduce_smin_v64i16(<64 x i16>* %x) {
 ; RV32-LABEL: vreduce_smin_v64i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 64
+; RV32-NEXT:    li a1, 64
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; RV32-NEXT:    vle16.v v8, (a0)
 ; RV32-NEXT:    lui a0, 8
@@ -3111,7 +3111,7 @@ define i16 @vreduce_smin_v64i16(<64 x i16>* %x) {
 ;
 ; RV64-LABEL: vreduce_smin_v64i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, 64
+; RV64-NEXT:    li a1, 64
 ; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; RV64-NEXT:    vle16.v v8, (a0)
 ; RV64-NEXT:    lui a0, 8
@@ -3132,7 +3132,7 @@ declare i16 @llvm.vector.reduce.smin.v128i16(<128 x i16>)
 define i16 @vreduce_smin_v128i16(<128 x i16>* %x) {
 ; RV32-LABEL: vreduce_smin_v128i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 64
+; RV32-NEXT:    li a1, 64
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; RV32-NEXT:    vle16.v v8, (a0)
 ; RV32-NEXT:    addi a0, a0, 128
@@ -3149,7 +3149,7 @@ define i16 @vreduce_smin_v128i16(<128 x i16>* %x) {
 ;
 ; RV64-LABEL: vreduce_smin_v128i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, 64
+; RV64-NEXT:    li a1, 64
 ; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; RV64-NEXT:    vle16.v v8, (a0)
 ; RV64-NEXT:    addi a0, a0, 128
@@ -3319,7 +3319,7 @@ declare i32 @llvm.vector.reduce.smin.v32i32(<32 x i32>)
 define i32 @vreduce_smin_v32i32(<32 x i32>* %x) {
 ; RV32-LABEL: vreduce_smin_v32i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    lui a0, 524288
@@ -3333,7 +3333,7 @@ define i32 @vreduce_smin_v32i32(<32 x i32>* %x) {
 ;
 ; RV64-LABEL: vreduce_smin_v32i32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, 32
+; RV64-NEXT:    li a1, 32
 ; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; RV64-NEXT:    vle32.v v8, (a0)
 ; RV64-NEXT:    lui a0, 524288
@@ -3354,7 +3354,7 @@ declare i32 @llvm.vector.reduce.smin.v64i32(<64 x i32>)
 define i32 @vreduce_smin_v64i32(<64 x i32>* %x) {
 ; RV32-LABEL: vreduce_smin_v64i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; RV32-NEXT:    vle32.v v8, (a0)
 ; RV32-NEXT:    addi a0, a0, 128
@@ -3371,7 +3371,7 @@ define i32 @vreduce_smin_v64i32(<64 x i32>* %x) {
 ;
 ; RV64-LABEL: vreduce_smin_v64i32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, 32
+; RV64-NEXT:    li a1, 32
 ; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; RV64-NEXT:    vle32.v v8, (a0)
 ; RV64-NEXT:    addi a0, a0, 128
@@ -3397,7 +3397,7 @@ define i64 @vreduce_smin_v1i64(<1 x i64>* %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -3423,7 +3423,7 @@ define i64 @vreduce_smin_v2i64(<2 x i64>* %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    addi a0, zero, -1
+; RV32-NEXT:    li a0, -1
 ; RV32-NEXT:    sw a0, 8(sp)
 ; RV32-NEXT:    lui a0, 524288
 ; RV32-NEXT:    addi a0, a0, -1
@@ -3434,7 +3434,7 @@ define i64 @vreduce_smin_v2i64(<2 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vredmin.vs v8, v8, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -3445,7 +3445,7 @@ define i64 @vreduce_smin_v2i64(<2 x i64>* %x) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    addi a0, zero, -1
+; RV64-NEXT:    li a0, -1
 ; RV64-NEXT:    srli a0, a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v9, a0
@@ -3467,7 +3467,7 @@ define i64 @vreduce_smin_v4i64(<4 x i64>* %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    addi a0, zero, -1
+; RV32-NEXT:    li a0, -1
 ; RV32-NEXT:    sw a0, 8(sp)
 ; RV32-NEXT:    lui a0, 524288
 ; RV32-NEXT:    addi a0, a0, -1
@@ -3478,7 +3478,7 @@ define i64 @vreduce_smin_v4i64(<4 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV32-NEXT:    vredmin.vs v8, v8, v10
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -3489,7 +3489,7 @@ define i64 @vreduce_smin_v4i64(<4 x i64>* %x) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    addi a0, zero, -1
+; RV64-NEXT:    li a0, -1
 ; RV64-NEXT:    srli a0, a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v10, a0
@@ -3511,7 +3511,7 @@ define i64 @vreduce_smin_v8i64(<8 x i64>* %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    addi a0, zero, -1
+; RV32-NEXT:    li a0, -1
 ; RV32-NEXT:    sw a0, 8(sp)
 ; RV32-NEXT:    lui a0, 524288
 ; RV32-NEXT:    addi a0, a0, -1
@@ -3522,7 +3522,7 @@ define i64 @vreduce_smin_v8i64(<8 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vredmin.vs v8, v8, v12
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -3533,7 +3533,7 @@ define i64 @vreduce_smin_v8i64(<8 x i64>* %x) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    addi a0, zero, -1
+; RV64-NEXT:    li a0, -1
 ; RV64-NEXT:    srli a0, a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v12, a0
@@ -3555,7 +3555,7 @@ define i64 @vreduce_smin_v16i64(<16 x i64>* %x) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    addi a0, zero, -1
+; RV32-NEXT:    li a0, -1
 ; RV32-NEXT:    sw a0, 8(sp)
 ; RV32-NEXT:    lui a0, 524288
 ; RV32-NEXT:    addi a0, a0, -1
@@ -3566,7 +3566,7 @@ define i64 @vreduce_smin_v16i64(<16 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredmin.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -3577,7 +3577,7 @@ define i64 @vreduce_smin_v16i64(<16 x i64>* %x) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    addi a0, zero, -1
+; RV64-NEXT:    li a0, -1
 ; RV64-NEXT:    srli a0, a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v16, a0
@@ -3601,7 +3601,7 @@ define i64 @vreduce_smin_v32i64(<32 x i64>* %x) {
 ; RV32-NEXT:    vle64.v v8, (a0)
 ; RV32-NEXT:    addi a0, a0, 128
 ; RV32-NEXT:    vle64.v v16, (a0)
-; RV32-NEXT:    addi a0, zero, -1
+; RV32-NEXT:    li a0, -1
 ; RV32-NEXT:    sw a0, 8(sp)
 ; RV32-NEXT:    lui a0, 524288
 ; RV32-NEXT:    addi a0, a0, -1
@@ -3613,7 +3613,7 @@ define i64 @vreduce_smin_v32i64(<32 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredmin.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -3627,7 +3627,7 @@ define i64 @vreduce_smin_v32i64(<32 x i64>* %x) {
 ; RV64-NEXT:    addi a0, a0, 128
 ; RV64-NEXT:    vle64.v v16, (a0)
 ; RV64-NEXT:    vmin.vv v8, v8, v16
-; RV64-NEXT:    addi a0, zero, -1
+; RV64-NEXT:    li a0, -1
 ; RV64-NEXT:    srli a0, a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v16, a0
@@ -3654,7 +3654,7 @@ define i64 @vreduce_smin_v64i64(<64 x i64>* %x) nounwind {
 ; RV32-NEXT:    vle64.v v24, (a1)
 ; RV32-NEXT:    addi a0, a0, 128
 ; RV32-NEXT:    vle64.v v0, (a0)
-; RV32-NEXT:    addi a0, zero, -1
+; RV32-NEXT:    li a0, -1
 ; RV32-NEXT:    sw a0, 8(sp)
 ; RV32-NEXT:    lui a0, 524288
 ; RV32-NEXT:    addi a0, a0, -1
@@ -3668,7 +3668,7 @@ define i64 @vreduce_smin_v64i64(<64 x i64>* %x) nounwind {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredmin.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -3688,7 +3688,7 @@ define i64 @vreduce_smin_v64i64(<64 x i64>* %x) nounwind {
 ; RV64-NEXT:    vmin.vv v16, v24, v16
 ; RV64-NEXT:    vmin.vv v8, v8, v0
 ; RV64-NEXT:    vmin.vv v8, v8, v16
-; RV64-NEXT:    addi a0, zero, -1
+; RV64-NEXT:    li a0, -1
 ; RV64-NEXT:    srli a0, a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v16, a0
@@ -3722,7 +3722,7 @@ define i8 @vreduce_smax_v2i8(<2 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a0, zero, -128
+; CHECK-NEXT:    li a0, -128
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
@@ -3741,7 +3741,7 @@ define i8 @vreduce_smax_v4i8(<4 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a0, zero, -128
+; CHECK-NEXT:    li a0, -128
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
@@ -3760,7 +3760,7 @@ define i8 @vreduce_smax_v8i8(<8 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a0, zero, -128
+; CHECK-NEXT:    li a0, -128
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
@@ -3779,7 +3779,7 @@ define i8 @vreduce_smax_v16i8(<16 x i8>* %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a0, zero, -128
+; CHECK-NEXT:    li a0, -128
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
@@ -3796,10 +3796,10 @@ declare i8 @llvm.vector.reduce.smax.v32i8(<32 x i8>)
 define i8 @vreduce_smax_v32i8(<32 x i8>* %x) {
 ; CHECK-LABEL: vreduce_smax_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a0, zero, -128
+; CHECK-NEXT:    li a0, -128
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -3816,10 +3816,10 @@ declare i8 @llvm.vector.reduce.smax.v64i8(<64 x i8>)
 define i8 @vreduce_smax_v64i8(<64 x i8>* %x) {
 ; CHECK-LABEL: vreduce_smax_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a0, zero, -128
+; CHECK-NEXT:    li a0, -128
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v12, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -3836,10 +3836,10 @@ declare i8 @llvm.vector.reduce.smax.v128i8(<128 x i8>)
 define i8 @vreduce_smax_v128i8(<128 x i8>* %x) {
 ; CHECK-LABEL: vreduce_smax_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    addi a0, zero, -128
+; CHECK-NEXT:    li a0, -128
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v16, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -3856,13 +3856,13 @@ declare i8 @llvm.vector.reduce.smax.v256i8(<256 x i8>)
 define i8 @vreduce_smax_v256i8(<256 x i8>* %x) {
 ; CHECK-LABEL: vreduce_smax_v256i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle8.v v16, (a0)
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
-; CHECK-NEXT:    addi a0, zero, -128
+; CHECK-NEXT:    li a0, -128
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v16, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -3969,7 +3969,7 @@ declare i16 @llvm.vector.reduce.smax.v32i16(<32 x i16>)
 define i16 @vreduce_smax_v32i16(<32 x i16>* %x) {
 ; CHECK-LABEL: vreduce_smax_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    lui a0, 1048568
@@ -3989,7 +3989,7 @@ declare i16 @llvm.vector.reduce.smax.v64i16(<64 x i16>)
 define i16 @vreduce_smax_v64i16(<64 x i16>* %x) {
 ; CHECK-LABEL: vreduce_smax_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    lui a0, 1048568
@@ -4009,7 +4009,7 @@ declare i16 @llvm.vector.reduce.smax.v128i16(<128 x i16>)
 define i16 @vreduce_smax_v128i16(<128 x i16>* %x) {
 ; CHECK-LABEL: vreduce_smax_v128i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -4122,7 +4122,7 @@ declare i32 @llvm.vector.reduce.smax.v32i32(<32 x i32>)
 define i32 @vreduce_smax_v32i32(<32 x i32>* %x) {
 ; CHECK-LABEL: vreduce_smax_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    lui a0, 524288
@@ -4142,7 +4142,7 @@ declare i32 @llvm.vector.reduce.smax.v64i32(<64 x i32>)
 define i32 @vreduce_smax_v64i32(<64 x i32>* %x) {
 ; CHECK-LABEL: vreduce_smax_v64i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -4167,7 +4167,7 @@ define i64 @vreduce_smax_v1i64(<1 x i64>* %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -4202,7 +4202,7 @@ define i64 @vreduce_smax_v2i64(<2 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vredmax.vs v8, v8, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -4213,7 +4213,7 @@ define i64 @vreduce_smax_v2i64(<2 x i64>* %x) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    addi a0, zero, -1
+; RV64-NEXT:    li a0, -1
 ; RV64-NEXT:    slli a0, a0, 63
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v9, a0
@@ -4244,7 +4244,7 @@ define i64 @vreduce_smax_v4i64(<4 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV32-NEXT:    vredmax.vs v8, v8, v10
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -4255,7 +4255,7 @@ define i64 @vreduce_smax_v4i64(<4 x i64>* %x) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    addi a0, zero, -1
+; RV64-NEXT:    li a0, -1
 ; RV64-NEXT:    slli a0, a0, 63
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v10, a0
@@ -4286,7 +4286,7 @@ define i64 @vreduce_smax_v8i64(<8 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vredmax.vs v8, v8, v12
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -4297,7 +4297,7 @@ define i64 @vreduce_smax_v8i64(<8 x i64>* %x) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    addi a0, zero, -1
+; RV64-NEXT:    li a0, -1
 ; RV64-NEXT:    slli a0, a0, 63
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v12, a0
@@ -4328,7 +4328,7 @@ define i64 @vreduce_smax_v16i64(<16 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredmax.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -4339,7 +4339,7 @@ define i64 @vreduce_smax_v16i64(<16 x i64>* %x) {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    addi a0, zero, -1
+; RV64-NEXT:    li a0, -1
 ; RV64-NEXT:    slli a0, a0, 63
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v16, a0
@@ -4373,7 +4373,7 @@ define i64 @vreduce_smax_v32i64(<32 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredmax.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -4387,7 +4387,7 @@ define i64 @vreduce_smax_v32i64(<32 x i64>* %x) {
 ; RV64-NEXT:    addi a0, a0, 128
 ; RV64-NEXT:    vle64.v v16, (a0)
 ; RV64-NEXT:    vmax.vv v8, v8, v16
-; RV64-NEXT:    addi a0, zero, -1
+; RV64-NEXT:    li a0, -1
 ; RV64-NEXT:    slli a0, a0, 63
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v16, a0
@@ -4426,7 +4426,7 @@ define i64 @vreduce_smax_v64i64(<64 x i64>* %x) nounwind {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredmax.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -4446,7 +4446,7 @@ define i64 @vreduce_smax_v64i64(<64 x i64>* %x) nounwind {
 ; RV64-NEXT:    vmax.vv v16, v24, v16
 ; RV64-NEXT:    vmax.vv v8, v8, v0
 ; RV64-NEXT:    vmax.vv v8, v8, v16
-; RV64-NEXT:    addi a0, zero, -1
+; RV64-NEXT:    li a0, -1
 ; RV64-NEXT:    slli a0, a0, 63
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmv.v.x v16, a0
@@ -4550,7 +4550,7 @@ declare i8 @llvm.vector.reduce.umin.v32i8(<32 x i8>)
 define i8 @vreduce_umin_v32i8(<32 x i8>* %x) {
 ; CHECK-LABEL: vreduce_umin_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -4569,7 +4569,7 @@ declare i8 @llvm.vector.reduce.umin.v64i8(<64 x i8>)
 define i8 @vreduce_umin_v64i8(<64 x i8>* %x) {
 ; CHECK-LABEL: vreduce_umin_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -4588,7 +4588,7 @@ declare i8 @llvm.vector.reduce.umin.v128i8(<128 x i8>)
 define i8 @vreduce_umin_v128i8(<128 x i8>* %x) {
 ; CHECK-LABEL: vreduce_umin_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -4607,7 +4607,7 @@ declare i8 @llvm.vector.reduce.umin.v256i8(<256 x i8>)
 define i8 @vreduce_umin_v256i8(<256 x i8>* %x) {
 ; CHECK-LABEL: vreduce_umin_v256i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -4715,7 +4715,7 @@ declare i16 @llvm.vector.reduce.umin.v32i16(<32 x i16>)
 define i16 @vreduce_umin_v32i16(<32 x i16>* %x) {
 ; CHECK-LABEL: vreduce_umin_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
@@ -4734,7 +4734,7 @@ declare i16 @llvm.vector.reduce.umin.v64i16(<64 x i16>)
 define i16 @vreduce_umin_v64i16(<64 x i16>* %x) {
 ; CHECK-LABEL: vreduce_umin_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
@@ -4753,7 +4753,7 @@ declare i16 @llvm.vector.reduce.umin.v128i16(<128 x i16>)
 define i16 @vreduce_umin_v128i16(<128 x i16>* %x) {
 ; CHECK-LABEL: vreduce_umin_v128i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -4861,7 +4861,7 @@ declare i32 @llvm.vector.reduce.umin.v32i32(<32 x i32>)
 define i32 @vreduce_umin_v32i32(<32 x i32>* %x) {
 ; CHECK-LABEL: vreduce_umin_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
@@ -4880,7 +4880,7 @@ declare i32 @llvm.vector.reduce.umin.v64i32(<64 x i32>)
 define i32 @vreduce_umin_v64i32(<64 x i32>* %x) {
 ; CHECK-LABEL: vreduce_umin_v64i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -4904,7 +4904,7 @@ define i64 @vreduce_umin_v1i64(<1 x i64>* %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -4933,7 +4933,7 @@ define i64 @vreduce_umin_v2i64(<2 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vredminu.vs v8, v8, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -4966,7 +4966,7 @@ define i64 @vreduce_umin_v4i64(<4 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV32-NEXT:    vredminu.vs v8, v8, v10
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -4999,7 +4999,7 @@ define i64 @vreduce_umin_v8i64(<8 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vredminu.vs v8, v8, v12
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -5032,7 +5032,7 @@ define i64 @vreduce_umin_v16i64(<16 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredminu.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -5068,7 +5068,7 @@ define i64 @vreduce_umin_v32i64(<32 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredminu.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -5113,7 +5113,7 @@ define i64 @vreduce_umin_v64i64(<64 x i64>* %x) nounwind {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredminu.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -5234,7 +5234,7 @@ declare i8 @llvm.vector.reduce.umax.v32i8(<32 x i8>)
 define i8 @vreduce_umax_v32i8(<32 x i8>* %x) {
 ; CHECK-LABEL: vreduce_umax_v32i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -5253,7 +5253,7 @@ declare i8 @llvm.vector.reduce.umax.v64i8(<64 x i8>)
 define i8 @vreduce_umax_v64i8(<64 x i8>* %x) {
 ; CHECK-LABEL: vreduce_umax_v64i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -5272,7 +5272,7 @@ declare i8 @llvm.vector.reduce.umax.v128i8(<128 x i8>)
 define i8 @vreduce_umax_v128i8(<128 x i8>* %x) {
 ; CHECK-LABEL: vreduce_umax_v128i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
@@ -5291,7 +5291,7 @@ declare i8 @llvm.vector.reduce.umax.v256i8(<256 x i8>)
 define i8 @vreduce_umax_v256i8(<256 x i8>* %x) {
 ; CHECK-LABEL: vreduce_umax_v256i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -5399,7 +5399,7 @@ declare i16 @llvm.vector.reduce.umax.v32i16(<32 x i16>)
 define i16 @vreduce_umax_v32i16(<32 x i16>* %x) {
 ; CHECK-LABEL: vreduce_umax_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
@@ -5418,7 +5418,7 @@ declare i16 @llvm.vector.reduce.umax.v64i16(<64 x i16>)
 define i16 @vreduce_umax_v64i16(<64 x i16>* %x) {
 ; CHECK-LABEL: vreduce_umax_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
@@ -5437,7 +5437,7 @@ declare i16 @llvm.vector.reduce.umax.v128i16(<128 x i16>)
 define i16 @vreduce_umax_v128i16(<128 x i16>* %x) {
 ; CHECK-LABEL: vreduce_umax_v128i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 64
+; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -5545,7 +5545,7 @@ declare i32 @llvm.vector.reduce.umax.v32i32(<32 x i32>)
 define i32 @vreduce_umax_v32i32(<32 x i32>* %x) {
 ; CHECK-LABEL: vreduce_umax_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
@@ -5564,7 +5564,7 @@ declare i32 @llvm.vector.reduce.umax.v64i32(<64 x i32>)
 define i32 @vreduce_umax_v64i32(<64 x i32>* %x) {
 ; CHECK-LABEL: vreduce_umax_v64i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    addi a0, a0, 128
@@ -5588,7 +5588,7 @@ define i64 @vreduce_umax_v1i64(<1 x i64>* %x) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vle64.v v8, (a0)
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vmv.x.s a1, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
@@ -5617,7 +5617,7 @@ define i64 @vreduce_umax_v2i64(<2 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; RV32-NEXT:    vredmaxu.vs v8, v8, v9
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -5650,7 +5650,7 @@ define i64 @vreduce_umax_v4i64(<4 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV32-NEXT:    vredmaxu.vs v8, v8, v10
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -5683,7 +5683,7 @@ define i64 @vreduce_umax_v8i64(<8 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vredmaxu.vs v8, v8, v12
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -5716,7 +5716,7 @@ define i64 @vreduce_umax_v16i64(<16 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredmaxu.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -5752,7 +5752,7 @@ define i64 @vreduce_umax_v32i64(<32 x i64>* %x) {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredmaxu.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -5797,7 +5797,7 @@ define i64 @vreduce_umax_v64i64(<64 x i64>* %x) nounwind {
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; RV32-NEXT:    vredmaxu.vs v8, v8, v16
 ; RV32-NEXT:    vmv.x.s a0, v8
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v8, a1
 ; RV32-NEXT:    vmv.x.s a1, v8

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
index 03349765c7b9..3f89d62501b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -401,12 +401,12 @@ declare <256 x i8> @llvm.vp.add.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
 define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vadd_vi_v258i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 128
+; CHECK-NEXT:    li a2, 128
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
 ; CHECK-NEXT:    vlm.v v25, (a0)
 ; CHECK-NEXT:    addi a3, a1, -128
 ; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    mv a0, zero
+; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:    bltu a1, a3, .LBB31_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a3
@@ -416,7 +416,7 @@ define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vadd.vi v16, v16, -1, v0.t
 ; CHECK-NEXT:    bltu a1, a2, .LBB31_4
 ; CHECK-NEXT:  # %bb.3:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:  .LBB31_4:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vmv1r.v v0, v24
@@ -432,17 +432,17 @@ define <256 x i8> @vadd_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vadd_vi_v258i8_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a1, a0, -128
-; CHECK-NEXT:    mv a2, zero
+; CHECK-NEXT:    li a2, 0
 ; CHECK-NEXT:    bltu a0, a1, .LBB32_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a2, a1
 ; CHECK-NEXT:  .LBB32_2:
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vadd.vi v16, v16, -1
 ; CHECK-NEXT:    bltu a0, a1, .LBB32_4
 ; CHECK-NEXT:  # %bb.3:
-; CHECK-NEXT:    addi a0, zero, 128
+; CHECK-NEXT:    li a0, 128
 ; CHECK-NEXT:  .LBB32_4:
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
@@ -460,7 +460,7 @@ define <256 x i8> @vadd_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
 define <256 x i8> @vadd_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-LABEL: vadd_vi_v258i8_evl129:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 128
+; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vlm.v v24, (a0)
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
@@ -479,7 +479,7 @@ define <256 x i8> @vadd_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
 define <256 x i8> @vadd_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
 ; CHECK-LABEL: vadd_vi_v258i8_evl128:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 128
+; CHECK-NEXT:    li a0, 128
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
 ; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
 ; CHECK-NEXT:    ret
@@ -1521,10 +1521,10 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV32-LABEL: vadd_vx_v32i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vmv1r.v v1, v0
-; RV32-NEXT:    mv a1, zero
+; RV32-NEXT:    li a1, 0
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
 ; RV32-NEXT:    vslidedown.vi v0, v0, 2
-; RV32-NEXT:    addi a2, zero, 32
+; RV32-NEXT:    li a2, 32
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; RV32-NEXT:    addi a2, a0, -16
 ; RV32-NEXT:    vmv.v.i v24, -1
@@ -1533,11 +1533,11 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV32-NEXT:    mv a1, a2
 ; RV32-NEXT:  .LBB107_2:
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; RV32-NEXT:    addi a1, zero, 16
+; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:    vadd.vv v16, v16, v24, v0.t
 ; RV32-NEXT:    bltu a0, a1, .LBB107_4
 ; RV32-NEXT:  # %bb.3:
-; RV32-NEXT:    addi a0, zero, 16
+; RV32-NEXT:    li a0, 16
 ; RV32-NEXT:  .LBB107_4:
 ; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
 ; RV32-NEXT:    vmv1r.v v0, v1
@@ -1547,7 +1547,7 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV64-LABEL: vadd_vx_v32i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vmv1r.v v24, v0
-; RV64-NEXT:    mv a1, zero
+; RV64-NEXT:    li a1, 0
 ; RV64-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
 ; RV64-NEXT:    addi a2, a0, -16
 ; RV64-NEXT:    vslidedown.vi v0, v0, 2
@@ -1556,11 +1556,11 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 ; RV64-NEXT:    mv a1, a2
 ; RV64-NEXT:  .LBB107_2:
 ; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; RV64-NEXT:    addi a1, zero, 16
+; RV64-NEXT:    li a1, 16
 ; RV64-NEXT:    vadd.vi v16, v16, -1, v0.t
 ; RV64-NEXT:    bltu a0, a1, .LBB107_4
 ; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    addi a0, zero, 16
+; RV64-NEXT:    li a0, 16
 ; RV64-NEXT:  .LBB107_4:
 ; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
 ; RV64-NEXT:    vmv1r.v v0, v24
@@ -1575,8 +1575,8 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
 define <32 x i64> @vadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vadd_vi_v32i64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    mv a1, zero
-; RV32-NEXT:    addi a2, zero, 32
+; RV32-NEXT:    li a1, 0
+; RV32-NEXT:    li a2, 32
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; RV32-NEXT:    addi a2, a0, -16
 ; RV32-NEXT:    vmv.v.i v24, -1
@@ -1585,11 +1585,11 @@ define <32 x i64> @vadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV32-NEXT:    mv a1, a2
 ; RV32-NEXT:  .LBB108_2:
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; RV32-NEXT:    addi a1, zero, 16
+; RV32-NEXT:    li a1, 16
 ; RV32-NEXT:    vadd.vv v16, v16, v24
 ; RV32-NEXT:    bltu a0, a1, .LBB108_4
 ; RV32-NEXT:  # %bb.3:
-; RV32-NEXT:    addi a0, zero, 16
+; RV32-NEXT:    li a0, 16
 ; RV32-NEXT:  .LBB108_4:
 ; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
 ; RV32-NEXT:    vadd.vv v8, v8, v24
@@ -1598,17 +1598,17 @@ define <32 x i64> @vadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; RV64-LABEL: vadd_vi_v32i64_unmasked:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi a1, a0, -16
-; RV64-NEXT:    mv a2, zero
+; RV64-NEXT:    li a2, 0
 ; RV64-NEXT:    bltu a0, a1, .LBB108_2
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    mv a2, a1
 ; RV64-NEXT:  .LBB108_2:
 ; RV64-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; RV64-NEXT:    addi a1, zero, 16
+; RV64-NEXT:    li a1, 16
 ; RV64-NEXT:    vadd.vi v16, v16, -1
 ; RV64-NEXT:    bltu a0, a1, .LBB108_4
 ; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    addi a0, zero, 16
+; RV64-NEXT:    li a0, 16
 ; RV64-NEXT:  .LBB108_4:
 ; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
 ; RV64-NEXT:    vadd.vi v8, v8, -1
@@ -1626,7 +1626,7 @@ define <32 x i64> @vadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 define <32 x i64> @vadd_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
 ; RV32-LABEL: vadd_vx_v32i64_evl12:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; RV32-NEXT:    vmv.v.i v16, -1
 ; RV32-NEXT:    vsetivli zero, 12, e64, m8, ta, mu
@@ -1649,7 +1649,7 @@ define <32 x i64> @vadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
 ; RV32-NEXT:    vslidedown.vi v1, v0, 2
-; RV32-NEXT:    addi a0, zero, 32
+; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
 ; RV32-NEXT:    vmv.v.i v24, -1
 ; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
index 3180f0445d4c..ea41aba5e5d7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
@@ -1268,7 +1268,7 @@ define <11 x i64> @vand_vx_v11i64(<11 x i64> %va, i64 %b, <11 x i1> %m, i32 zero
 ; RV32-LABEL: vand_vx_v11i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vmv1r.v v16, v0
-; RV32-NEXT:    addi a3, zero, 32
+; RV32-NEXT:    li a3, 32
 ; RV32-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
 ; RV32-NEXT:    vmv.v.x v24, a1
 ; RV32-NEXT:    lui a1, 341
@@ -1296,7 +1296,7 @@ define <11 x i64> @vand_vx_v11i64(<11 x i64> %va, i64 %b, <11 x i1> %m, i32 zero
 define <11 x i64> @vand_vx_v11i64_unmasked(<11 x i64> %va, i64 %b, i32 zeroext %evl) {
 ; RV32-LABEL: vand_vx_v11i64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a3, zero, 32
+; RV32-NEXT:    li a3, 32
 ; RV32-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
 ; RV32-NEXT:    vmv.v.x v16, a1
 ; RV32-NEXT:    lui a1, 341

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
index 537583143ced..246dd3100cb1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
@@ -9,7 +9,7 @@ declare <8 x i7> @llvm.vp.udiv.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
 define <8 x i7> @vdivu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vv_v8i7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 127
+; CHECK-NEXT:    li a1, 127
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vand.vx v9, v9, a1
 ; CHECK-NEXT:    vand.vx v8, v8, a1

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
index 7887f3c5dbb6..f519a81965ae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
@@ -238,7 +238,7 @@ define signext i1 @vreduce_or_v32i1(<32 x i1> %v) {
 ;
 ; LMULMAX8-LABEL: vreduce_or_v32i1:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a0, zero, 32
+; LMULMAX8-NEXT:    li a0, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; LMULMAX8-NEXT:    vcpop.m a0, v0
 ; LMULMAX8-NEXT:    snez a0, a0
@@ -262,7 +262,7 @@ define signext i1 @vreduce_xor_v32i1(<32 x i1> %v) {
 ;
 ; LMULMAX8-LABEL: vreduce_xor_v32i1:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a0, zero, 32
+; LMULMAX8-NEXT:    li a0, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; LMULMAX8-NEXT:    vcpop.m a0, v0
 ; LMULMAX8-NEXT:    andi a0, a0, 1
@@ -286,7 +286,7 @@ define signext i1 @vreduce_and_v32i1(<32 x i1> %v) {
 ;
 ; LMULMAX8-LABEL: vreduce_and_v32i1:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a0, zero, 32
+; LMULMAX8-NEXT:    li a0, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; LMULMAX8-NEXT:    vmnand.mm v8, v0, v0
 ; LMULMAX8-NEXT:    vcpop.m a0, v8
@@ -313,7 +313,7 @@ define signext i1 @vreduce_or_v64i1(<64 x i1> %v) {
 ;
 ; LMULMAX8-LABEL: vreduce_or_v64i1:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a0, zero, 64
+; LMULMAX8-NEXT:    li a0, 64
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; LMULMAX8-NEXT:    vcpop.m a0, v0
 ; LMULMAX8-NEXT:    snez a0, a0
@@ -339,7 +339,7 @@ define signext i1 @vreduce_xor_v64i1(<64 x i1> %v) {
 ;
 ; LMULMAX8-LABEL: vreduce_xor_v64i1:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a0, zero, 64
+; LMULMAX8-NEXT:    li a0, 64
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; LMULMAX8-NEXT:    vcpop.m a0, v0
 ; LMULMAX8-NEXT:    andi a0, a0, 1
@@ -365,7 +365,7 @@ define signext i1 @vreduce_and_v64i1(<64 x i1> %v) {
 ;
 ; LMULMAX8-LABEL: vreduce_and_v64i1:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    addi a0, zero, 64
+; LMULMAX8-NEXT:    li a0, 64
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; LMULMAX8-NEXT:    vmnand.mm v8, v0, v0
 ; LMULMAX8-NEXT:    vcpop.m a0, v8

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
index 64dadf0769fb..30be5a51002e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
@@ -9,7 +9,7 @@ declare <8 x i7> @llvm.vp.urem.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
 define <8 x i7> @vremu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vremu_vv_v8i7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 127
+; CHECK-NEXT:    li a1, 127
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vand.vx v9, v9, a1
 ; CHECK-NEXT:    vand.vx v8, v8, a1

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
index fee5a617441d..348ef0a734ae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
@@ -167,7 +167,7 @@ define void @vselect_vi_v16i16(<16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) {
 define void @vselect_vv_v32f16(<32 x half>* %a, <32 x half>* %b, <32 x i1>* %cc, <32 x half>* %z) {
 ; CHECK-LABEL: vselect_vv_v32f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a4, zero, 32
+; CHECK-NEXT:    li a4, 32
 ; CHECK-NEXT:    vsetvli zero, a4, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vlm.v v0, (a2)
@@ -186,7 +186,7 @@ define void @vselect_vv_v32f16(<32 x half>* %a, <32 x half>* %b, <32 x i1>* %cc,
 define void @vselect_vx_v32f16(half %a, <32 x half>* %b, <32 x i1>* %cc, <32 x half>* %z) {
 ; CHECK-LABEL: vselect_vx_v32f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
 ; CHECK-NEXT:    vlm.v v0, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
@@ -205,7 +205,7 @@ define void @vselect_vx_v32f16(half %a, <32 x half>* %b, <32 x i1>* %cc, <32 x h
 define void @vselect_vfpzero_v32f16(<32 x half>* %b, <32 x i1>* %cc, <32 x half>* %z) {
 ; CHECK-LABEL: vselect_vfpzero_v32f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a3, zero, 32
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
 ; CHECK-NEXT:    vlm.v v0, (a1)
 ; CHECK-NEXT:    vle16.v v8, (a0)
@@ -272,7 +272,7 @@ define <16 x i1> @vselect_v16i1(<16 x i1> %a, <16 x i1> %b, <16 x i1> %cc) {
 define <32 x i1> @vselect_v32i1(<32 x i1> %a, <32 x i1> %b, <32 x i1> %cc) {
 ; CHECK-LABEL: vselect_v32i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vmandn.mm v8, v8, v9
 ; CHECK-NEXT:    vmand.mm v9, v0, v9
@@ -285,7 +285,7 @@ define <32 x i1> @vselect_v32i1(<32 x i1> %a, <32 x i1> %b, <32 x i1> %cc) {
 define <64 x i1> @vselect_v64i1(<64 x i1> %a, <64 x i1> %b, <64 x i1> %cc) {
 ; CHECK-LABEL: vselect_v64i1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 64
+; CHECK-NEXT:    li a0, 64
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vmandn.mm v8, v8, v9
 ; CHECK-NEXT:    vmand.mm v9, v0, v9

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll
index 3c191b122e71..2c288ecc9480 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll
@@ -9,7 +9,7 @@ declare <8 x i7> @llvm.vp.shl.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
 define <8 x i7> @vsll_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vv_v8i7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 127
+; CHECK-NEXT:    li a1, 127
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vand.vx v9, v9, a1
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll
index 71f0580f87f6..84f7a2e34903 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll
@@ -9,7 +9,7 @@ declare <8 x i7> @llvm.vp.ashr.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
 define <8 x i7> @vsra_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_v8i7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 127
+; CHECK-NEXT:    li a1, 127
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vand.vx v9, v9, a1
 ; CHECK-NEXT:    vadd.vv v8, v8, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll
index fd5e71e4654a..265acfbf82c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll
@@ -9,7 +9,7 @@ declare <8 x i7> @llvm.vp.lshr.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
 define <8 x i7> @vsrl_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vv_v8i7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 127
+; CHECK-NEXT:    li a1, 127
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vand.vx v9, v9, a1
 ; CHECK-NEXT:    vand.vx v8, v8, a1

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll
index b38a944f5302..f439cefb128b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll
@@ -31,7 +31,7 @@ define <2 x i8> @ssub_v2i8_vx(<2 x i8> %va, i8 %b) {
 define <2 x i8> @ssub_v2i8_vi(<2 x i8> %va) {
 ; CHECK-LABEL: ssub_v2i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -68,7 +68,7 @@ define <4 x i8> @ssub_v4i8_vx(<4 x i8> %va, i8 %b) {
 define <4 x i8> @ssub_v4i8_vi(<4 x i8> %va) {
 ; CHECK-LABEL: ssub_v4i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -105,7 +105,7 @@ define <8 x i8> @ssub_v8i8_vx(<8 x i8> %va, i8 %b) {
 define <8 x i8> @ssub_v8i8_vi(<8 x i8> %va) {
 ; CHECK-LABEL: ssub_v8i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -142,7 +142,7 @@ define <16 x i8> @ssub_v16i8_vx(<16 x i8> %va, i8 %b) {
 define <16 x i8> @ssub_v16i8_vi(<16 x i8> %va) {
 ; CHECK-LABEL: ssub_v16i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -179,7 +179,7 @@ define <2 x i16> @ssub_v2i16_vx(<2 x i16> %va, i16 %b) {
 define <2 x i16> @ssub_v2i16_vi(<2 x i16> %va) {
 ; CHECK-LABEL: ssub_v2i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -216,7 +216,7 @@ define <4 x i16> @ssub_v4i16_vx(<4 x i16> %va, i16 %b) {
 define <4 x i16> @ssub_v4i16_vi(<4 x i16> %va) {
 ; CHECK-LABEL: ssub_v4i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -253,7 +253,7 @@ define <8 x i16> @ssub_v8i16_vx(<8 x i16> %va, i16 %b) {
 define <8 x i16> @ssub_v8i16_vi(<8 x i16> %va) {
 ; CHECK-LABEL: ssub_v8i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -290,7 +290,7 @@ define <16 x i16> @ssub_v16i16_vx(<16 x i16> %va, i16 %b) {
 define <16 x i16> @ssub_v16i16_vi(<16 x i16> %va) {
 ; CHECK-LABEL: ssub_v16i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -327,7 +327,7 @@ define <2 x i32> @ssub_v2i32_vx(<2 x i32> %va, i32 %b) {
 define <2 x i32> @ssub_v2i32_vi(<2 x i32> %va) {
 ; CHECK-LABEL: ssub_v2i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -364,7 +364,7 @@ define <4 x i32> @ssub_v4i32_vx(<4 x i32> %va, i32 %b) {
 define <4 x i32> @ssub_v4i32_vi(<4 x i32> %va) {
 ; CHECK-LABEL: ssub_v4i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -401,7 +401,7 @@ define <8 x i32> @ssub_v8i32_vx(<8 x i32> %va, i32 %b) {
 define <8 x i32> @ssub_v8i32_vi(<8 x i32> %va) {
 ; CHECK-LABEL: ssub_v8i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -438,7 +438,7 @@ define <16 x i32> @ssub_v16i32_vx(<16 x i32> %va, i32 %b) {
 define <16 x i32> @ssub_v16i32_vi(<16 x i32> %va) {
 ; CHECK-LABEL: ssub_v16i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -488,7 +488,7 @@ define <2 x i64> @ssub_v2i64_vx(<2 x i64> %va, i64 %b) {
 define <2 x i64> @ssub_v2i64_vi(<2 x i64> %va) {
 ; CHECK-LABEL: ssub_v2i64_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -538,7 +538,7 @@ define <4 x i64> @ssub_v4i64_vx(<4 x i64> %va, i64 %b) {
 define <4 x i64> @ssub_v4i64_vi(<4 x i64> %va) {
 ; CHECK-LABEL: ssub_v4i64_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -588,7 +588,7 @@ define <8 x i64> @ssub_v8i64_vx(<8 x i64> %va, i64 %b) {
 define <8 x i64> @ssub_v8i64_vi(<8 x i64> %va) {
 ; CHECK-LABEL: ssub_v8i64_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -638,7 +638,7 @@ define <16 x i64> @ssub_v16i64_vx(<16 x i64> %va, i64 %b) {
 define <16 x i64> @ssub_v16i64_vi(<16 x i64> %va) {
 ; CHECK-LABEL: ssub_v16i64_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll
index 9e345c93d3a6..6eaadbd4c79e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll
@@ -31,7 +31,7 @@ define <2 x i8> @usub_v2i8_vx(<2 x i8> %va, i8 %b) {
 define <2 x i8> @usub_v2i8_vi(<2 x i8> %va) {
 ; CHECK-LABEL: usub_v2i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -68,7 +68,7 @@ define <4 x i8> @usub_v4i8_vx(<4 x i8> %va, i8 %b) {
 define <4 x i8> @usub_v4i8_vi(<4 x i8> %va) {
 ; CHECK-LABEL: usub_v4i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -105,7 +105,7 @@ define <8 x i8> @usub_v8i8_vx(<8 x i8> %va, i8 %b) {
 define <8 x i8> @usub_v8i8_vi(<8 x i8> %va) {
 ; CHECK-LABEL: usub_v8i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -142,7 +142,7 @@ define <16 x i8> @usub_v16i8_vx(<16 x i8> %va, i8 %b) {
 define <16 x i8> @usub_v16i8_vi(<16 x i8> %va) {
 ; CHECK-LABEL: usub_v16i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -179,7 +179,7 @@ define <2 x i16> @usub_v2i16_vx(<2 x i16> %va, i16 %b) {
 define <2 x i16> @usub_v2i16_vi(<2 x i16> %va) {
 ; CHECK-LABEL: usub_v2i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -216,7 +216,7 @@ define <4 x i16> @usub_v4i16_vx(<4 x i16> %va, i16 %b) {
 define <4 x i16> @usub_v4i16_vi(<4 x i16> %va) {
 ; CHECK-LABEL: usub_v4i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -253,7 +253,7 @@ define <8 x i16> @usub_v8i16_vx(<8 x i16> %va, i16 %b) {
 define <8 x i16> @usub_v8i16_vi(<8 x i16> %va) {
 ; CHECK-LABEL: usub_v8i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -290,7 +290,7 @@ define <16 x i16> @usub_v16i16_vx(<16 x i16> %va, i16 %b) {
 define <16 x i16> @usub_v16i16_vi(<16 x i16> %va) {
 ; CHECK-LABEL: usub_v16i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -327,7 +327,7 @@ define <2 x i32> @usub_v2i32_vx(<2 x i32> %va, i32 %b) {
 define <2 x i32> @usub_v2i32_vi(<2 x i32> %va) {
 ; CHECK-LABEL: usub_v2i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -364,7 +364,7 @@ define <4 x i32> @usub_v4i32_vx(<4 x i32> %va, i32 %b) {
 define <4 x i32> @usub_v4i32_vi(<4 x i32> %va) {
 ; CHECK-LABEL: usub_v4i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -401,7 +401,7 @@ define <8 x i32> @usub_v8i32_vx(<8 x i32> %va, i32 %b) {
 define <8 x i32> @usub_v8i32_vi(<8 x i32> %va) {
 ; CHECK-LABEL: usub_v8i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -438,7 +438,7 @@ define <16 x i32> @usub_v16i32_vx(<16 x i32> %va, i32 %b) {
 define <16 x i32> @usub_v16i32_vi(<16 x i32> %va) {
 ; CHECK-LABEL: usub_v16i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -488,7 +488,7 @@ define <2 x i64> @usub_v2i64_vx(<2 x i64> %va, i64 %b) {
 define <2 x i64> @usub_v2i64_vi(<2 x i64> %va) {
 ; CHECK-LABEL: usub_v2i64_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -538,7 +538,7 @@ define <4 x i64> @usub_v4i64_vx(<4 x i64> %va, i64 %b) {
 define <4 x i64> @usub_v4i64_vi(<4 x i64> %va) {
 ; CHECK-LABEL: usub_v4i64_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -588,7 +588,7 @@ define <8 x i64> @usub_v8i64_vx(<8 x i64> %va, i64 %b) {
 define <8 x i64> @usub_v8i64_vi(<8 x i64> %va) {
 ; CHECK-LABEL: usub_v8i64_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -638,7 +638,7 @@ define <16 x i64> @usub_v16i64_vx(<16 x i64> %va, i64 %b) {
 define <16 x i64> @usub_v16i64_vi(<16 x i64> %va) {
 ; CHECK-LABEL: usub_v16i64_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll
index a7ff2aa52af6..3bcd29572a49 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll
@@ -158,7 +158,7 @@ define <4 x i64> @vwmacc_v4i64(<4 x i32>* %x, <4 x i32>* %y, <4 x i64> %z) {
 define <32 x i16> @vwmacc_v32i16(<32 x i8>* %x, <32 x i8>* %y, <32 x i16> %z) {
 ; CHECK-LABEL: vwmacc_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v12, (a0)
 ; CHECK-NEXT:    vle8.v v14, (a1)
@@ -210,7 +210,7 @@ define <8 x i64> @vwmacc_v8i64(<8 x i32>* %x, <8 x i32>* %y, <8 x i64> %z) {
 define <64 x i16> @vwmacc_v64i16(<64 x i8>* %x, <64 x i8>* %y, <64 x i16> %z) {
 ; CHECK-LABEL: vwmacc_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v16, (a0)
 ; CHECK-NEXT:    vle8.v v20, (a1)
@@ -228,7 +228,7 @@ define <64 x i16> @vwmacc_v64i16(<64 x i8>* %x, <64 x i8>* %y, <64 x i16> %z) {
 define <32 x i32> @vwmacc_v32i32(<32 x i16>* %x, <32 x i16>* %y, <32 x i32> %z) {
 ; CHECK-LABEL: vwmacc_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v16, (a0)
 ; CHECK-NEXT:    vle16.v v20, (a1)
@@ -416,7 +416,7 @@ define <4 x i64> @vwmacc_vx_v4i64(<4 x i32>* %x, i32 %y, <4 x i64> %z) {
 define <32 x i16> @vwmacc_vx_v32i16(<32 x i8>* %x, i8 %y, <32 x i16> %z) {
 ; CHECK-LABEL: vwmacc_vx_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v12, (a0)
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v12
@@ -468,7 +468,7 @@ define <8 x i64> @vwmacc_vx_v8i64(<8 x i32>* %x, i32 %y, <8 x i64> %z) {
 define <64 x i16> @vwmacc_vx_v64i16(<64 x i8>* %x, i8 %y, <64 x i16> %z) {
 ; CHECK-LABEL: vwmacc_vx_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v16, (a0)
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v16
@@ -486,7 +486,7 @@ define <64 x i16> @vwmacc_vx_v64i16(<64 x i8>* %x, i8 %y, <64 x i16> %z) {
 define <32 x i32> @vwmacc_vx_v32i32(<32 x i16>* %x, i16 %y, <32 x i32> %z) {
 ; CHECK-LABEL: vwmacc_vx_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v16, (a0)
 ; CHECK-NEXT:    vwmacc.vx v8, a1, v16

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll
index 040bee273e9f..fe97f7894cfd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll
@@ -158,7 +158,7 @@ define <4 x i64> @vwmaccu_v4i64(<4 x i32>* %x, <4 x i32>* %y, <4 x i64> %z) {
 define <32 x i16> @vwmaccu_v32i16(<32 x i8>* %x, <32 x i8>* %y, <32 x i16> %z) {
 ; CHECK-LABEL: vwmaccu_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v12, (a0)
 ; CHECK-NEXT:    vle8.v v14, (a1)
@@ -210,7 +210,7 @@ define <8 x i64> @vwmaccu_v8i64(<8 x i32>* %x, <8 x i32>* %y, <8 x i64> %z) {
 define <64 x i16> @vwmaccu_v64i16(<64 x i8>* %x, <64 x i8>* %y, <64 x i16> %z) {
 ; CHECK-LABEL: vwmaccu_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v16, (a0)
 ; CHECK-NEXT:    vle8.v v20, (a1)
@@ -228,7 +228,7 @@ define <64 x i16> @vwmaccu_v64i16(<64 x i8>* %x, <64 x i8>* %y, <64 x i16> %z) {
 define <32 x i32> @vwmaccu_v32i32(<32 x i16>* %x, <32 x i16>* %y, <32 x i32> %z) {
 ; CHECK-LABEL: vwmaccu_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v16, (a0)
 ; CHECK-NEXT:    vle16.v v20, (a1)
@@ -416,7 +416,7 @@ define <4 x i64> @vwmaccu_vx_v4i64(<4 x i32>* %x, i32 %y, <4 x i64> %z) {
 define <32 x i16> @vwmaccu_vx_v32i16(<32 x i8>* %x, i8 %y, <32 x i16> %z) {
 ; CHECK-LABEL: vwmaccu_vx_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v12, (a0)
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v12
@@ -468,7 +468,7 @@ define <8 x i64> @vwmaccu_vx_v8i64(<8 x i32>* %x, i32 %y, <8 x i64> %z) {
 define <64 x i16> @vwmaccu_vx_v64i16(<64 x i8>* %x, i8 %y, <64 x i16> %z) {
 ; CHECK-LABEL: vwmaccu_vx_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v16, (a0)
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v16
@@ -486,7 +486,7 @@ define <64 x i16> @vwmaccu_vx_v64i16(<64 x i8>* %x, i8 %y, <64 x i16> %z) {
 define <32 x i32> @vwmaccu_vx_v32i32(<32 x i16>* %x, i16 %y, <32 x i32> %z) {
 ; CHECK-LABEL: vwmaccu_vx_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v16, (a0)
 ; CHECK-NEXT:    vwmaccu.vx v8, a1, v16

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
index 8439b357b30f..d2553b8accb2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
@@ -149,7 +149,7 @@ define <4 x i64> @vwmul_v4i64(<4 x i32>* %x, <4 x i32>* %y) {
 define <32 x i16> @vwmul_v32i16(<32 x i8>* %x, <32 x i8>* %y) {
 ; CHECK-LABEL: vwmul_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v12, (a0)
 ; CHECK-NEXT:    vle8.v v14, (a1)
@@ -198,7 +198,7 @@ define <8 x  i64> @vwmul_v8i64(<8 x  i32>* %x, <8 x  i32>* %y) {
 define <64 x i16> @vwmul_v64i16(<64 x i8>* %x, <64 x i8>* %y) {
 ; CHECK-LABEL: vwmul_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v16, (a0)
 ; CHECK-NEXT:    vle8.v v20, (a1)
@@ -215,7 +215,7 @@ define <64 x i16> @vwmul_v64i16(<64 x i8>* %x, <64 x i8>* %y) {
 define <32 x i32> @vwmul_v32i32(<32 x i16>* %x, <32 x i16>* %y) {
 ; CHECK-LABEL: vwmul_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v16, (a0)
 ; CHECK-NEXT:    vle16.v v20, (a1)
@@ -253,11 +253,11 @@ define <128 x i16> @vwmul_v128i16(<128 x i8>* %x, <128 x i8>* %y) {
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    slli a2, a2, 3
 ; CHECK-NEXT:    sub sp, sp, a2
-; CHECK-NEXT:    addi a2, zero, 128
+; CHECK-NEXT:    li a2, 128
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v16, (a0)
 ; CHECK-NEXT:    vle8.v v24, (a1)
-; CHECK-NEXT:    addi a0, zero, 64
+; CHECK-NEXT:    li a0, 64
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v16, a0
 ; CHECK-NEXT:    addi a1, sp, 16
@@ -289,11 +289,11 @@ define <64 x i32> @vwmul_v64i32(<64 x i16>* %x, <64 x i16>* %y) {
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    slli a2, a2, 3
 ; CHECK-NEXT:    sub sp, sp, a2
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v16, (a0)
 ; CHECK-NEXT:    vle16.v v24, (a1)
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v16, a0
 ; CHECK-NEXT:    addi a1, sp, 16
@@ -325,7 +325,7 @@ define <32 x i64> @vwmul_v32i64(<32 x i32>* %x, <32 x i32>* %y) {
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    slli a2, a2, 3
 ; CHECK-NEXT:    sub sp, sp, a2
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v16, (a0)
 ; CHECK-NEXT:    vle32.v v24, (a1)
@@ -553,7 +553,7 @@ define <4 x i64> @vwmul_vx_v4i64(<4 x i32>* %x, i32 %y) {
 define <32 x i16> @vwmul_vx_v32i16(<32 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmul_vx_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v12, (a0)
 ; CHECK-NEXT:    vwmul.vx v8, v12, a1
@@ -602,7 +602,7 @@ define <8 x i64> @vwmul_vx_v8i64(<8 x i32>* %x, i32 %y) {
 define <64 x i16> @vwmul_vx_v64i16(<64 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmul_vx_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v16, (a0)
 ; CHECK-NEXT:    vwmul.vx v8, v16, a1
@@ -619,7 +619,7 @@ define <64 x i16> @vwmul_vx_v64i16(<64 x i8>* %x, i8 %y) {
 define <32 x i32> @vwmul_vx_v32i32(<32 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: vwmul_vx_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v16, (a0)
 ; CHECK-NEXT:    vwmul.vx v8, v16, a1

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
index 7d3ca3bd1c21..425192228490 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
@@ -149,7 +149,7 @@ define <4 x i64> @vwmulu_v4i64(<4 x i32>* %x, <4 x i32>* %y) {
 define <32 x i16> @vwmulu_v32i16(<32 x i8>* %x, <32 x i8>* %y) {
 ; CHECK-LABEL: vwmulu_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v12, (a0)
 ; CHECK-NEXT:    vle8.v v14, (a1)
@@ -198,7 +198,7 @@ define <8 x  i64> @vwmulu_v8i64(<8 x  i32>* %x, <8 x  i32>* %y) {
 define <64 x i16> @vwmulu_v64i16(<64 x i8>* %x, <64 x i8>* %y) {
 ; CHECK-LABEL: vwmulu_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v16, (a0)
 ; CHECK-NEXT:    vle8.v v20, (a1)
@@ -215,7 +215,7 @@ define <64 x i16> @vwmulu_v64i16(<64 x i8>* %x, <64 x i8>* %y) {
 define <32 x i32> @vwmulu_v32i32(<32 x i16>* %x, <32 x i16>* %y) {
 ; CHECK-LABEL: vwmulu_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v16, (a0)
 ; CHECK-NEXT:    vle16.v v20, (a1)
@@ -253,11 +253,11 @@ define <128 x i16> @vwmulu_v128i16(<128 x i8>* %x, <128 x i8>* %y) {
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    slli a2, a2, 3
 ; CHECK-NEXT:    sub sp, sp, a2
-; CHECK-NEXT:    addi a2, zero, 128
+; CHECK-NEXT:    li a2, 128
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
 ; CHECK-NEXT:    vle8.v v16, (a0)
 ; CHECK-NEXT:    vle8.v v24, (a1)
-; CHECK-NEXT:    addi a0, zero, 64
+; CHECK-NEXT:    li a0, 64
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v16, a0
 ; CHECK-NEXT:    addi a1, sp, 16
@@ -289,11 +289,11 @@ define <64 x i32> @vwmulu_v64i32(<64 x i16>* %x, <64 x i16>* %y) {
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    slli a2, a2, 3
 ; CHECK-NEXT:    sub sp, sp, a2
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v16, (a0)
 ; CHECK-NEXT:    vle16.v v24, (a1)
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
 ; CHECK-NEXT:    vslidedown.vx v8, v16, a0
 ; CHECK-NEXT:    addi a1, sp, 16
@@ -325,7 +325,7 @@ define <32 x i64> @vwmulu_v32i64(<32 x i32>* %x, <32 x i32>* %y) {
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    slli a2, a2, 3
 ; CHECK-NEXT:    sub sp, sp, a2
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v16, (a0)
 ; CHECK-NEXT:    vle32.v v24, (a1)
@@ -553,7 +553,7 @@ define <4 x i64> @vwmulu_vx_v4i64(<4 x i32>* %x, i32 %y) {
 define <32 x i16> @vwmulu_vx_v32i16(<32 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmulu_vx_v32i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
 ; CHECK-NEXT:    vle8.v v12, (a0)
 ; CHECK-NEXT:    vwmulu.vx v8, v12, a1
@@ -602,7 +602,7 @@ define <8 x i64> @vwmulu_vx_v8i64(<8 x i32>* %x, i32 %y) {
 define <64 x i16> @vwmulu_vx_v64i16(<64 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmulu_vx_v64i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
 ; CHECK-NEXT:    vle8.v v16, (a0)
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a1
@@ -619,7 +619,7 @@ define <64 x i16> @vwmulu_vx_v64i16(<64 x i8>* %x, i8 %y) {
 define <32 x i32> @vwmulu_vx_v32i32(<32 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: vwmulu_vx_v32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v16, (a0)
 ; CHECK-NEXT:    vwmulu.vx v8, v16, a1

diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
index e2db8c1ee3ec..14c41e6e1f7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
@@ -808,7 +808,7 @@ define <vscale x 8 x i64> @insertelt_nxv8i64_idx(<vscale x 8 x i64> %v, i64 %elt
 define <vscale x 2 x i64> @insertelt_nxv2i64_0_c10(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: insertelt_nxv2i64_0_c10:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 10
+; CHECK-NEXT:    li a0, 10
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, tu, mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
@@ -819,7 +819,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_0_c10(<vscale x 2 x i64> %v) {
 define <vscale x 2 x i64> @insertelt_nxv2i64_imm_c10(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: insertelt_nxv2i64_imm_c10:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 10
+; CHECK-NEXT:    li a0, 10
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, mu
@@ -832,7 +832,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_imm_c10(<vscale x 2 x i64> %v) {
 define <vscale x 2 x i64> @insertelt_nxv2i64_idx_c10(<vscale x 2 x i64> %v, i32 %idx) {
 ; CHECK-LABEL: insertelt_nxv2i64_idx_c10:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, 10
+; CHECK-NEXT:    li a1, 10
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a1
 ; CHECK-NEXT:    addi a1, a0, 1
@@ -846,7 +846,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_idx_c10(<vscale x 2 x i64> %v, i32
 define <vscale x 2 x i64> @insertelt_nxv2i64_0_cn1(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: insertelt_nxv2i64_0_cn1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -1
+; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, tu, mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
@@ -857,7 +857,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_0_cn1(<vscale x 2 x i64> %v) {
 define <vscale x 2 x i64> @insertelt_nxv2i64_imm_cn1(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: insertelt_nxv2i64_imm_cn1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -1
+; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, mu
@@ -870,7 +870,7 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_imm_cn1(<vscale x 2 x i64> %v) {
 define <vscale x 2 x i64> @insertelt_nxv2i64_idx_cn1(<vscale x 2 x i64> %v, i32 %idx) {
 ; CHECK-LABEL: insertelt_nxv2i64_idx_cn1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a1, zero, -1
+; CHECK-NEXT:    li a1, -1
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vmv.s.x v10, a1
 ; CHECK-NEXT:    addi a1, a0, 1

diff --git a/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll b/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
index c696f5106fa4..4bd06da73d6f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
@@ -5,11 +5,11 @@
 define void @interleave256(<256 x i16>* %agg.result, <128 x i16>* %0, <128 x i16>* %1) {
 ; RV64-1024-LABEL: interleave256:
 ; RV64-1024:       # %bb.0: # %entry
-; RV64-1024-NEXT:    addi a3, zero, 128
+; RV64-1024-NEXT:    li a3, 128
 ; RV64-1024-NEXT:    vsetvli zero, a3, e16, m2, ta, mu
 ; RV64-1024-NEXT:    vle16.v v12, (a1)
 ; RV64-1024-NEXT:    vle16.v v16, (a2)
-; RV64-1024-NEXT:    addi a1, zero, 256
+; RV64-1024-NEXT:    li a1, 256
 ; RV64-1024-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; RV64-1024-NEXT:    vmv.v.i v8, 0
 ; RV64-1024-NEXT:    vsetvli zero, a3, e16, m4, tu, mu
@@ -53,11 +53,11 @@ define void @interleave256(<256 x i16>* %agg.result, <128 x i16>* %0, <128 x i16
 ;
 ; RV64-2048-LABEL: interleave256:
 ; RV64-2048:       # %bb.0: # %entry
-; RV64-2048-NEXT:    addi a3, zero, 128
+; RV64-2048-NEXT:    li a3, 128
 ; RV64-2048-NEXT:    vsetvli zero, a3, e16, m1, ta, mu
 ; RV64-2048-NEXT:    vle16.v v10, (a1)
 ; RV64-2048-NEXT:    vle16.v v12, (a2)
-; RV64-2048-NEXT:    addi a1, zero, 256
+; RV64-2048-NEXT:    li a1, 256
 ; RV64-2048-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; RV64-2048-NEXT:    vmv.v.i v8, 0
 ; RV64-2048-NEXT:    vsetvli zero, a3, e16, m2, tu, mu
@@ -114,10 +114,10 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-1024-NEXT:    addi sp, sp, -16
 ; RV64-1024-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-1024-NEXT:    csrr a3, vlenb
-; RV64-1024-NEXT:    addi a4, zero, 40
+; RV64-1024-NEXT:    li a4, 40
 ; RV64-1024-NEXT:    mul a3, a3, a4
 ; RV64-1024-NEXT:    sub sp, sp, a3
-; RV64-1024-NEXT:    addi a3, zero, 256
+; RV64-1024-NEXT:    li a3, 256
 ; RV64-1024-NEXT:    vsetvli zero, a3, e16, m4, ta, mu
 ; RV64-1024-NEXT:    vle16.v v16, (a1)
 ; RV64-1024-NEXT:    vle16.v v8, (a2)
@@ -126,7 +126,7 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-1024-NEXT:    add a1, sp, a1
 ; RV64-1024-NEXT:    addi a1, a1, 16
 ; RV64-1024-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-1024-NEXT:    addi a1, zero, 512
+; RV64-1024-NEXT:    li a1, 512
 ; RV64-1024-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; RV64-1024-NEXT:    vmv.v.i v8, 0
 ; RV64-1024-NEXT:    csrr a2, vlenb
@@ -146,7 +146,7 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-1024-NEXT:    vid.v v24
 ; RV64-1024-NEXT:    vsrl.vi v16, v24, 1
 ; RV64-1024-NEXT:    csrr a2, vlenb
-; RV64-1024-NEXT:    addi a4, zero, 24
+; RV64-1024-NEXT:    li a4, 24
 ; RV64-1024-NEXT:    mul a2, a2, a4
 ; RV64-1024-NEXT:    add a2, sp, a2
 ; RV64-1024-NEXT:    addi a2, a2, 16
@@ -212,7 +212,7 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-1024-NEXT:    vslideup.vi v0, v16, 7
 ; RV64-1024-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; RV64-1024-NEXT:    csrr a1, vlenb
-; RV64-1024-NEXT:    addi a2, zero, 24
+; RV64-1024-NEXT:    li a2, 24
 ; RV64-1024-NEXT:    mul a1, a1, a2
 ; RV64-1024-NEXT:    add a1, sp, a1
 ; RV64-1024-NEXT:    addi a1, a1, 16
@@ -225,7 +225,7 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ; RV64-1024-NEXT:    vrgather.vv v8, v24, v16, v0.t
 ; RV64-1024-NEXT:    vse16.v v8, (a0)
 ; RV64-1024-NEXT:    csrr a0, vlenb
-; RV64-1024-NEXT:    addi a1, zero, 40
+; RV64-1024-NEXT:    li a1, 40
 ; RV64-1024-NEXT:    mul a0, a0, a1
 ; RV64-1024-NEXT:    add sp, sp, a0
 ; RV64-1024-NEXT:    addi sp, sp, 16
@@ -233,11 +233,11 @@ define void @interleave512(<512 x i16>* %agg.result, <256 x i16>* %0, <256 x i16
 ;
 ; RV64-2048-LABEL: interleave512:
 ; RV64-2048:       # %bb.0: # %entry
-; RV64-2048-NEXT:    addi a3, zero, 256
+; RV64-2048-NEXT:    li a3, 256
 ; RV64-2048-NEXT:    vsetvli zero, a3, e16, m2, ta, mu
 ; RV64-2048-NEXT:    vle16.v v12, (a1)
 ; RV64-2048-NEXT:    vle16.v v16, (a2)
-; RV64-2048-NEXT:    addi a1, zero, 512
+; RV64-2048-NEXT:    li a1, 512
 ; RV64-2048-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; RV64-2048-NEXT:    vmv.v.i v8, 0
 ; RV64-2048-NEXT:    vsetvli zero, a3, e16, m4, tu, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
index e93ea0b2d851..f325c781b097 100644
--- a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
@@ -12,7 +12,7 @@ define <vscale x 64 x i8> @callee(<vscale x 64 x i8> %arg0, <vscale x 64 x i8> %
 ; RV64IV-LABEL: callee:
 ; RV64IV:       # %bb.0:
 ; RV64IV-NEXT:    vl8r.v v24, (a0)
-; RV64IV-NEXT:    addi a0, zero, 1024
+; RV64IV-NEXT:    li a0, 1024
 ; RV64IV-NEXT:    vsetvli zero, a0, e8, m8, tu, mu
 ; RV64IV-NEXT:    vmacc.vv v8, v16, v24
 ; RV64IV-NEXT:    ret
@@ -39,7 +39,7 @@ define <vscale x 64 x i8> @caller() {
 ; RV64IV-NEXT:    sub sp, sp, a0
 ; RV64IV-NEXT:    andi sp, sp, -64
 ; RV64IV-NEXT:    csrr a0, vlenb
-; RV64IV-NEXT:    addi a1, zero, 24
+; RV64IV-NEXT:    li a1, 24
 ; RV64IV-NEXT:    mul a0, a0, a1
 ; RV64IV-NEXT:    add a0, sp, a0
 ; RV64IV-NEXT:    addi a0, a0, 48

diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
index c8a96a8630c1..07ac76efd869 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
@@ -254,7 +254,7 @@ define <vscale x 4 x i32> @spill_zvlsseg3_nxv4i32(i32* %base, i32 %vl) nounwind
 ; SPILL-O2:       # %bb.0: # %entry
 ; SPILL-O2-NEXT:    addi sp, sp, -16
 ; SPILL-O2-NEXT:    csrr a2, vlenb
-; SPILL-O2-NEXT:    addi a3, zero, 6
+; SPILL-O2-NEXT:    li a3, 6
 ; SPILL-O2-NEXT:    mul a2, a2, a3
 ; SPILL-O2-NEXT:    sub sp, sp, a2
 ; SPILL-O2-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -279,7 +279,7 @@ define <vscale x 4 x i32> @spill_zvlsseg3_nxv4i32(i32* %base, i32 %vl) nounwind
 ; SPILL-O2-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; SPILL-O2-NEXT:    csrr a0, vlenb
-; SPILL-O2-NEXT:    addi a1, zero, 6
+; SPILL-O2-NEXT:    li a1, 6
 ; SPILL-O2-NEXT:    mul a0, a0, a1
 ; SPILL-O2-NEXT:    add sp, sp, a0
 ; SPILL-O2-NEXT:    addi sp, sp, 16

diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
index e9774a927258..8388c4160bef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
@@ -254,7 +254,7 @@ define <vscale x 4 x i32> @spill_zvlsseg3_nxv4i32(i32* %base, i64 %vl) nounwind
 ; SPILL-O2:       # %bb.0: # %entry
 ; SPILL-O2-NEXT:    addi sp, sp, -16
 ; SPILL-O2-NEXT:    csrr a2, vlenb
-; SPILL-O2-NEXT:    addi a3, zero, 6
+; SPILL-O2-NEXT:    li a3, 6
 ; SPILL-O2-NEXT:    mul a2, a2, a3
 ; SPILL-O2-NEXT:    sub sp, sp, a2
 ; SPILL-O2-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -279,7 +279,7 @@ define <vscale x 4 x i32> @spill_zvlsseg3_nxv4i32(i32* %base, i64 %vl) nounwind
 ; SPILL-O2-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; SPILL-O2-NEXT:    csrr a0, vlenb
-; SPILL-O2-NEXT:    addi a1, zero, 6
+; SPILL-O2-NEXT:    li a1, 6
 ; SPILL-O2-NEXT:    mul a0, a0, a1
 ; SPILL-O2-NEXT:    add sp, sp, a0
 ; SPILL-O2-NEXT:    addi sp, sp, 16

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
index e80eecdb56a0..e28902dc6e4c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
@@ -86,7 +86,7 @@ define dso_local signext i32 @main() #0 {
 ; CHECK-NEXT:    sub a0, s0, a0
 ; CHECK-NEXT:    addi a0, a0, -112
 ; CHECK-NEXT:    vs8r.v v8, (a0)
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    sw a0, -68(s0)
 ; CHECK-NEXT:    sw a0, -72(s0)
 ; CHECK-NEXT:    sw a0, -76(s0)
@@ -137,7 +137,7 @@ define dso_local signext i32 @main() #0 {
 ; CHECK-NEXT:    sd t0, 0(sp)
 ; CHECK-NEXT:    call lots_args
 ; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    mv a0, zero
+; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    slli a1, a1, 3
 ; CHECK-NEXT:    add sp, sp, a1

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll
index 7de0a3b3a6f7..32f05623149e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll
@@ -5,7 +5,7 @@
 define i32 @vscale_zero() nounwind {
 ; CHECK-LABEL: vscale_zero:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    mv a0, zero
+; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:    ret
 entry:
   %0 = call i32 @llvm.vscale.i32()

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll
index 8ff72b3ead29..d233e14ba767 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll
@@ -8,13 +8,13 @@
 define i64 @vscale_zero() nounwind {
 ; RV64-LABEL: vscale_zero:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
 ;
 ; RV32-LABEL: vscale_zero:
 ; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    mv a0, zero
-; RV32-NEXT:    mv a1, zero
+; RV32-NEXT:    li a0, 0
+; RV32-NEXT:    li a1, 0
 ; RV32-NEXT:    ret
 entry:
   %0 = call i64 @llvm.vscale.i64()
@@ -33,7 +33,7 @@ define i64 @vscale_one() nounwind {
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    srli a0, a0, 3
-; RV32-NEXT:    mv a1, zero
+; RV32-NEXT:    li a1, 0
 ; RV32-NEXT:    ret
 entry:
   %0 = call i64 @llvm.vscale.i64()
@@ -52,7 +52,7 @@ define i64 @vscale_uimmpow2xlen() nounwind {
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 3
-; RV32-NEXT:    mv a1, zero
+; RV32-NEXT:    li a1, 0
 ; RV32-NEXT:    ret
 entry:
   %0 = call i64 @llvm.vscale.i64()
@@ -73,7 +73,7 @@ define i64 @vscale_non_pow2() nounwind {
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a1, a0, 1
 ; RV32-NEXT:    add a0, a1, a0
-; RV32-NEXT:    mv a1, zero
+; RV32-NEXT:    li a1, 0
 ; RV32-NEXT:    ret
 entry:
   %0 = call i64 @llvm.vscale.i64()
@@ -97,7 +97,7 @@ define i64 @vscale_select(i32 %x, i32 %y) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    srli a0, a0, 3
-; RV32-NEXT:    mv a1, zero
+; RV32-NEXT:    li a1, 0
 ; RV32-NEXT:    ret
   %a = call i64 @llvm.vscale.i64()
   %b = and i64 %a, 4294967295

diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll
index 2d6caeee8784..e3ff3ed0e043 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll
@@ -325,7 +325,7 @@ define <vscale x 8 x i1> @icmp_ult_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: icmp_ult_vi_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    li a0, -16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
 ; CHECK-NEXT:    ret
@@ -638,7 +638,7 @@ define <vscale x 8 x i1> @icmp_slt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: icmp_slt_vi_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    li a0, -16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
 ; CHECK-NEXT:    ret
@@ -1081,7 +1081,7 @@ define <vscale x 8 x i1> @icmp_ult_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: icmp_ult_vi_nxv8i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    li a0, -16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
 ; CHECK-NEXT:    ret
@@ -1380,7 +1380,7 @@ define <vscale x 8 x i1> @icmp_slt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: icmp_slt_vi_nxv8i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    li a0, -16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
 ; CHECK-NEXT:    ret
@@ -1809,7 +1809,7 @@ define <vscale x 8 x i1> @icmp_ult_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: icmp_ult_vi_nxv8i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    li a0, -16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
 ; CHECK-NEXT:    ret
@@ -2108,7 +2108,7 @@ define <vscale x 8 x i1> @icmp_slt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: icmp_slt_vi_nxv8i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    li a0, -16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
 ; CHECK-NEXT:    ret
@@ -2601,7 +2601,7 @@ define <vscale x 8 x i1> @icmp_ult_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: icmp_ult_vi_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    li a0, -16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
 ; CHECK-NEXT:    ret
@@ -2951,7 +2951,7 @@ define <vscale x 8 x i1> @icmp_slt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: icmp_slt_vi_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    li a0, -16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll
index b83640584d11..803ecdda84a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll
@@ -339,7 +339,7 @@ define <vscale x 8 x i1> @icmp_ult_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: icmp_ult_vi_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    li a0, -16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
 ; CHECK-NEXT:    ret
@@ -652,7 +652,7 @@ define <vscale x 8 x i1> @icmp_slt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: icmp_slt_vi_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    li a0, -16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
 ; CHECK-NEXT:    ret
@@ -1081,7 +1081,7 @@ define <vscale x 8 x i1> @icmp_ult_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: icmp_ult_vi_nxv8i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    li a0, -16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
 ; CHECK-NEXT:    ret
@@ -1380,7 +1380,7 @@ define <vscale x 8 x i1> @icmp_slt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: icmp_slt_vi_nxv8i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    li a0, -16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
 ; CHECK-NEXT:    ret
@@ -1809,7 +1809,7 @@ define <vscale x 8 x i1> @icmp_ult_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: icmp_ult_vi_nxv8i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    li a0, -16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
 ; CHECK-NEXT:    ret
@@ -2108,7 +2108,7 @@ define <vscale x 8 x i1> @icmp_slt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: icmp_slt_vi_nxv8i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    li a0, -16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
 ; CHECK-NEXT:    ret
@@ -2537,7 +2537,7 @@ define <vscale x 8 x i1> @icmp_ult_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: icmp_ult_vi_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    li a0, -16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v0, v8, a0
 ; CHECK-NEXT:    ret
@@ -2836,7 +2836,7 @@ define <vscale x 8 x i1> @icmp_slt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: icmp_slt_vi_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    li a0, -16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmslt.vx v0, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index f51417c1192c..550ea939f67f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -5,7 +5,7 @@
 define void @sink_splat_mul(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_mul:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:    li a2, 1024
 ; CHECK-NEXT:  .LBB0_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -41,7 +41,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_add(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_add:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:    li a2, 1024
 ; CHECK-NEXT:  .LBB1_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -77,7 +77,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_sub(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_sub:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:    li a2, 1024
 ; CHECK-NEXT:  .LBB2_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -113,7 +113,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_rsub(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_rsub:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:    li a2, 1024
 ; CHECK-NEXT:  .LBB3_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -149,7 +149,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_and(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_and:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:    li a2, 1024
 ; CHECK-NEXT:  .LBB4_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -185,7 +185,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_or(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_or:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:    li a2, 1024
 ; CHECK-NEXT:  .LBB5_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -221,7 +221,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_xor(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_xor:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:    li a2, 1024
 ; CHECK-NEXT:  .LBB6_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -259,13 +259,13 @@ define void @sink_splat_mul_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    srli a7, a2, 1
-; CHECK-NEXT:    addi a3, zero, 1024
+; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a7, .LBB7_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv t0, zero
+; CHECK-NEXT:    li t0, 0
 ; CHECK-NEXT:    j .LBB7_5
 ; CHECK-NEXT:  .LBB7_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    remu a6, a3, a7
 ; CHECK-NEXT:    sub t0, a3, a6
 ; CHECK-NEXT:    slli a4, a2, 1
@@ -352,13 +352,13 @@ define void @sink_splat_add_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    srli a7, a2, 1
-; CHECK-NEXT:    addi a3, zero, 1024
+; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a7, .LBB8_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv t0, zero
+; CHECK-NEXT:    li t0, 0
 ; CHECK-NEXT:    j .LBB8_5
 ; CHECK-NEXT:  .LBB8_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    remu a6, a3, a7
 ; CHECK-NEXT:    sub t0, a3, a6
 ; CHECK-NEXT:    slli a4, a2, 1
@@ -445,13 +445,13 @@ define void @sink_splat_sub_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    srli a7, a2, 1
-; CHECK-NEXT:    addi a3, zero, 1024
+; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a7, .LBB9_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv t0, zero
+; CHECK-NEXT:    li t0, 0
 ; CHECK-NEXT:    j .LBB9_5
 ; CHECK-NEXT:  .LBB9_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    remu a6, a3, a7
 ; CHECK-NEXT:    sub t0, a3, a6
 ; CHECK-NEXT:    slli a4, a2, 1
@@ -538,13 +538,13 @@ define void @sink_splat_rsub_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    srli a7, a2, 1
-; CHECK-NEXT:    addi a3, zero, 1024
+; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a7, .LBB10_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv t0, zero
+; CHECK-NEXT:    li t0, 0
 ; CHECK-NEXT:    j .LBB10_5
 ; CHECK-NEXT:  .LBB10_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    remu a6, a3, a7
 ; CHECK-NEXT:    sub t0, a3, a6
 ; CHECK-NEXT:    slli a4, a2, 1
@@ -631,13 +631,13 @@ define void @sink_splat_and_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    srli a7, a2, 1
-; CHECK-NEXT:    addi a3, zero, 1024
+; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a7, .LBB11_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv t0, zero
+; CHECK-NEXT:    li t0, 0
 ; CHECK-NEXT:    j .LBB11_5
 ; CHECK-NEXT:  .LBB11_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    remu a6, a3, a7
 ; CHECK-NEXT:    sub t0, a3, a6
 ; CHECK-NEXT:    slli a4, a2, 1
@@ -724,13 +724,13 @@ define void @sink_splat_or_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    srli a7, a2, 1
-; CHECK-NEXT:    addi a3, zero, 1024
+; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a7, .LBB12_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv t0, zero
+; CHECK-NEXT:    li t0, 0
 ; CHECK-NEXT:    j .LBB12_5
 ; CHECK-NEXT:  .LBB12_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    remu a6, a3, a7
 ; CHECK-NEXT:    sub t0, a3, a6
 ; CHECK-NEXT:    slli a4, a2, 1
@@ -817,13 +817,13 @@ define void @sink_splat_xor_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    srli a7, a2, 1
-; CHECK-NEXT:    addi a3, zero, 1024
+; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a7, .LBB13_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv t0, zero
+; CHECK-NEXT:    li t0, 0
 ; CHECK-NEXT:    j .LBB13_5
 ; CHECK-NEXT:  .LBB13_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    remu a6, a3, a7
 ; CHECK-NEXT:    sub t0, a3, a6
 ; CHECK-NEXT:    slli a4, a2, 1
@@ -908,7 +908,7 @@ for.body:                                         ; preds = %for.body.preheader,
 define void @sink_splat_shl(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_shl:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:    li a2, 1024
 ; CHECK-NEXT:  .LBB14_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -944,7 +944,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_lshr(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_lshr:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:    li a2, 1024
 ; CHECK-NEXT:  .LBB15_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -980,7 +980,7 @@ for.cond.cleanup:                                 ; preds = %vector.body
 define void @sink_splat_ashr(i32* nocapture %a, i32 signext %x) {
 ; CHECK-LABEL: sink_splat_ashr:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:    li a2, 1024
 ; CHECK-NEXT:  .LBB16_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -1018,13 +1018,13 @@ define void @sink_splat_shl_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    srli a7, a2, 1
-; CHECK-NEXT:    addi a3, zero, 1024
+; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a7, .LBB17_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv t0, zero
+; CHECK-NEXT:    li t0, 0
 ; CHECK-NEXT:    j .LBB17_5
 ; CHECK-NEXT:  .LBB17_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    remu a6, a3, a7
 ; CHECK-NEXT:    sub t0, a3, a6
 ; CHECK-NEXT:    slli a4, a2, 1
@@ -1111,13 +1111,13 @@ define void @sink_splat_lshr_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    srli a7, a2, 1
-; CHECK-NEXT:    addi a3, zero, 1024
+; CHECK-NEXT:    li a3, 1024
 ; CHECK-NEXT:    bgeu a3, a7, .LBB18_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv t0, zero
+; CHECK-NEXT:    li t0, 0
 ; CHECK-NEXT:    j .LBB18_5
 ; CHECK-NEXT:  .LBB18_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    remu a6, a3, a7
 ; CHECK-NEXT:    sub t0, a3, a6
 ; CHECK-NEXT:    slli a4, a2, 1
@@ -1204,13 +1204,13 @@ define void @sink_splat_ashr_scalable(i32* nocapture %a) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a3, vlenb
 ; CHECK-NEXT:    srli a7, a3, 1
-; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:    li a2, 1024
 ; CHECK-NEXT:    bgeu a2, a7, .LBB19_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a2, zero
+; CHECK-NEXT:    li a2, 0
 ; CHECK-NEXT:    j .LBB19_5
 ; CHECK-NEXT:  .LBB19_2: # %vector.ph
-; CHECK-NEXT:    mv a4, zero
+; CHECK-NEXT:    li a4, 0
 ; CHECK-NEXT:    remu a6, a2, a7
 ; CHECK-NEXT:    sub a2, a2, a6
 ; CHECK-NEXT:    slli a5, a3, 1
@@ -1296,7 +1296,7 @@ define void @sink_splat_fmul(float* nocapture %a, float %x) {
 ; CHECK-LABEL: sink_splat_fmul:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a1
-; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:    li a1, 1024
 ; CHECK-NEXT:  .LBB20_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -1333,7 +1333,7 @@ define void @sink_splat_fdiv(float* nocapture %a, float %x) {
 ; CHECK-LABEL: sink_splat_fdiv:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a1
-; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:    li a1, 1024
 ; CHECK-NEXT:  .LBB21_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -1370,7 +1370,7 @@ define void @sink_splat_frdiv(float* nocapture %a, float %x) {
 ; CHECK-LABEL: sink_splat_frdiv:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a1
-; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:    li a1, 1024
 ; CHECK-NEXT:  .LBB22_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -1407,7 +1407,7 @@ define void @sink_splat_fadd(float* nocapture %a, float %x) {
 ; CHECK-LABEL: sink_splat_fadd:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a1
-; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:    li a1, 1024
 ; CHECK-NEXT:  .LBB23_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -1444,7 +1444,7 @@ define void @sink_splat_fsub(float* nocapture %a, float %x) {
 ; CHECK-LABEL: sink_splat_fsub:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a1
-; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:    li a1, 1024
 ; CHECK-NEXT:  .LBB24_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -1481,7 +1481,7 @@ define void @sink_splat_frsub(float* nocapture %a, float %x) {
 ; CHECK-LABEL: sink_splat_frsub:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a1
-; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:    li a1, 1024
 ; CHECK-NEXT:  .LBB25_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -1519,14 +1519,14 @@ define void @sink_splat_fmul_scalable(float* nocapture %a, float %x) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a7, vlenb
 ; CHECK-NEXT:    srli a3, a7, 2
-; CHECK-NEXT:    addi a4, zero, 1024
+; CHECK-NEXT:    li a4, 1024
 ; CHECK-NEXT:    fmv.w.x ft0, a1
 ; CHECK-NEXT:    bgeu a4, a3, .LBB26_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    li a1, 0
 ; CHECK-NEXT:    j .LBB26_5
 ; CHECK-NEXT:  .LBB26_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    remu a6, a4, a3
 ; CHECK-NEXT:    sub a1, a4, a6
 ; CHECK-NEXT:    mv a4, a0
@@ -1612,14 +1612,14 @@ define void @sink_splat_fdiv_scalable(float* nocapture %a, float %x) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a7, vlenb
 ; CHECK-NEXT:    srli a3, a7, 2
-; CHECK-NEXT:    addi a4, zero, 1024
+; CHECK-NEXT:    li a4, 1024
 ; CHECK-NEXT:    fmv.w.x ft0, a1
 ; CHECK-NEXT:    bgeu a4, a3, .LBB27_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    li a1, 0
 ; CHECK-NEXT:    j .LBB27_5
 ; CHECK-NEXT:  .LBB27_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    remu a6, a4, a3
 ; CHECK-NEXT:    sub a1, a4, a6
 ; CHECK-NEXT:    mv a4, a0
@@ -1705,14 +1705,14 @@ define void @sink_splat_frdiv_scalable(float* nocapture %a, float %x) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a7, vlenb
 ; CHECK-NEXT:    srli a3, a7, 2
-; CHECK-NEXT:    addi a4, zero, 1024
+; CHECK-NEXT:    li a4, 1024
 ; CHECK-NEXT:    fmv.w.x ft0, a1
 ; CHECK-NEXT:    bgeu a4, a3, .LBB28_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    li a1, 0
 ; CHECK-NEXT:    j .LBB28_5
 ; CHECK-NEXT:  .LBB28_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    remu a6, a4, a3
 ; CHECK-NEXT:    sub a1, a4, a6
 ; CHECK-NEXT:    mv a4, a0
@@ -1798,14 +1798,14 @@ define void @sink_splat_fadd_scalable(float* nocapture %a, float %x) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a7, vlenb
 ; CHECK-NEXT:    srli a3, a7, 2
-; CHECK-NEXT:    addi a4, zero, 1024
+; CHECK-NEXT:    li a4, 1024
 ; CHECK-NEXT:    fmv.w.x ft0, a1
 ; CHECK-NEXT:    bgeu a4, a3, .LBB29_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    li a1, 0
 ; CHECK-NEXT:    j .LBB29_5
 ; CHECK-NEXT:  .LBB29_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    remu a6, a4, a3
 ; CHECK-NEXT:    sub a1, a4, a6
 ; CHECK-NEXT:    mv a4, a0
@@ -1891,14 +1891,14 @@ define void @sink_splat_fsub_scalable(float* nocapture %a, float %x) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a7, vlenb
 ; CHECK-NEXT:    srli a3, a7, 2
-; CHECK-NEXT:    addi a4, zero, 1024
+; CHECK-NEXT:    li a4, 1024
 ; CHECK-NEXT:    fmv.w.x ft0, a1
 ; CHECK-NEXT:    bgeu a4, a3, .LBB30_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    li a1, 0
 ; CHECK-NEXT:    j .LBB30_5
 ; CHECK-NEXT:  .LBB30_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    remu a6, a4, a3
 ; CHECK-NEXT:    sub a1, a4, a6
 ; CHECK-NEXT:    mv a4, a0
@@ -1984,14 +1984,14 @@ define void @sink_splat_frsub_scalable(float* nocapture %a, float %x) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a7, vlenb
 ; CHECK-NEXT:    srli a3, a7, 2
-; CHECK-NEXT:    addi a4, zero, 1024
+; CHECK-NEXT:    li a4, 1024
 ; CHECK-NEXT:    fmv.w.x ft0, a1
 ; CHECK-NEXT:    bgeu a4, a3, .LBB31_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    li a1, 0
 ; CHECK-NEXT:    j .LBB31_5
 ; CHECK-NEXT:  .LBB31_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    li a5, 0
 ; CHECK-NEXT:    remu a6, a4, a3
 ; CHECK-NEXT:    sub a1, a4, a6
 ; CHECK-NEXT:    mv a4, a0
@@ -2076,7 +2076,7 @@ define void @sink_splat_fma(float* noalias nocapture %a, float* nocapture readon
 ; CHECK-LABEL: sink_splat_fma:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a2
-; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:    li a2, 1024
 ; CHECK-NEXT:  .LBB32_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -2118,7 +2118,7 @@ define void @sink_splat_fma_commute(float* noalias nocapture %a, float* nocaptur
 ; CHECK-LABEL: sink_splat_fma_commute:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a2
-; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:    li a2, 1024
 ; CHECK-NEXT:  .LBB33_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
@@ -2161,15 +2161,15 @@ define void @sink_splat_fma_scalable(float* noalias nocapture %a, float* noalias
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a7, vlenb
 ; CHECK-NEXT:    srli t1, a7, 2
-; CHECK-NEXT:    addi t0, zero, 1024
+; CHECK-NEXT:    li t0, 1024
 ; CHECK-NEXT:    fmv.w.x ft0, a2
 ; CHECK-NEXT:    bgeu t0, t1, .LBB34_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv t0, zero
+; CHECK-NEXT:    li t0, 0
 ; CHECK-NEXT:    j .LBB34_5
 ; CHECK-NEXT:  .LBB34_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
-; CHECK-NEXT:    mv a3, zero
+; CHECK-NEXT:    li a5, 0
+; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    remu a6, t0, t1
 ; CHECK-NEXT:    sub t0, t0, a6
 ; CHECK-NEXT:  .LBB34_3: # %vector.body
@@ -2265,15 +2265,15 @@ define void @sink_splat_fma_commute_scalable(float* noalias nocapture %a, float*
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a7, vlenb
 ; CHECK-NEXT:    srli t1, a7, 2
-; CHECK-NEXT:    addi t0, zero, 1024
+; CHECK-NEXT:    li t0, 1024
 ; CHECK-NEXT:    fmv.w.x ft0, a2
 ; CHECK-NEXT:    bgeu t0, t1, .LBB35_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv t0, zero
+; CHECK-NEXT:    li t0, 0
 ; CHECK-NEXT:    j .LBB35_5
 ; CHECK-NEXT:  .LBB35_2: # %vector.ph
-; CHECK-NEXT:    mv a5, zero
-; CHECK-NEXT:    mv a3, zero
+; CHECK-NEXT:    li a5, 0
+; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    remu a6, t0, t1
 ; CHECK-NEXT:    sub t0, t0, a6
 ; CHECK-NEXT:  .LBB35_3: # %vector.body
@@ -2372,7 +2372,7 @@ declare float @llvm.fma.f32(float, float, float)
 define void @sink_splat_icmp(i32* nocapture %x, i32 signext %y) {
 ; CHECK-LABEL: sink_splat_icmp:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:    li a2, 1024
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:  .LBB36_1: # %vector.body
@@ -2411,7 +2411,7 @@ define void @sink_splat_fcmp(float* nocapture %x, float %y) {
 ; CHECK-LABEL: sink_splat_fcmp:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    fmv.w.x ft0, a1
-; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:    li a1, 1024
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:  .LBB37_1: # %vector.body

diff --git a/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll
index 95cb0b391ec5..f79480992fc3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll
@@ -351,7 +351,7 @@ define <vscale x 1 x i64> @smulo_nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i6
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vmulh.vv v10, v8, v9
 ; CHECK-NEXT:    vmul.vv v8, v8, v9
-; CHECK-NEXT:    addi a0, zero, 63
+; CHECK-NEXT:    li a0, 63
 ; CHECK-NEXT:    vsra.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vv v0, v10, v9
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
@@ -371,7 +371,7 @@ define <vscale x 2 x i64> @smulo_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i6
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vmulh.vv v12, v8, v10
 ; CHECK-NEXT:    vmul.vv v8, v8, v10
-; CHECK-NEXT:    addi a0, zero, 63
+; CHECK-NEXT:    li a0, 63
 ; CHECK-NEXT:    vsra.vx v10, v8, a0
 ; CHECK-NEXT:    vmsne.vv v0, v12, v10
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
@@ -391,7 +391,7 @@ define <vscale x 4 x i64> @smulo_nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i6
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vmulh.vv v16, v8, v12
 ; CHECK-NEXT:    vmul.vv v8, v8, v12
-; CHECK-NEXT:    addi a0, zero, 63
+; CHECK-NEXT:    li a0, 63
 ; CHECK-NEXT:    vsra.vx v12, v8, a0
 ; CHECK-NEXT:    vmsne.vv v0, v16, v12
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
@@ -411,7 +411,7 @@ define <vscale x 8 x i64> @smulo_nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i6
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmulh.vv v24, v8, v16
 ; CHECK-NEXT:    vmul.vv v8, v8, v16
-; CHECK-NEXT:    addi a0, zero, 63
+; CHECK-NEXT:    li a0, 63
 ; CHECK-NEXT:    vsra.vx v16, v8, a0
 ; CHECK-NEXT:    vmsne.vv v0, v24, v16
 ; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0

diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
index aef5554ffa3d..9f4d075d6345 100644
--- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
@@ -69,7 +69,7 @@ define <vscale x 8 x i8> @mul_stepvector_nxv8i8() {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    addi a0, zero, 3
+; CHECK-NEXT:    li a0, 3
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -210,7 +210,7 @@ define <vscale x 16 x i16> @mul_stepvector_nxv16i16() {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    addi a0, zero, 3
+; CHECK-NEXT:    li a0, 3
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -327,7 +327,7 @@ define <vscale x 16 x i32> @mul_stepvector_nxv16i32() {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    addi a0, zero, 3
+; CHECK-NEXT:    li a0, 3
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -420,7 +420,7 @@ define <vscale x 8 x i64> @mul_stepvector_nxv8i64() {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vid.v v8
-; CHECK-NEXT:    addi a0, zero, 3
+; CHECK-NEXT:    li a0, 3
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -436,7 +436,7 @@ define <vscale x 8 x i64> @mul_bigimm_stepvector_nxv8i64() {
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    addi a0, zero, 7
+; RV32-NEXT:    li a0, 7
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    lui a0, 797989
 ; RV32-NEXT:    addi a0, a0, -683
@@ -560,7 +560,7 @@ define <vscale x 16 x i64> @mul_stepvector_nxv16i64() {
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vid.v v8
-; RV32-NEXT:    addi a0, zero, 3
+; RV32-NEXT:    li a0, 3
 ; RV32-NEXT:    vmul.vx v8, v8, a0
 ; RV32-NEXT:    vadd.vv v16, v8, v16
 ; RV32-NEXT:    addi sp, sp, 16
@@ -570,7 +570,7 @@ define <vscale x 16 x i64> @mul_stepvector_nxv16i64() {
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vid.v v8
-; RV64-NEXT:    addi a0, zero, 3
+; RV64-NEXT:    li a0, 3
 ; RV64-NEXT:    vmul.vx v8, v8, a0
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a1, a0, 1
@@ -590,7 +590,7 @@ define <vscale x 16 x i64> @mul_bigimm_stepvector_nxv16i64() {
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    addi a0, zero, 7
+; RV32-NEXT:    li a0, 7
 ; RV32-NEXT:    sw a0, 12(sp)
 ; RV32-NEXT:    lui a0, 797989
 ; RV32-NEXT:    addi a0, a0, -683
@@ -601,7 +601,7 @@ define <vscale x 16 x i64> @mul_bigimm_stepvector_nxv16i64() {
 ; RV32-NEXT:    mul a1, a0, a1
 ; RV32-NEXT:    sw a1, 8(sp)
 ; RV32-NEXT:    srli a0, a0, 3
-; RV32-NEXT:    addi a1, zero, 62
+; RV32-NEXT:    li a1, 62
 ; RV32-NEXT:    mul a1, a0, a1
 ; RV32-NEXT:    lui a2, 92455
 ; RV32-NEXT:    addi a2, a2, -1368

diff --git a/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll b/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll
index 837cbc3789a4..9defe1460df3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll
@@ -83,7 +83,7 @@ define <vscale x 1 x i16> @test_urem_vec_odd_divisor_eq0(<vscale x 1 x i16> %x)
 define <vscale x 1 x i16> @test_urem_vec_even_divisor_eq1(<vscale x 1 x i16> %x) nounwind {
 ; RV32-LABEL: test_urem_vec_even_divisor_eq1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; RV32-NEXT:    vsub.vx v8, v8, a0
 ; RV32-NEXT:    lui a0, 1048571
@@ -101,7 +101,7 @@ define <vscale x 1 x i16> @test_urem_vec_even_divisor_eq1(<vscale x 1 x i16> %x)
 ;
 ; RV64-LABEL: test_urem_vec_even_divisor_eq1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; RV64-NEXT:    vsub.vx v8, v8, a0
 ; RV64-NEXT:    lui a0, 1048571
@@ -129,7 +129,7 @@ define <vscale x 1 x i16> @test_urem_vec_even_divisor_eq1(<vscale x 1 x i16> %x)
 define <vscale x 1 x i16> @test_urem_vec_odd_divisor_eq1(<vscale x 1 x i16> %x) nounwind {
 ; RV32-LABEL: test_urem_vec_odd_divisor_eq1:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; RV32-NEXT:    vsub.vx v8, v8, a0
 ; RV32-NEXT:    lui a0, 1048573
@@ -144,7 +144,7 @@ define <vscale x 1 x i16> @test_urem_vec_odd_divisor_eq1(<vscale x 1 x i16> %x)
 ;
 ; RV64-LABEL: test_urem_vec_odd_divisor_eq1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; RV64-NEXT:    vsub.vx v8, v8, a0
 ; RV64-NEXT:    lui a0, 1048573

diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
index 2c0de9f7d45b..13c41718676f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
@@ -640,7 +640,7 @@ define <vscale x 128 x i8> @vadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a3, a2
 ; CHECK-NEXT:  .LBB49_2:
-; CHECK-NEXT:    mv a4, zero
+; CHECK-NEXT:    li a4, 0
 ; CHECK-NEXT:    vsetvli a5, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vlm.v v24, (a0)
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
@@ -670,7 +670,7 @@ define <vscale x 128 x i8> @vadd_vi_nxv128i8_unmasked(<vscale x 128 x i8> %va, i
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a2, a1
 ; CHECK-NEXT:  .LBB50_2:
-; CHECK-NEXT:    mv a3, zero
+; CHECK-NEXT:    li a3, 0
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
 ; CHECK-NEXT:    sub a1, a0, a1
 ; CHECK-NEXT:    vadd.vi v8, v8, -1
@@ -1533,7 +1533,7 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
 ; CHECK-LABEL: vadd_vi_nxv32i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    mv a2, zero
+; CHECK-NEXT:    li a2, 0
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    srli a4, a1, 2
 ; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, mu
@@ -1565,7 +1565,7 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
 define <vscale x 32 x i32> @vadd_vi_nxv32i32_unmasked(<vscale x 32 x i32> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vadd_vi_nxv32i32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    mv a2, zero
+; CHECK-NEXT:    li a2, 0
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    srli a4, a1, 2
 ; CHECK-NEXT:    vsetvli a3, zero, e8, m4, ta, mu
@@ -1607,7 +1607,7 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, <v
 ; CHECK-LABEL: vadd_vi_nxv32i32_evl_nx8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    mv a2, zero
+; CHECK-NEXT:    li a2, 0
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a4, a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll
index 203557c67a73..bea39f9740ca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll
@@ -51,7 +51,7 @@ define <vscale x 1 x i8> @vand_vi_nxv1i8_1(<vscale x 1 x i8> %va) {
 define <vscale x 1 x i8> @vand_vi_nxv1i8_2(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vand_vi_nxv1i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -110,7 +110,7 @@ define <vscale x 2 x i8> @vand_vi_nxv2i8_1(<vscale x 2 x i8> %va) {
 define <vscale x 2 x i8> @vand_vi_nxv2i8_2(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vand_vi_nxv2i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -169,7 +169,7 @@ define <vscale x 4 x i8> @vand_vi_nxv4i8_1(<vscale x 4 x i8> %va) {
 define <vscale x 4 x i8> @vand_vi_nxv4i8_2(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vand_vi_nxv4i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -228,7 +228,7 @@ define <vscale x 8 x i8> @vand_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
 define <vscale x 8 x i8> @vand_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vand_vi_nxv8i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -287,7 +287,7 @@ define <vscale x 16 x i8> @vand_vi_nxv16i8_1(<vscale x 16 x i8> %va) {
 define <vscale x 16 x i8> @vand_vi_nxv16i8_2(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vand_vi_nxv16i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -346,7 +346,7 @@ define <vscale x 32 x i8> @vand_vi_nxv32i8_1(<vscale x 32 x i8> %va) {
 define <vscale x 32 x i8> @vand_vi_nxv32i8_2(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: vand_vi_nxv32i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -405,7 +405,7 @@ define <vscale x 64 x i8> @vand_vi_nxv64i8_1(<vscale x 64 x i8> %va) {
 define <vscale x 64 x i8> @vand_vi_nxv64i8_2(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: vand_vi_nxv64i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -464,7 +464,7 @@ define <vscale x 1 x i16> @vand_vi_nxv1i16_1(<vscale x 1 x i16> %va) {
 define <vscale x 1 x i16> @vand_vi_nxv1i16_2(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: vand_vi_nxv1i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -523,7 +523,7 @@ define <vscale x 2 x i16> @vand_vi_nxv2i16_1(<vscale x 2 x i16> %va) {
 define <vscale x 2 x i16> @vand_vi_nxv2i16_2(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: vand_vi_nxv2i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -582,7 +582,7 @@ define <vscale x 4 x i16> @vand_vi_nxv4i16_1(<vscale x 4 x i16> %va) {
 define <vscale x 4 x i16> @vand_vi_nxv4i16_2(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: vand_vi_nxv4i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -641,7 +641,7 @@ define <vscale x 8 x i16> @vand_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
 define <vscale x 8 x i16> @vand_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: vand_vi_nxv8i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -700,7 +700,7 @@ define <vscale x 16 x i16> @vand_vi_nxv16i16_1(<vscale x 16 x i16> %va) {
 define <vscale x 16 x i16> @vand_vi_nxv16i16_2(<vscale x 16 x i16> %va) {
 ; CHECK-LABEL: vand_vi_nxv16i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -759,7 +759,7 @@ define <vscale x 32 x i16> @vand_vi_nxv32i16_1(<vscale x 32 x i16> %va) {
 define <vscale x 32 x i16> @vand_vi_nxv32i16_2(<vscale x 32 x i16> %va) {
 ; CHECK-LABEL: vand_vi_nxv32i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -818,7 +818,7 @@ define <vscale x 1 x i32> @vand_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
 define <vscale x 1 x i32> @vand_vi_nxv1i32_2(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vand_vi_nxv1i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @vand_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
 define <vscale x 2 x i32> @vand_vi_nxv2i32_2(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vand_vi_nxv2i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -936,7 +936,7 @@ define <vscale x 4 x i32> @vand_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
 define <vscale x 4 x i32> @vand_vi_nxv4i32_2(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vand_vi_nxv4i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -995,7 +995,7 @@ define <vscale x 8 x i32> @vand_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
 define <vscale x 8 x i32> @vand_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vand_vi_nxv8i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -1054,7 +1054,7 @@ define <vscale x 16 x i32> @vand_vi_nxv16i32_1(<vscale x 16 x i32> %va) {
 define <vscale x 16 x i32> @vand_vi_nxv16i32_2(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: vand_vi_nxv16i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -1126,7 +1126,7 @@ define <vscale x 1 x i64> @vand_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
 define <vscale x 1 x i64> @vand_vi_nxv1i64_2(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vand_vi_nxv1i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -1198,7 +1198,7 @@ define <vscale x 2 x i64> @vand_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
 define <vscale x 2 x i64> @vand_vi_nxv2i64_2(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vand_vi_nxv2i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -1270,7 +1270,7 @@ define <vscale x 4 x i64> @vand_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
 define <vscale x 4 x i64> @vand_vi_nxv4i64_2(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vand_vi_nxv4i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -1342,7 +1342,7 @@ define <vscale x 8 x i64> @vand_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
 define <vscale x 8 x i64> @vand_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vand_vi_nxv8i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll
index 915323d110cf..6273a55eb97b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll
@@ -27,7 +27,7 @@ define <vscale x 1 x i8> @vdiv_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
 define <vscale x 1 x i8> @vdiv_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vdiv_vi_nxv1i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 109
+; CHECK-NEXT:    li a0, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmulh.vx v9, v8, a0
 ; CHECK-NEXT:    vsub.vv v8, v9, v8
@@ -90,7 +90,7 @@ define <vscale x 2 x i8> @vdiv_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
 define <vscale x 2 x i8> @vdiv_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vdiv_vi_nxv2i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 109
+; CHECK-NEXT:    li a0, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmulh.vx v9, v8, a0
 ; CHECK-NEXT:    vsub.vv v8, v9, v8
@@ -129,7 +129,7 @@ define <vscale x 4 x i8> @vdiv_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
 define <vscale x 4 x i8> @vdiv_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vdiv_vi_nxv4i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 109
+; CHECK-NEXT:    li a0, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmulh.vx v9, v8, a0
 ; CHECK-NEXT:    vsub.vv v8, v9, v8
@@ -168,7 +168,7 @@ define <vscale x 8 x i8> @vdiv_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
 define <vscale x 8 x i8> @vdiv_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vdiv_vi_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 109
+; CHECK-NEXT:    li a0, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmulh.vx v9, v8, a0
 ; CHECK-NEXT:    vsub.vv v8, v9, v8
@@ -207,7 +207,7 @@ define <vscale x 16 x i8> @vdiv_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b
 define <vscale x 16 x i8> @vdiv_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vdiv_vi_nxv16i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 109
+; CHECK-NEXT:    li a0, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vmulh.vx v10, v8, a0
 ; CHECK-NEXT:    vsub.vv v8, v10, v8
@@ -246,7 +246,7 @@ define <vscale x 32 x i8> @vdiv_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b
 define <vscale x 32 x i8> @vdiv_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: vdiv_vi_nxv32i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 109
+; CHECK-NEXT:    li a0, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vmulh.vx v12, v8, a0
 ; CHECK-NEXT:    vsub.vv v8, v12, v8
@@ -285,7 +285,7 @@ define <vscale x 64 x i8> @vdiv_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b
 define <vscale x 64 x i8> @vdiv_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: vdiv_vi_nxv64i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 109
+; CHECK-NEXT:    li a0, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
 ; CHECK-NEXT:    vsub.vv v8, v16, v8
@@ -909,7 +909,7 @@ define <vscale x 1 x i64> @vdiv_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vmulh.vv v8, v8, v9
-; RV32-NEXT:    addi a0, zero, 63
+; RV32-NEXT:    li a0, 63
 ; RV32-NEXT:    vsrl.vx v9, v8, a0
 ; RV32-NEXT:    vsra.vi v8, v8, 1
 ; RV32-NEXT:    vadd.vv v8, v8, v9
@@ -928,7 +928,7 @@ define <vscale x 1 x i64> @vdiv_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; RV64-NEXT:    addi a0, a0, 1755
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmulh.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 63
+; RV64-NEXT:    li a0, 63
 ; RV64-NEXT:    vsrl.vx v9, v8, a0
 ; RV64-NEXT:    vsra.vi v8, v8, 1
 ; RV64-NEXT:    vadd.vv v8, v8, v9
@@ -989,7 +989,7 @@ define <vscale x 2 x i64> @vdiv_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vmulh.vv v8, v8, v10
-; RV32-NEXT:    addi a0, zero, 63
+; RV32-NEXT:    li a0, 63
 ; RV32-NEXT:    vsrl.vx v10, v8, a0
 ; RV32-NEXT:    vsra.vi v8, v8, 1
 ; RV32-NEXT:    vadd.vv v8, v8, v10
@@ -1008,7 +1008,7 @@ define <vscale x 2 x i64> @vdiv_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; RV64-NEXT:    addi a0, a0, 1755
 ; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; RV64-NEXT:    vmulh.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 63
+; RV64-NEXT:    li a0, 63
 ; RV64-NEXT:    vsrl.vx v10, v8, a0
 ; RV64-NEXT:    vsra.vi v8, v8, 1
 ; RV64-NEXT:    vadd.vv v8, v8, v10
@@ -1069,7 +1069,7 @@ define <vscale x 4 x i64> @vdiv_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vmulh.vv v8, v8, v12
-; RV32-NEXT:    addi a0, zero, 63
+; RV32-NEXT:    li a0, 63
 ; RV32-NEXT:    vsrl.vx v12, v8, a0
 ; RV32-NEXT:    vsra.vi v8, v8, 1
 ; RV32-NEXT:    vadd.vv v8, v8, v12
@@ -1088,7 +1088,7 @@ define <vscale x 4 x i64> @vdiv_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; RV64-NEXT:    addi a0, a0, 1755
 ; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; RV64-NEXT:    vmulh.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 63
+; RV64-NEXT:    li a0, 63
 ; RV64-NEXT:    vsrl.vx v12, v8, a0
 ; RV64-NEXT:    vsra.vi v8, v8, 1
 ; RV64-NEXT:    vadd.vv v8, v8, v12
@@ -1149,7 +1149,7 @@ define <vscale x 8 x i64> @vdiv_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmulh.vv v8, v8, v16
-; RV32-NEXT:    addi a0, zero, 63
+; RV32-NEXT:    li a0, 63
 ; RV32-NEXT:    vsrl.vx v16, v8, a0
 ; RV32-NEXT:    vsra.vi v8, v8, 1
 ; RV32-NEXT:    vadd.vv v8, v8, v16
@@ -1168,7 +1168,7 @@ define <vscale x 8 x i64> @vdiv_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; RV64-NEXT:    addi a0, a0, 1755
 ; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vmulh.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 63
+; RV64-NEXT:    li a0, 63
 ; RV64-NEXT:    vsrl.vx v16, v8, a0
 ; RV64-NEXT:    vsra.vi v8, v8, 1
 ; RV64-NEXT:    vadd.vv v8, v8, v16

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll
index 07f45a3010e4..5c3bcd40f4cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll
@@ -27,7 +27,7 @@ define <vscale x 1 x i8> @vdivu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b)
 define <vscale x 1 x i8> @vdivu_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vdivu_vi_nxv1i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 33
+; CHECK-NEXT:    li a0, 33
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 5
@@ -87,7 +87,7 @@ define <vscale x 2 x i8> @vdivu_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b)
 define <vscale x 2 x i8> @vdivu_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vdivu_vi_nxv2i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 33
+; CHECK-NEXT:    li a0, 33
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 5
@@ -123,7 +123,7 @@ define <vscale x 4 x i8> @vdivu_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b)
 define <vscale x 4 x i8> @vdivu_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vdivu_vi_nxv4i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 33
+; CHECK-NEXT:    li a0, 33
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 5
@@ -159,7 +159,7 @@ define <vscale x 8 x i8> @vdivu_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b)
 define <vscale x 8 x i8> @vdivu_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vdivu_vi_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 33
+; CHECK-NEXT:    li a0, 33
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 5
@@ -195,7 +195,7 @@ define <vscale x 16 x i8> @vdivu_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %
 define <vscale x 16 x i8> @vdivu_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vdivu_vi_nxv16i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 33
+; CHECK-NEXT:    li a0, 33
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 5
@@ -231,7 +231,7 @@ define <vscale x 32 x i8> @vdivu_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %
 define <vscale x 32 x i8> @vdivu_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: vdivu_vi_nxv32i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 33
+; CHECK-NEXT:    li a0, 33
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 5
@@ -267,7 +267,7 @@ define <vscale x 64 x i8> @vdivu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %
 define <vscale x 64 x i8> @vdivu_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: vdivu_vi_nxv64i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 33
+; CHECK-NEXT:    li a0, 33
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vmulhu.vx v8, v8, a0
 ; CHECK-NEXT:    vsrl.vi v8, v8, 5
@@ -826,25 +826,25 @@ define <vscale x 1 x i64> @vdivu_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    lui a0, 131072
 ; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    sw a0, 8(sp)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vmulhu.vv v8, v8, v9
-; RV32-NEXT:    addi a0, zero, 61
+; RV32-NEXT:    li a0, 61
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vi_nxv1i64_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    slli a0, a0, 61
 ; RV64-NEXT:    addi a0, a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmulhu.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 61
+; RV64-NEXT:    li a0, 61
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
@@ -922,25 +922,25 @@ define <vscale x 2 x i64> @vdivu_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    lui a0, 131072
 ; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    sw a0, 8(sp)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vmulhu.vv v8, v8, v10
-; RV32-NEXT:    addi a0, zero, 61
+; RV32-NEXT:    li a0, 61
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vi_nxv2i64_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    slli a0, a0, 61
 ; RV64-NEXT:    addi a0, a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; RV64-NEXT:    vmulhu.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 61
+; RV64-NEXT:    li a0, 61
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
@@ -1018,25 +1018,25 @@ define <vscale x 4 x i64> @vdivu_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    lui a0, 131072
 ; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    sw a0, 8(sp)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vmulhu.vv v8, v8, v12
-; RV32-NEXT:    addi a0, zero, 61
+; RV32-NEXT:    li a0, 61
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vi_nxv4i64_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    slli a0, a0, 61
 ; RV64-NEXT:    addi a0, a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; RV64-NEXT:    vmulhu.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 61
+; RV64-NEXT:    li a0, 61
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
@@ -1114,25 +1114,25 @@ define <vscale x 8 x i64> @vdivu_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    lui a0, 131072
 ; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    sw a0, 8(sp)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmulhu.vv v8, v8, v16
-; RV32-NEXT:    addi a0, zero, 61
+; RV32-NEXT:    li a0, 61
 ; RV32-NEXT:    vsrl.vx v8, v8, a0
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vdivu_vi_nxv8i64_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    slli a0, a0, 61
 ; RV64-NEXT:    addi a0, a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vmulhu.vx v8, v8, a0
-; RV64-NEXT:    addi a0, zero, 61
+; RV64-NEXT:    li a0, 61
 ; RV64-NEXT:    vsrl.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
index a45f1b23e773..97e45f2f4e6e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
@@ -9,7 +9,7 @@ declare <vscale x 8 x i7> @llvm.vp.udiv.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x
 define <vscale x 8 x i7> @vdivu_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
 ; CHECK-LABEL: vdivu_vx_nxv8i7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 127
+; CHECK-NEXT:    li a2, 127
 ; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a2
 ; CHECK-NEXT:    vmv.v.x v9, a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll
index dd104e0a8fda..3cdda837dacc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll
@@ -29,7 +29,7 @@ define <vscale x 1 x i8> @vmax_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
 define <vscale x 1 x i8> @vmax_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vmax_vi_nxv1i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -67,7 +67,7 @@ define <vscale x 2 x i8> @vmax_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
 define <vscale x 2 x i8> @vmax_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vmax_vi_nxv2i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -105,7 +105,7 @@ define <vscale x 4 x i8> @vmax_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
 define <vscale x 4 x i8> @vmax_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vmax_vi_nxv4i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @vmax_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
 define <vscale x 8 x i8> @vmax_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vmax_vi_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -181,7 +181,7 @@ define <vscale x 16 x i8> @vmax_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b
 define <vscale x 16 x i8> @vmax_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vmax_vi_nxv16i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -219,7 +219,7 @@ define <vscale x 32 x i8> @vmax_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b
 define <vscale x 32 x i8> @vmax_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: vmax_vi_nxv32i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -257,7 +257,7 @@ define <vscale x 64 x i8> @vmax_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b
 define <vscale x 64 x i8> @vmax_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: vmax_vi_nxv64i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -295,7 +295,7 @@ define <vscale x 1 x i16> @vmax_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %
 define <vscale x 1 x i16> @vmax_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: vmax_vi_nxv1i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -333,7 +333,7 @@ define <vscale x 2 x i16> @vmax_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %
 define <vscale x 2 x i16> @vmax_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: vmax_vi_nxv2i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -371,7 +371,7 @@ define <vscale x 4 x i16> @vmax_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %
 define <vscale x 4 x i16> @vmax_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: vmax_vi_nxv4i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -409,7 +409,7 @@ define <vscale x 8 x i16> @vmax_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %
 define <vscale x 8 x i16> @vmax_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: vmax_vi_nxv8i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -447,7 +447,7 @@ define <vscale x 16 x i16> @vmax_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signex
 define <vscale x 16 x i16> @vmax_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-LABEL: vmax_vi_nxv16i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -485,7 +485,7 @@ define <vscale x 32 x i16> @vmax_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signex
 define <vscale x 32 x i16> @vmax_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-LABEL: vmax_vi_nxv32i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -523,7 +523,7 @@ define <vscale x 1 x i32> @vmax_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %
 define <vscale x 1 x i32> @vmax_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vmax_vi_nxv1i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -561,7 +561,7 @@ define <vscale x 2 x i32> @vmax_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %
 define <vscale x 2 x i32> @vmax_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vmax_vi_nxv2i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -599,7 +599,7 @@ define <vscale x 4 x i32> @vmax_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %
 define <vscale x 4 x i32> @vmax_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vmax_vi_nxv4i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -637,7 +637,7 @@ define <vscale x 8 x i32> @vmax_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %
 define <vscale x 8 x i32> @vmax_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vmax_vi_nxv8i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -675,7 +675,7 @@ define <vscale x 16 x i32> @vmax_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signex
 define <vscale x 16 x i32> @vmax_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: vmax_vi_nxv16i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -726,7 +726,7 @@ define <vscale x 1 x i64> @vmax_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 define <vscale x 1 x i64> @vmax_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vmax_vi_nxv1i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -777,7 +777,7 @@ define <vscale x 2 x i64> @vmax_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 define <vscale x 2 x i64> @vmax_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vmax_vi_nxv2i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -828,7 +828,7 @@ define <vscale x 4 x i64> @vmax_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 define <vscale x 4 x i64> @vmax_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vmax_vi_nxv4i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -879,7 +879,7 @@ define <vscale x 8 x i64> @vmax_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 define <vscale x 8 x i64> @vmax_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vmax_vi_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmax.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll
index c164a25dae97..772ede13f50a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll
@@ -29,7 +29,7 @@ define <vscale x 1 x i8> @vmax_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
 define <vscale x 1 x i8> @vmax_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vmax_vi_nxv1i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -67,7 +67,7 @@ define <vscale x 2 x i8> @vmax_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
 define <vscale x 2 x i8> @vmax_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vmax_vi_nxv2i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -105,7 +105,7 @@ define <vscale x 4 x i8> @vmax_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
 define <vscale x 4 x i8> @vmax_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vmax_vi_nxv4i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @vmax_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
 define <vscale x 8 x i8> @vmax_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vmax_vi_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -181,7 +181,7 @@ define <vscale x 16 x i8> @vmax_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b
 define <vscale x 16 x i8> @vmax_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vmax_vi_nxv16i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -219,7 +219,7 @@ define <vscale x 32 x i8> @vmax_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b
 define <vscale x 32 x i8> @vmax_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: vmax_vi_nxv32i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -257,7 +257,7 @@ define <vscale x 64 x i8> @vmax_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b
 define <vscale x 64 x i8> @vmax_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: vmax_vi_nxv64i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -295,7 +295,7 @@ define <vscale x 1 x i16> @vmax_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %
 define <vscale x 1 x i16> @vmax_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: vmax_vi_nxv1i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -333,7 +333,7 @@ define <vscale x 2 x i16> @vmax_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %
 define <vscale x 2 x i16> @vmax_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: vmax_vi_nxv2i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -371,7 +371,7 @@ define <vscale x 4 x i16> @vmax_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %
 define <vscale x 4 x i16> @vmax_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: vmax_vi_nxv4i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -409,7 +409,7 @@ define <vscale x 8 x i16> @vmax_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %
 define <vscale x 8 x i16> @vmax_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: vmax_vi_nxv8i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -447,7 +447,7 @@ define <vscale x 16 x i16> @vmax_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signex
 define <vscale x 16 x i16> @vmax_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-LABEL: vmax_vi_nxv16i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -485,7 +485,7 @@ define <vscale x 32 x i16> @vmax_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signex
 define <vscale x 32 x i16> @vmax_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-LABEL: vmax_vi_nxv32i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -523,7 +523,7 @@ define <vscale x 1 x i32> @vmax_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %
 define <vscale x 1 x i32> @vmax_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vmax_vi_nxv1i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -561,7 +561,7 @@ define <vscale x 2 x i32> @vmax_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %
 define <vscale x 2 x i32> @vmax_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vmax_vi_nxv2i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -599,7 +599,7 @@ define <vscale x 4 x i32> @vmax_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %
 define <vscale x 4 x i32> @vmax_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vmax_vi_nxv4i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -637,7 +637,7 @@ define <vscale x 8 x i32> @vmax_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %
 define <vscale x 8 x i32> @vmax_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vmax_vi_nxv8i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -675,7 +675,7 @@ define <vscale x 16 x i32> @vmax_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signex
 define <vscale x 16 x i32> @vmax_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: vmax_vi_nxv16i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -726,7 +726,7 @@ define <vscale x 1 x i64> @vmax_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 define <vscale x 1 x i64> @vmax_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vmax_vi_nxv1i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -777,7 +777,7 @@ define <vscale x 2 x i64> @vmax_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 define <vscale x 2 x i64> @vmax_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vmax_vi_nxv2i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -828,7 +828,7 @@ define <vscale x 4 x i64> @vmax_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 define <vscale x 4 x i64> @vmax_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vmax_vi_nxv4i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -879,7 +879,7 @@ define <vscale x 8 x i64> @vmax_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 define <vscale x 8 x i64> @vmax_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vmax_vi_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmaxu.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll
index 355d386699da..15d2a2e0c0ae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll
@@ -29,7 +29,7 @@ define <vscale x 1 x i8> @vmin_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
 define <vscale x 1 x i8> @vmin_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vmin_vi_nxv1i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -67,7 +67,7 @@ define <vscale x 2 x i8> @vmin_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
 define <vscale x 2 x i8> @vmin_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vmin_vi_nxv2i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -105,7 +105,7 @@ define <vscale x 4 x i8> @vmin_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
 define <vscale x 4 x i8> @vmin_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vmin_vi_nxv4i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @vmin_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
 define <vscale x 8 x i8> @vmin_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vmin_vi_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -181,7 +181,7 @@ define <vscale x 16 x i8> @vmin_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b
 define <vscale x 16 x i8> @vmin_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vmin_vi_nxv16i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -219,7 +219,7 @@ define <vscale x 32 x i8> @vmin_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b
 define <vscale x 32 x i8> @vmin_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: vmin_vi_nxv32i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -257,7 +257,7 @@ define <vscale x 64 x i8> @vmin_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b
 define <vscale x 64 x i8> @vmin_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: vmin_vi_nxv64i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -295,7 +295,7 @@ define <vscale x 1 x i16> @vmin_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %
 define <vscale x 1 x i16> @vmin_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: vmin_vi_nxv1i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -333,7 +333,7 @@ define <vscale x 2 x i16> @vmin_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %
 define <vscale x 2 x i16> @vmin_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: vmin_vi_nxv2i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -371,7 +371,7 @@ define <vscale x 4 x i16> @vmin_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %
 define <vscale x 4 x i16> @vmin_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: vmin_vi_nxv4i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -409,7 +409,7 @@ define <vscale x 8 x i16> @vmin_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %
 define <vscale x 8 x i16> @vmin_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: vmin_vi_nxv8i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -447,7 +447,7 @@ define <vscale x 16 x i16> @vmin_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signex
 define <vscale x 16 x i16> @vmin_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-LABEL: vmin_vi_nxv16i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -485,7 +485,7 @@ define <vscale x 32 x i16> @vmin_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signex
 define <vscale x 32 x i16> @vmin_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-LABEL: vmin_vi_nxv32i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -523,7 +523,7 @@ define <vscale x 1 x i32> @vmin_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %
 define <vscale x 1 x i32> @vmin_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vmin_vi_nxv1i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -561,7 +561,7 @@ define <vscale x 2 x i32> @vmin_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %
 define <vscale x 2 x i32> @vmin_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vmin_vi_nxv2i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -599,7 +599,7 @@ define <vscale x 4 x i32> @vmin_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %
 define <vscale x 4 x i32> @vmin_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vmin_vi_nxv4i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -637,7 +637,7 @@ define <vscale x 8 x i32> @vmin_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %
 define <vscale x 8 x i32> @vmin_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vmin_vi_nxv8i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -675,7 +675,7 @@ define <vscale x 16 x i32> @vmin_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signex
 define <vscale x 16 x i32> @vmin_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: vmin_vi_nxv16i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -726,7 +726,7 @@ define <vscale x 1 x i64> @vmin_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 define <vscale x 1 x i64> @vmin_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vmin_vi_nxv1i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -777,7 +777,7 @@ define <vscale x 2 x i64> @vmin_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 define <vscale x 2 x i64> @vmin_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vmin_vi_nxv2i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -828,7 +828,7 @@ define <vscale x 4 x i64> @vmin_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 define <vscale x 4 x i64> @vmin_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vmin_vi_nxv4i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -879,7 +879,7 @@ define <vscale x 8 x i64> @vmin_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 define <vscale x 8 x i64> @vmin_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vmin_vi_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll
index 189a93eb598c..e289491af567 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll
@@ -29,7 +29,7 @@ define <vscale x 1 x i8> @vmin_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
 define <vscale x 1 x i8> @vmin_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vmin_vi_nxv1i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -67,7 +67,7 @@ define <vscale x 2 x i8> @vmin_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
 define <vscale x 2 x i8> @vmin_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vmin_vi_nxv2i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -105,7 +105,7 @@ define <vscale x 4 x i8> @vmin_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
 define <vscale x 4 x i8> @vmin_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vmin_vi_nxv4i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -143,7 +143,7 @@ define <vscale x 8 x i8> @vmin_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
 define <vscale x 8 x i8> @vmin_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vmin_vi_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -181,7 +181,7 @@ define <vscale x 16 x i8> @vmin_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b
 define <vscale x 16 x i8> @vmin_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vmin_vi_nxv16i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -219,7 +219,7 @@ define <vscale x 32 x i8> @vmin_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b
 define <vscale x 32 x i8> @vmin_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: vmin_vi_nxv32i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -257,7 +257,7 @@ define <vscale x 64 x i8> @vmin_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b
 define <vscale x 64 x i8> @vmin_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: vmin_vi_nxv64i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -295,7 +295,7 @@ define <vscale x 1 x i16> @vmin_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %
 define <vscale x 1 x i16> @vmin_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: vmin_vi_nxv1i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -333,7 +333,7 @@ define <vscale x 2 x i16> @vmin_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %
 define <vscale x 2 x i16> @vmin_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: vmin_vi_nxv2i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -371,7 +371,7 @@ define <vscale x 4 x i16> @vmin_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %
 define <vscale x 4 x i16> @vmin_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: vmin_vi_nxv4i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -409,7 +409,7 @@ define <vscale x 8 x i16> @vmin_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %
 define <vscale x 8 x i16> @vmin_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: vmin_vi_nxv8i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -447,7 +447,7 @@ define <vscale x 16 x i16> @vmin_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signex
 define <vscale x 16 x i16> @vmin_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-LABEL: vmin_vi_nxv16i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -485,7 +485,7 @@ define <vscale x 32 x i16> @vmin_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signex
 define <vscale x 32 x i16> @vmin_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-LABEL: vmin_vi_nxv32i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -523,7 +523,7 @@ define <vscale x 1 x i32> @vmin_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %
 define <vscale x 1 x i32> @vmin_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vmin_vi_nxv1i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -561,7 +561,7 @@ define <vscale x 2 x i32> @vmin_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %
 define <vscale x 2 x i32> @vmin_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vmin_vi_nxv2i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -599,7 +599,7 @@ define <vscale x 4 x i32> @vmin_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %
 define <vscale x 4 x i32> @vmin_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vmin_vi_nxv4i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -637,7 +637,7 @@ define <vscale x 8 x i32> @vmin_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %
 define <vscale x 8 x i32> @vmin_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vmin_vi_nxv8i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -675,7 +675,7 @@ define <vscale x 16 x i32> @vmin_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signex
 define <vscale x 16 x i32> @vmin_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: vmin_vi_nxv16i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -726,7 +726,7 @@ define <vscale x 1 x i64> @vmin_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 define <vscale x 1 x i64> @vmin_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vmin_vi_nxv1i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -777,7 +777,7 @@ define <vscale x 2 x i64> @vmin_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 define <vscale x 2 x i64> @vmin_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vmin_vi_nxv2i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -828,7 +828,7 @@ define <vscale x 4 x i64> @vmin_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 define <vscale x 4 x i64> @vmin_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vmin_vi_nxv4i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -879,7 +879,7 @@ define <vscale x 8 x i64> @vmin_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 define <vscale x 8 x i64> @vmin_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vmin_vi_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -3
+; CHECK-NEXT:    li a0, -3
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vminu.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll
index 6740999d68a1..8ecad6004b9b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll
@@ -27,7 +27,7 @@ define <vscale x 1 x i8> @vmul_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
 define <vscale x 1 x i8> @vmul_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vmul_vi_nxv1i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -62,7 +62,7 @@ define <vscale x 2 x i8> @vmul_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
 define <vscale x 2 x i8> @vmul_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vmul_vi_nxv2i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -97,7 +97,7 @@ define <vscale x 4 x i8> @vmul_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
 define <vscale x 4 x i8> @vmul_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vmul_vi_nxv4i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -132,7 +132,7 @@ define <vscale x 8 x i8> @vmul_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
 define <vscale x 8 x i8> @vmul_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vmul_vi_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -167,7 +167,7 @@ define <vscale x 16 x i8> @vmul_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b
 define <vscale x 16 x i8> @vmul_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vmul_vi_nxv16i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -202,7 +202,7 @@ define <vscale x 32 x i8> @vmul_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b
 define <vscale x 32 x i8> @vmul_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: vmul_vi_nxv32i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -237,7 +237,7 @@ define <vscale x 64 x i8> @vmul_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b
 define <vscale x 64 x i8> @vmul_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: vmul_vi_nxv64i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -272,7 +272,7 @@ define <vscale x 1 x i16> @vmul_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %
 define <vscale x 1 x i16> @vmul_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: vmul_vi_nxv1i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -307,7 +307,7 @@ define <vscale x 2 x i16> @vmul_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %
 define <vscale x 2 x i16> @vmul_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: vmul_vi_nxv2i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -342,7 +342,7 @@ define <vscale x 4 x i16> @vmul_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %
 define <vscale x 4 x i16> @vmul_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: vmul_vi_nxv4i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -377,7 +377,7 @@ define <vscale x 8 x i16> @vmul_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %
 define <vscale x 8 x i16> @vmul_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: vmul_vi_nxv8i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -412,7 +412,7 @@ define <vscale x 16 x i16> @vmul_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signex
 define <vscale x 16 x i16> @vmul_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-LABEL: vmul_vi_nxv16i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -447,7 +447,7 @@ define <vscale x 32 x i16> @vmul_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signex
 define <vscale x 32 x i16> @vmul_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-LABEL: vmul_vi_nxv32i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -482,7 +482,7 @@ define <vscale x 1 x i32> @vmul_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %
 define <vscale x 1 x i32> @vmul_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vmul_vi_nxv1i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -517,7 +517,7 @@ define <vscale x 2 x i32> @vmul_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %
 define <vscale x 2 x i32> @vmul_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vmul_vi_nxv2i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -552,7 +552,7 @@ define <vscale x 4 x i32> @vmul_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %
 define <vscale x 4 x i32> @vmul_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vmul_vi_nxv4i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -587,7 +587,7 @@ define <vscale x 8 x i32> @vmul_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %
 define <vscale x 8 x i32> @vmul_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vmul_vi_nxv8i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -622,7 +622,7 @@ define <vscale x 16 x i32> @vmul_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signex
 define <vscale x 16 x i32> @vmul_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: vmul_vi_nxv16i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -670,7 +670,7 @@ define <vscale x 1 x i64> @vmul_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 define <vscale x 1 x i64> @vmul_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vmul_vi_nxv1i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -742,7 +742,7 @@ define <vscale x 2 x i64> @vmul_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 define <vscale x 2 x i64> @vmul_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vmul_vi_nxv2i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -814,7 +814,7 @@ define <vscale x 4 x i64> @vmul_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 define <vscale x 4 x i64> @vmul_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vmul_vi_nxv4i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -886,7 +886,7 @@ define <vscale x 8 x i64> @vmul_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 define <vscale x 8 x i64> @vmul_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vmul_vi_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll
index 19d5141d7810..076f726e7835 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll
@@ -7,10 +7,10 @@
 define <vscale x 4 x i1> @srem_eq_fold_nxv4i8(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: srem_eq_fold_nxv4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 42
+; CHECK-NEXT:    li a0, 42
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
-; CHECK-NEXT:    addi a1, zero, -85
+; CHECK-NEXT:    li a1, -85
 ; CHECK-NEXT:    vmacc.vx v9, a1, v8
 ; CHECK-NEXT:    vsll.vi v8, v9, 7
 ; CHECK-NEXT:    vsrl.vi v9, v9, 1
@@ -62,14 +62,14 @@ define <vscale x 1 x i32> @vmulh_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %x) {
 define <vscale x 1 x i32> @vmulh_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; RV32-LABEL: vmulh_vi_nxv1i32_0:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; RV32-NEXT:    vmulh.vx v8, v8, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmulh_vi_nxv1i32_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    addi a0, a0, -7
 ; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
@@ -90,7 +90,7 @@ define <vscale x 1 x i32> @vmulh_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 define <vscale x 1 x i32> @vmulh_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vmulh_vi_nxv1i32_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -143,14 +143,14 @@ define <vscale x 2 x i32> @vmulh_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %x) {
 define <vscale x 2 x i32> @vmulh_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; RV32-LABEL: vmulh_vi_nxv2i32_0:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; RV32-NEXT:    vmulh.vx v8, v8, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmulh_vi_nxv2i32_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    addi a0, a0, -7
 ; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
@@ -171,7 +171,7 @@ define <vscale x 2 x i32> @vmulh_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 define <vscale x 2 x i32> @vmulh_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vmulh_vi_nxv2i32_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -224,14 +224,14 @@ define <vscale x 4 x i32> @vmulh_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %x) {
 define <vscale x 4 x i32> @vmulh_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; RV32-LABEL: vmulh_vi_nxv4i32_0:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; RV32-NEXT:    vmulh.vx v8, v8, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmulh_vi_nxv4i32_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    addi a0, a0, -7
 ; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
@@ -252,7 +252,7 @@ define <vscale x 4 x i32> @vmulh_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 define <vscale x 4 x i32> @vmulh_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vmulh_vi_nxv4i32_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -305,14 +305,14 @@ define <vscale x 8 x i32> @vmulh_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %x) {
 define <vscale x 8 x i32> @vmulh_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; RV32-LABEL: vmulh_vi_nxv8i32_0:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; RV32-NEXT:    vmulh.vx v8, v8, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmulh_vi_nxv8i32_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    addi a0, a0, -7
 ; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
@@ -333,7 +333,7 @@ define <vscale x 8 x i32> @vmulh_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 define <vscale x 8 x i32> @vmulh_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vmulh_vi_nxv8i32_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll
index ec06dbffbcf9..54cb5bfaeef5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll
@@ -39,14 +39,14 @@ define <vscale x 1 x i32> @vmulhu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %x) {
 define <vscale x 1 x i32> @vmulhu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; RV32-LABEL: vmulhu_vi_nxv1i32_0:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; RV32-NEXT:    vmulhu.vx v8, v8, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmulhu_vi_nxv1i32_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    addi a0, a0, -7
 ; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
@@ -73,7 +73,7 @@ define <vscale x 1 x i32> @vmulhu_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
 ;
 ; RV64-LABEL: vmulhu_vi_nxv1i32_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 16
+; RV64-NEXT:    li a0, 16
 ; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; RV64-NEXT:    vmulhu.vx v8, v8, a0
 ; RV64-NEXT:    ret
@@ -126,14 +126,14 @@ define <vscale x 2 x i32> @vmulhu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %x) {
 define <vscale x 2 x i32> @vmulhu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; RV32-LABEL: vmulhu_vi_nxv2i32_0:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; RV32-NEXT:    vmulhu.vx v8, v8, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmulhu_vi_nxv2i32_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    addi a0, a0, -7
 ; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
@@ -160,7 +160,7 @@ define <vscale x 2 x i32> @vmulhu_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
 ;
 ; RV64-LABEL: vmulhu_vi_nxv2i32_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 16
+; RV64-NEXT:    li a0, 16
 ; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; RV64-NEXT:    vmulhu.vx v8, v8, a0
 ; RV64-NEXT:    ret
@@ -213,14 +213,14 @@ define <vscale x 4 x i32> @vmulhu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %x) {
 define <vscale x 4 x i32> @vmulhu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; RV32-LABEL: vmulhu_vi_nxv4i32_0:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; RV32-NEXT:    vmulhu.vx v8, v8, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmulhu_vi_nxv4i32_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    addi a0, a0, -7
 ; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
@@ -247,7 +247,7 @@ define <vscale x 4 x i32> @vmulhu_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
 ;
 ; RV64-LABEL: vmulhu_vi_nxv4i32_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 16
+; RV64-NEXT:    li a0, 16
 ; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; RV64-NEXT:    vmulhu.vx v8, v8, a0
 ; RV64-NEXT:    ret
@@ -300,14 +300,14 @@ define <vscale x 8 x i32> @vmulhu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %x) {
 define <vscale x 8 x i32> @vmulhu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; RV32-LABEL: vmulhu_vi_nxv8i32_0:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; RV32-NEXT:    vmulhu.vx v8, v8, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmulhu_vi_nxv8i32_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    addi a0, a0, -7
 ; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
@@ -334,7 +334,7 @@ define <vscale x 8 x i32> @vmulhu_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
 ;
 ; RV64-LABEL: vmulhu_vi_nxv8i32_1:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 16
+; RV64-NEXT:    li a0, 16
 ; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; RV64-NEXT:    vmulhu.vx v8, v8, a0
 ; RV64-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll
index 0e2c7a77431f..190cb0ba1c4d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll
@@ -240,7 +240,7 @@ declare i64 @llvm.riscv.vmv.x.s.nxv1i64( <vscale x 1 x i64>)
 define i64 @intrinsic_vmv.x.s_s_nxv1i64(<vscale x 1 x i64> %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v9, v8, a0
 ; CHECK-NEXT:    vmv.x.s a1, v9
@@ -256,7 +256,7 @@ declare i64 @llvm.riscv.vmv.x.s.nxv2i64( <vscale x 2 x i64>)
 define i64 @intrinsic_vmv.x.s_s_nxv2i64(<vscale x 2 x i64> %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, mu
 ; CHECK-NEXT:    vsrl.vx v10, v8, a0
 ; CHECK-NEXT:    vmv.x.s a1, v10
@@ -272,7 +272,7 @@ declare i64 @llvm.riscv.vmv.x.s.nxv4i64( <vscale x 4 x i64>)
 define i64 @intrinsic_vmv.x.s_s_nxv4i64(<vscale x 4 x i64> %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, mu
 ; CHECK-NEXT:    vsrl.vx v12, v8, a0
 ; CHECK-NEXT:    vmv.x.s a1, v12
@@ -288,7 +288,7 @@ declare i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64>)
 define i64 @intrinsic_vmv.x.s_s_nxv8i64(<vscale x 8 x i64> %0) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, mu
 ; CHECK-NEXT:    vsrl.vx v16, v8, a0
 ; CHECK-NEXT:    vmv.x.s a1, v16

diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll
index c453b4007326..4a3768c0fd5d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll
@@ -41,7 +41,7 @@ define <vscale x 1 x i8> @vor_vx_nxv1i8_1(<vscale x 1 x i8> %va) {
 define <vscale x 1 x i8> @vor_vx_nxv1i8_2(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vor_vx_nxv1i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -90,7 +90,7 @@ define <vscale x 2 x i8> @vor_vx_nxv2i8_1(<vscale x 2 x i8> %va) {
 define <vscale x 2 x i8> @vor_vx_nxv2i8_2(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vor_vx_nxv2i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -139,7 +139,7 @@ define <vscale x 4 x i8> @vor_vx_nxv4i8_1(<vscale x 4 x i8> %va) {
 define <vscale x 4 x i8> @vor_vx_nxv4i8_2(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vor_vx_nxv4i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -188,7 +188,7 @@ define <vscale x 8 x i8> @vor_vx_nxv8i8_1(<vscale x 8 x i8> %va) {
 define <vscale x 8 x i8> @vor_vx_nxv8i8_2(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vor_vx_nxv8i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -237,7 +237,7 @@ define <vscale x 16 x i8> @vor_vx_nxv16i8_1(<vscale x 16 x i8> %va) {
 define <vscale x 16 x i8> @vor_vx_nxv16i8_2(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vor_vx_nxv16i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -286,7 +286,7 @@ define <vscale x 32 x i8> @vor_vx_nxv32i8_1(<vscale x 32 x i8> %va) {
 define <vscale x 32 x i8> @vor_vx_nxv32i8_2(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: vor_vx_nxv32i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -335,7 +335,7 @@ define <vscale x 64 x i8> @vor_vx_nxv64i8_1(<vscale x 64 x i8> %va) {
 define <vscale x 64 x i8> @vor_vx_nxv64i8_2(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: vor_vx_nxv64i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -384,7 +384,7 @@ define <vscale x 1 x i16> @vor_vx_nxv1i16_1(<vscale x 1 x i16> %va) {
 define <vscale x 1 x i16> @vor_vx_nxv1i16_2(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: vor_vx_nxv1i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -433,7 +433,7 @@ define <vscale x 2 x i16> @vor_vx_nxv2i16_1(<vscale x 2 x i16> %va) {
 define <vscale x 2 x i16> @vor_vx_nxv2i16_2(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: vor_vx_nxv2i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -482,7 +482,7 @@ define <vscale x 4 x i16> @vor_vx_nxv4i16_1(<vscale x 4 x i16> %va) {
 define <vscale x 4 x i16> @vor_vx_nxv4i16_2(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: vor_vx_nxv4i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -531,7 +531,7 @@ define <vscale x 8 x i16> @vor_vx_nxv8i16_1(<vscale x 8 x i16> %va) {
 define <vscale x 8 x i16> @vor_vx_nxv8i16_2(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: vor_vx_nxv8i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -580,7 +580,7 @@ define <vscale x 16 x i16> @vor_vx_nxv16i16_1(<vscale x 16 x i16> %va) {
 define <vscale x 16 x i16> @vor_vx_nxv16i16_2(<vscale x 16 x i16> %va) {
 ; CHECK-LABEL: vor_vx_nxv16i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -629,7 +629,7 @@ define <vscale x 32 x i16> @vor_vx_nxv32i16_1(<vscale x 32 x i16> %va) {
 define <vscale x 32 x i16> @vor_vx_nxv32i16_2(<vscale x 32 x i16> %va) {
 ; CHECK-LABEL: vor_vx_nxv32i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -678,7 +678,7 @@ define <vscale x 1 x i32> @vor_vx_nxv1i32_1(<vscale x 1 x i32> %va) {
 define <vscale x 1 x i32> @vor_vx_nxv1i32_2(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vor_vx_nxv1i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -727,7 +727,7 @@ define <vscale x 2 x i32> @vor_vx_nxv2i32_1(<vscale x 2 x i32> %va) {
 define <vscale x 2 x i32> @vor_vx_nxv2i32_2(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vor_vx_nxv2i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -776,7 +776,7 @@ define <vscale x 4 x i32> @vor_vx_nxv4i32_1(<vscale x 4 x i32> %va) {
 define <vscale x 4 x i32> @vor_vx_nxv4i32_2(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vor_vx_nxv4i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -825,7 +825,7 @@ define <vscale x 8 x i32> @vor_vx_nxv8i32_1(<vscale x 8 x i32> %va) {
 define <vscale x 8 x i32> @vor_vx_nxv8i32_2(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vor_vx_nxv8i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -874,7 +874,7 @@ define <vscale x 16 x i32> @vor_vx_nxv16i32_1(<vscale x 16 x i32> %va) {
 define <vscale x 16 x i32> @vor_vx_nxv16i32_2(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: vor_vx_nxv16i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -936,7 +936,7 @@ define <vscale x 1 x i64> @vor_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
 define <vscale x 1 x i64> @vor_vx_nxv1i64_2(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vor_vx_nxv1i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -998,7 +998,7 @@ define <vscale x 2 x i64> @vor_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
 define <vscale x 2 x i64> @vor_vx_nxv2i64_2(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vor_vx_nxv2i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -1060,7 +1060,7 @@ define <vscale x 4 x i64> @vor_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
 define <vscale x 4 x i64> @vor_vx_nxv4i64_2(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vor_vx_nxv4i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -1122,7 +1122,7 @@ define <vscale x 8 x i64> @vor_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
 define <vscale x 8 x i64> @vor_vx_nxv8i64_2(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vor_vx_nxv8i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vor.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
index 8709adddce5c..e9b3ec5867dc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
@@ -36,7 +36,7 @@ declare i8 @llvm.vector.reduce.smax.nxv1i8(<vscale x 1 x i8>)
 define signext i8 @vreduce_smax_nxv1i8(<vscale x 1 x i8> %v) {
 ; CHECK-LABEL: vreduce_smax_nxv1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -128
+; CHECK-NEXT:    li a0, -128
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
@@ -67,7 +67,7 @@ declare i8 @llvm.vector.reduce.smin.nxv1i8(<vscale x 1 x i8>)
 define signext i8 @vreduce_smin_nxv1i8(<vscale x 1 x i8> %v) {
 ; CHECK-LABEL: vreduce_smin_nxv1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
@@ -158,7 +158,7 @@ declare i8 @llvm.vector.reduce.smax.nxv2i8(<vscale x 2 x i8>)
 define signext i8 @vreduce_smax_nxv2i8(<vscale x 2 x i8> %v) {
 ; CHECK-LABEL: vreduce_smax_nxv2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -128
+; CHECK-NEXT:    li a0, -128
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
@@ -189,7 +189,7 @@ declare i8 @llvm.vector.reduce.smin.nxv2i8(<vscale x 2 x i8>)
 define signext i8 @vreduce_smin_nxv2i8(<vscale x 2 x i8> %v) {
 ; CHECK-LABEL: vreduce_smin_nxv2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
@@ -280,7 +280,7 @@ declare i8 @llvm.vector.reduce.smax.nxv4i8(<vscale x 4 x i8>)
 define signext i8 @vreduce_smax_nxv4i8(<vscale x 4 x i8> %v) {
 ; CHECK-LABEL: vreduce_smax_nxv4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -128
+; CHECK-NEXT:    li a0, -128
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
@@ -311,7 +311,7 @@ declare i8 @llvm.vector.reduce.smin.nxv4i8(<vscale x 4 x i8>)
 define signext i8 @vreduce_smin_nxv4i8(<vscale x 4 x i8> %v) {
 ; CHECK-LABEL: vreduce_smin_nxv4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
@@ -1098,7 +1098,7 @@ define i64 @vreduce_add_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vredsum.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1116,7 +1116,7 @@ define i64 @vreduce_umax_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1140,7 +1140,7 @@ define i64 @vreduce_smax_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vlse64.v v9, (a0), zero
 ; CHECK-NEXT:    vredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1159,7 +1159,7 @@ define i64 @vreduce_umin_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vmv.v.i v9, -1
 ; CHECK-NEXT:    vredminu.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1175,7 +1175,7 @@ define i64 @vreduce_smin_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    addi a0, zero, -1
+; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    lui a0, 524288
 ; CHECK-NEXT:    addi a0, a0, -1
@@ -1185,7 +1185,7 @@ define i64 @vreduce_smin_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vlse64.v v9, (a0), zero
 ; CHECK-NEXT:    vredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1204,7 +1204,7 @@ define i64 @vreduce_and_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vmv.v.i v9, -1
 ; CHECK-NEXT:    vredand.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1222,7 +1222,7 @@ define i64 @vreduce_or_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vredor.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1240,7 +1240,7 @@ define i64 @vreduce_xor_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vmv.v.i v9, 0
 ; CHECK-NEXT:    vredxor.vs v8, v8, v9
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1259,7 +1259,7 @@ define i64 @vreduce_add_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vredsum.vs v8, v8, v10
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1278,7 +1278,7 @@ define i64 @vreduce_umax_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v8, v10
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1303,7 +1303,7 @@ define i64 @vreduce_smax_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vredmax.vs v8, v8, v10
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1323,7 +1323,7 @@ define i64 @vreduce_umin_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vredminu.vs v8, v8, v10
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1339,7 +1339,7 @@ define i64 @vreduce_smin_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    addi a0, zero, -1
+; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    lui a0, 524288
 ; CHECK-NEXT:    addi a0, a0, -1
@@ -1350,7 +1350,7 @@ define i64 @vreduce_smin_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vredmin.vs v8, v8, v10
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1370,7 +1370,7 @@ define i64 @vreduce_and_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vredand.vs v8, v8, v10
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1389,7 +1389,7 @@ define i64 @vreduce_or_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vredor.vs v8, v8, v10
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1408,7 +1408,7 @@ define i64 @vreduce_xor_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vredxor.vs v8, v8, v10
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1427,7 +1427,7 @@ define i64 @vreduce_add_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vredsum.vs v8, v8, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1446,7 +1446,7 @@ define i64 @vreduce_umax_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vredmaxu.vs v8, v8, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1471,7 +1471,7 @@ define i64 @vreduce_smax_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vredmax.vs v8, v8, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1491,7 +1491,7 @@ define i64 @vreduce_umin_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vredminu.vs v8, v8, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1507,7 +1507,7 @@ define i64 @vreduce_smin_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    addi a0, zero, -1
+; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    sw a0, 8(sp)
 ; CHECK-NEXT:    lui a0, 524288
 ; CHECK-NEXT:    addi a0, a0, -1
@@ -1518,7 +1518,7 @@ define i64 @vreduce_smin_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vredmin.vs v8, v8, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1538,7 +1538,7 @@ define i64 @vreduce_and_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vredand.vs v8, v8, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1557,7 +1557,7 @@ define i64 @vreduce_or_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vredor.vs v8, v8, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
@@ -1576,7 +1576,7 @@ define i64 @vreduce_xor_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vredxor.vs v8, v8, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll
index fa4d650ab710..478ad4f18563 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll
@@ -36,7 +36,7 @@ declare i8 @llvm.vector.reduce.smax.nxv1i8(<vscale x 1 x i8>)
 define signext i8 @vreduce_smax_nxv1i8(<vscale x 1 x i8> %v) {
 ; CHECK-LABEL: vreduce_smax_nxv1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -128
+; CHECK-NEXT:    li a0, -128
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
@@ -67,7 +67,7 @@ declare i8 @llvm.vector.reduce.smin.nxv1i8(<vscale x 1 x i8>)
 define signext i8 @vreduce_smin_nxv1i8(<vscale x 1 x i8> %v) {
 ; CHECK-LABEL: vreduce_smin_nxv1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
@@ -158,7 +158,7 @@ declare i8 @llvm.vector.reduce.smax.nxv2i8(<vscale x 2 x i8>)
 define signext i8 @vreduce_smax_nxv2i8(<vscale x 2 x i8> %v) {
 ; CHECK-LABEL: vreduce_smax_nxv2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -128
+; CHECK-NEXT:    li a0, -128
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
@@ -189,7 +189,7 @@ declare i8 @llvm.vector.reduce.smin.nxv2i8(<vscale x 2 x i8>)
 define signext i8 @vreduce_smin_nxv2i8(<vscale x 2 x i8> %v) {
 ; CHECK-LABEL: vreduce_smin_nxv2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
@@ -280,7 +280,7 @@ declare i8 @llvm.vector.reduce.smax.nxv4i8(<vscale x 4 x i8>)
 define signext i8 @vreduce_smax_nxv4i8(<vscale x 4 x i8> %v) {
 ; CHECK-LABEL: vreduce_smax_nxv4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -128
+; CHECK-NEXT:    li a0, -128
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
@@ -311,7 +311,7 @@ declare i8 @llvm.vector.reduce.smin.nxv4i8(<vscale x 4 x i8>)
 define signext i8 @vreduce_smin_nxv4i8(<vscale x 4 x i8> %v) {
 ; CHECK-LABEL: vreduce_smin_nxv4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
@@ -1122,7 +1122,7 @@ declare i64 @llvm.vector.reduce.smax.nxv1i64(<vscale x 1 x i64>)
 define i64 @vreduce_smax_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-LABEL: vreduce_smax_nxv1i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -1
+; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    slli a0, a0, 63
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
@@ -1152,7 +1152,7 @@ declare i64 @llvm.vector.reduce.smin.nxv1i64(<vscale x 1 x i64>)
 define i64 @vreduce_smin_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-LABEL: vreduce_smin_nxv1i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -1
+; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
@@ -1240,7 +1240,7 @@ declare i64 @llvm.vector.reduce.smax.nxv2i64(<vscale x 2 x i64>)
 define i64 @vreduce_smax_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: vreduce_smax_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -1
+; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    slli a0, a0, 63
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a0
@@ -1272,7 +1272,7 @@ declare i64 @llvm.vector.reduce.smin.nxv2i64(<vscale x 2 x i64>)
 define i64 @vreduce_smin_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: vreduce_smin_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -1
+; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a0
@@ -1364,7 +1364,7 @@ declare i64 @llvm.vector.reduce.smax.nxv4i64(<vscale x 4 x i64>)
 define i64 @vreduce_smax_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-LABEL: vreduce_smax_nxv4i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -1
+; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    slli a0, a0, 63
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v12, a0
@@ -1396,7 +1396,7 @@ declare i64 @llvm.vector.reduce.smin.nxv4i64(<vscale x 4 x i64>)
 define i64 @vreduce_smin_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-LABEL: vreduce_smin_nxv4i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, -1
+; CHECK-NEXT:    li a0, -1
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v12, a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
index c67ca4e20879..67d2325ff91b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
@@ -1261,7 +1261,7 @@ define signext i64 @vpreduce_add_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vredsum.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1295,7 +1295,7 @@ define signext i64 @vpreduce_umax_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vredmaxu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1329,7 +1329,7 @@ define signext i64 @vpreduce_smax_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vredmax.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1363,7 +1363,7 @@ define signext i64 @vpreduce_umin_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vredminu.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1397,7 +1397,7 @@ define signext i64 @vpreduce_smin_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vredmin.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1431,7 +1431,7 @@ define signext i64 @vpreduce_and_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vredand.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1465,7 +1465,7 @@ define signext i64 @vpreduce_or_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v, <
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vredor.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1499,7 +1499,7 @@ define signext i64 @vpreduce_xor_nxv1i64(i64 signext %s, <vscale x 1 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
 ; RV32-NEXT:    vredxor.vs v9, v8, v9, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v9
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v9, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1533,7 +1533,7 @@ define signext i64 @vpreduce_add_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vredsum.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1567,7 +1567,7 @@ define signext i64 @vpreduce_umax_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vredmaxu.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1601,7 +1601,7 @@ define signext i64 @vpreduce_smax_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vredmax.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1635,7 +1635,7 @@ define signext i64 @vpreduce_umin_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vredminu.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1669,7 +1669,7 @@ define signext i64 @vpreduce_smin_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vredmin.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1703,7 +1703,7 @@ define signext i64 @vpreduce_and_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vredand.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1737,7 +1737,7 @@ define signext i64 @vpreduce_or_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v, <
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vredor.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1771,7 +1771,7 @@ define signext i64 @vpreduce_xor_nxv2i64(i64 signext %s, <vscale x 2 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
 ; RV32-NEXT:    vredxor.vs v10, v8, v10, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v10
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v10, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1805,7 +1805,7 @@ define signext i64 @vpreduce_add_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
 ; RV32-NEXT:    vredsum.vs v12, v8, v12, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v12
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v12, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1839,7 +1839,7 @@ define signext i64 @vpreduce_umax_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
 ; RV32-NEXT:    vredmaxu.vs v12, v8, v12, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v12
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v12, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1873,7 +1873,7 @@ define signext i64 @vpreduce_smax_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
 ; RV32-NEXT:    vredmax.vs v12, v8, v12, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v12
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v12, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1907,7 +1907,7 @@ define signext i64 @vpreduce_umin_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
 ; RV32-NEXT:    vredminu.vs v12, v8, v12, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v12
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v12, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1941,7 +1941,7 @@ define signext i64 @vpreduce_smin_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
 ; RV32-NEXT:    vredmin.vs v12, v8, v12, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v12
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v12, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -1975,7 +1975,7 @@ define signext i64 @vpreduce_and_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
 ; RV32-NEXT:    vredand.vs v12, v8, v12, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v12
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v12, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -2009,7 +2009,7 @@ define signext i64 @vpreduce_or_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v, <
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
 ; RV32-NEXT:    vredor.vs v12, v8, v12, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v12
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v12, a1
 ; RV32-NEXT:    vmv.x.s a1, v8
@@ -2043,7 +2043,7 @@ define signext i64 @vpreduce_xor_nxv4i64(i64 signext %s, <vscale x 4 x i64> %v,
 ; RV32-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
 ; RV32-NEXT:    vredxor.vs v12, v8, v12, v0.t
 ; RV32-NEXT:    vmv.x.s a0, v12
-; RV32-NEXT:    addi a1, zero, 32
+; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV32-NEXT:    vsrl.vx v8, v12, a1
 ; RV32-NEXT:    vmv.x.s a1, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
index 645ea2c480a3..e77aeed9b77d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
@@ -27,14 +27,14 @@ define <vscale x 1 x i8> @vrem_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
 define <vscale x 1 x i8> @vrem_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vrem_vi_nxv1i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 109
+; CHECK-NEXT:    li a0, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmulh.vx v9, v8, a0
 ; CHECK-NEXT:    vsub.vv v9, v9, v8
 ; CHECK-NEXT:    vsra.vi v9, v9, 2
 ; CHECK-NEXT:    vsrl.vi v10, v9, 7
 ; CHECK-NEXT:    vadd.vv v9, v9, v10
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i8> undef, i8 -7, i32 0
@@ -68,14 +68,14 @@ define <vscale x 2 x i8> @vrem_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
 define <vscale x 2 x i8> @vrem_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vrem_vi_nxv2i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 109
+; CHECK-NEXT:    li a0, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmulh.vx v9, v8, a0
 ; CHECK-NEXT:    vsub.vv v9, v9, v8
 ; CHECK-NEXT:    vsra.vi v9, v9, 2
 ; CHECK-NEXT:    vsrl.vi v10, v9, 7
 ; CHECK-NEXT:    vadd.vv v9, v9, v10
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i8> undef, i8 -7, i32 0
@@ -109,14 +109,14 @@ define <vscale x 4 x i8> @vrem_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
 define <vscale x 4 x i8> @vrem_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vrem_vi_nxv4i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 109
+; CHECK-NEXT:    li a0, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmulh.vx v9, v8, a0
 ; CHECK-NEXT:    vsub.vv v9, v9, v8
 ; CHECK-NEXT:    vsra.vi v9, v9, 2
 ; CHECK-NEXT:    vsrl.vi v10, v9, 7
 ; CHECK-NEXT:    vadd.vv v9, v9, v10
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i8> undef, i8 -7, i32 0
@@ -150,14 +150,14 @@ define <vscale x 8 x i8> @vrem_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
 define <vscale x 8 x i8> @vrem_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vrem_vi_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 109
+; CHECK-NEXT:    li a0, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmulh.vx v9, v8, a0
 ; CHECK-NEXT:    vsub.vv v9, v9, v8
 ; CHECK-NEXT:    vsra.vi v9, v9, 2
 ; CHECK-NEXT:    vsrl.vi v10, v9, 7
 ; CHECK-NEXT:    vadd.vv v9, v9, v10
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i8> undef, i8 -7, i32 0
@@ -191,14 +191,14 @@ define <vscale x 16 x i8> @vrem_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b
 define <vscale x 16 x i8> @vrem_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vrem_vi_nxv16i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 109
+; CHECK-NEXT:    li a0, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vmulh.vx v10, v8, a0
 ; CHECK-NEXT:    vsub.vv v10, v10, v8
 ; CHECK-NEXT:    vsra.vi v10, v10, 2
 ; CHECK-NEXT:    vsrl.vi v12, v10, 7
 ; CHECK-NEXT:    vadd.vv v10, v10, v12
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 16 x i8> undef, i8 -7, i32 0
@@ -232,14 +232,14 @@ define <vscale x 32 x i8> @vrem_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b
 define <vscale x 32 x i8> @vrem_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: vrem_vi_nxv32i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 109
+; CHECK-NEXT:    li a0, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vmulh.vx v12, v8, a0
 ; CHECK-NEXT:    vsub.vv v12, v12, v8
 ; CHECK-NEXT:    vsra.vi v12, v12, 2
 ; CHECK-NEXT:    vsrl.vi v16, v12, 7
 ; CHECK-NEXT:    vadd.vv v12, v12, v16
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 32 x i8> undef, i8 -7, i32 0
@@ -273,14 +273,14 @@ define <vscale x 64 x i8> @vrem_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b
 define <vscale x 64 x i8> @vrem_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: vrem_vi_nxv64i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 109
+; CHECK-NEXT:    li a0, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
 ; CHECK-NEXT:    vsub.vv v16, v16, v8
 ; CHECK-NEXT:    vsra.vi v16, v16, 2
 ; CHECK-NEXT:    vsrl.vi v24, v16, 7
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 64 x i8> undef, i8 -7, i32 0
@@ -321,7 +321,7 @@ define <vscale x 1 x i16> @vrem_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; RV32-NEXT:    vsra.vi v9, v9, 1
 ; RV32-NEXT:    vsrl.vi v10, v9, 15
 ; RV32-NEXT:    vadd.vv v9, v9, v10
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v9
 ; RV32-NEXT:    ret
 ;
@@ -334,7 +334,7 @@ define <vscale x 1 x i16> @vrem_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; RV64-NEXT:    vsra.vi v9, v9, 1
 ; RV64-NEXT:    vsrl.vi v10, v9, 15
 ; RV64-NEXT:    vadd.vv v9, v9, v10
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v9
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
@@ -375,7 +375,7 @@ define <vscale x 2 x i16> @vrem_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; RV32-NEXT:    vsra.vi v9, v9, 1
 ; RV32-NEXT:    vsrl.vi v10, v9, 15
 ; RV32-NEXT:    vadd.vv v9, v9, v10
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v9
 ; RV32-NEXT:    ret
 ;
@@ -388,7 +388,7 @@ define <vscale x 2 x i16> @vrem_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; RV64-NEXT:    vsra.vi v9, v9, 1
 ; RV64-NEXT:    vsrl.vi v10, v9, 15
 ; RV64-NEXT:    vadd.vv v9, v9, v10
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v9
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
@@ -429,7 +429,7 @@ define <vscale x 4 x i16> @vrem_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; RV32-NEXT:    vsra.vi v9, v9, 1
 ; RV32-NEXT:    vsrl.vi v10, v9, 15
 ; RV32-NEXT:    vadd.vv v9, v9, v10
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v9
 ; RV32-NEXT:    ret
 ;
@@ -442,7 +442,7 @@ define <vscale x 4 x i16> @vrem_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; RV64-NEXT:    vsra.vi v9, v9, 1
 ; RV64-NEXT:    vsrl.vi v10, v9, 15
 ; RV64-NEXT:    vadd.vv v9, v9, v10
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v9
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
@@ -483,7 +483,7 @@ define <vscale x 8 x i16> @vrem_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; RV32-NEXT:    vsra.vi v10, v10, 1
 ; RV32-NEXT:    vsrl.vi v12, v10, 15
 ; RV32-NEXT:    vadd.vv v10, v10, v12
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v10
 ; RV32-NEXT:    ret
 ;
@@ -496,7 +496,7 @@ define <vscale x 8 x i16> @vrem_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; RV64-NEXT:    vsra.vi v10, v10, 1
 ; RV64-NEXT:    vsrl.vi v12, v10, 15
 ; RV64-NEXT:    vadd.vv v10, v10, v12
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v10
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
@@ -537,7 +537,7 @@ define <vscale x 16 x i16> @vrem_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; RV32-NEXT:    vsra.vi v12, v12, 1
 ; RV32-NEXT:    vsrl.vi v16, v12, 15
 ; RV32-NEXT:    vadd.vv v12, v12, v16
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v12
 ; RV32-NEXT:    ret
 ;
@@ -550,7 +550,7 @@ define <vscale x 16 x i16> @vrem_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; RV64-NEXT:    vsra.vi v12, v12, 1
 ; RV64-NEXT:    vsrl.vi v16, v12, 15
 ; RV64-NEXT:    vadd.vv v12, v12, v16
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v12
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
@@ -591,7 +591,7 @@ define <vscale x 32 x i16> @vrem_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; RV32-NEXT:    vsra.vi v16, v16, 1
 ; RV32-NEXT:    vsrl.vi v24, v16, 15
 ; RV32-NEXT:    vadd.vv v16, v16, v24
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v16
 ; RV32-NEXT:    ret
 ;
@@ -604,7 +604,7 @@ define <vscale x 32 x i16> @vrem_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; RV64-NEXT:    vsra.vi v16, v16, 1
 ; RV64-NEXT:    vsrl.vi v24, v16, 15
 ; RV64-NEXT:    vadd.vv v16, v16, v24
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v16
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
@@ -646,7 +646,7 @@ define <vscale x 1 x i32> @vrem_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; RV32-NEXT:    vsrl.vi v10, v9, 31
 ; RV32-NEXT:    vsra.vi v9, v9, 2
 ; RV32-NEXT:    vadd.vv v9, v9, v10
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v9
 ; RV32-NEXT:    ret
 ;
@@ -660,7 +660,7 @@ define <vscale x 1 x i32> @vrem_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; RV64-NEXT:    vsra.vi v9, v9, 2
 ; RV64-NEXT:    vsrl.vi v10, v9, 31
 ; RV64-NEXT:    vadd.vv v9, v9, v10
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v9
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
@@ -702,7 +702,7 @@ define <vscale x 2 x i32> @vrem_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; RV32-NEXT:    vsrl.vi v10, v9, 31
 ; RV32-NEXT:    vsra.vi v9, v9, 2
 ; RV32-NEXT:    vadd.vv v9, v9, v10
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v9
 ; RV32-NEXT:    ret
 ;
@@ -716,7 +716,7 @@ define <vscale x 2 x i32> @vrem_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; RV64-NEXT:    vsra.vi v9, v9, 2
 ; RV64-NEXT:    vsrl.vi v10, v9, 31
 ; RV64-NEXT:    vadd.vv v9, v9, v10
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v9
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
@@ -758,7 +758,7 @@ define <vscale x 4 x i32> @vrem_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; RV32-NEXT:    vsrl.vi v12, v10, 31
 ; RV32-NEXT:    vsra.vi v10, v10, 2
 ; RV32-NEXT:    vadd.vv v10, v10, v12
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v10
 ; RV32-NEXT:    ret
 ;
@@ -772,7 +772,7 @@ define <vscale x 4 x i32> @vrem_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; RV64-NEXT:    vsra.vi v10, v10, 2
 ; RV64-NEXT:    vsrl.vi v12, v10, 31
 ; RV64-NEXT:    vadd.vv v10, v10, v12
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v10
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
@@ -814,7 +814,7 @@ define <vscale x 8 x i32> @vrem_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; RV32-NEXT:    vsrl.vi v16, v12, 31
 ; RV32-NEXT:    vsra.vi v12, v12, 2
 ; RV32-NEXT:    vadd.vv v12, v12, v16
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v12
 ; RV32-NEXT:    ret
 ;
@@ -828,7 +828,7 @@ define <vscale x 8 x i32> @vrem_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; RV64-NEXT:    vsra.vi v12, v12, 2
 ; RV64-NEXT:    vsrl.vi v16, v12, 31
 ; RV64-NEXT:    vadd.vv v12, v12, v16
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v12
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
@@ -870,7 +870,7 @@ define <vscale x 16 x i32> @vrem_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; RV32-NEXT:    vsrl.vi v24, v16, 31
 ; RV32-NEXT:    vsra.vi v16, v16, 2
 ; RV32-NEXT:    vadd.vv v16, v16, v24
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v16
 ; RV32-NEXT:    ret
 ;
@@ -884,7 +884,7 @@ define <vscale x 16 x i32> @vrem_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; RV64-NEXT:    vsra.vi v16, v16, 2
 ; RV64-NEXT:    vsrl.vi v24, v16, 31
 ; RV64-NEXT:    vadd.vv v16, v16, v24
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v16
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
@@ -943,11 +943,11 @@ define <vscale x 1 x i64> @vrem_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vmulh.vv v9, v8, v9
-; RV32-NEXT:    addi a0, zero, 63
+; RV32-NEXT:    li a0, 63
 ; RV32-NEXT:    vsrl.vx v10, v9, a0
 ; RV32-NEXT:    vsra.vi v9, v9, 1
 ; RV32-NEXT:    vadd.vv v9, v9, v10
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v9
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -964,11 +964,11 @@ define <vscale x 1 x i64> @vrem_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; RV64-NEXT:    addi a0, a0, 1755
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmulh.vx v9, v8, a0
-; RV64-NEXT:    addi a0, zero, 63
+; RV64-NEXT:    li a0, 63
 ; RV64-NEXT:    vsrl.vx v10, v9, a0
 ; RV64-NEXT:    vsra.vi v9, v9, 1
 ; RV64-NEXT:    vadd.vv v9, v9, v10
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v9
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
@@ -1027,11 +1027,11 @@ define <vscale x 2 x i64> @vrem_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vmulh.vv v10, v8, v10
-; RV32-NEXT:    addi a0, zero, 63
+; RV32-NEXT:    li a0, 63
 ; RV32-NEXT:    vsrl.vx v12, v10, a0
 ; RV32-NEXT:    vsra.vi v10, v10, 1
 ; RV32-NEXT:    vadd.vv v10, v10, v12
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v10
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -1048,11 +1048,11 @@ define <vscale x 2 x i64> @vrem_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; RV64-NEXT:    addi a0, a0, 1755
 ; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; RV64-NEXT:    vmulh.vx v10, v8, a0
-; RV64-NEXT:    addi a0, zero, 63
+; RV64-NEXT:    li a0, 63
 ; RV64-NEXT:    vsrl.vx v12, v10, a0
 ; RV64-NEXT:    vsra.vi v10, v10, 1
 ; RV64-NEXT:    vadd.vv v10, v10, v12
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v10
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
@@ -1111,11 +1111,11 @@ define <vscale x 4 x i64> @vrem_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vmulh.vv v12, v8, v12
-; RV32-NEXT:    addi a0, zero, 63
+; RV32-NEXT:    li a0, 63
 ; RV32-NEXT:    vsrl.vx v16, v12, a0
 ; RV32-NEXT:    vsra.vi v12, v12, 1
 ; RV32-NEXT:    vadd.vv v12, v12, v16
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v12
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -1132,11 +1132,11 @@ define <vscale x 4 x i64> @vrem_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; RV64-NEXT:    addi a0, a0, 1755
 ; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; RV64-NEXT:    vmulh.vx v12, v8, a0
-; RV64-NEXT:    addi a0, zero, 63
+; RV64-NEXT:    li a0, 63
 ; RV64-NEXT:    vsrl.vx v16, v12, a0
 ; RV64-NEXT:    vsra.vi v12, v12, 1
 ; RV64-NEXT:    vadd.vv v12, v12, v16
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v12
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
@@ -1195,11 +1195,11 @@ define <vscale x 8 x i64> @vrem_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmulh.vv v16, v8, v16
-; RV32-NEXT:    addi a0, zero, 63
+; RV32-NEXT:    li a0, 63
 ; RV32-NEXT:    vsrl.vx v24, v16, a0
 ; RV32-NEXT:    vsra.vi v16, v16, 1
 ; RV32-NEXT:    vadd.vv v16, v16, v24
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v16
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -1216,11 +1216,11 @@ define <vscale x 8 x i64> @vrem_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; RV64-NEXT:    addi a0, a0, 1755
 ; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vmulh.vx v16, v8, a0
-; RV64-NEXT:    addi a0, zero, 63
+; RV64-NEXT:    li a0, 63
 ; RV64-NEXT:    vsrl.vx v24, v16, a0
 ; RV64-NEXT:    vsra.vi v16, v16, 1
 ; RV64-NEXT:    vadd.vv v16, v16, v24
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v16
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll
index 81d81fc74580..792ed45d6ab6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll
@@ -27,11 +27,11 @@ define <vscale x 1 x i8> @vremu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b)
 define <vscale x 1 x i8> @vremu_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vremu_vi_nxv1i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 33
+; CHECK-NEXT:    li a0, 33
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmulhu.vx v9, v8, a0
 ; CHECK-NEXT:    vsrl.vi v9, v9, 5
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i8> undef, i8 -7, i32 0
@@ -65,11 +65,11 @@ define <vscale x 2 x i8> @vremu_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b)
 define <vscale x 2 x i8> @vremu_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vremu_vi_nxv2i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 33
+; CHECK-NEXT:    li a0, 33
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmulhu.vx v9, v8, a0
 ; CHECK-NEXT:    vsrl.vi v9, v9, 5
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i8> undef, i8 -7, i32 0
@@ -103,11 +103,11 @@ define <vscale x 4 x i8> @vremu_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b)
 define <vscale x 4 x i8> @vremu_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vremu_vi_nxv4i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 33
+; CHECK-NEXT:    li a0, 33
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmulhu.vx v9, v8, a0
 ; CHECK-NEXT:    vsrl.vi v9, v9, 5
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i8> undef, i8 -7, i32 0
@@ -141,11 +141,11 @@ define <vscale x 8 x i8> @vremu_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b)
 define <vscale x 8 x i8> @vremu_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vremu_vi_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 33
+; CHECK-NEXT:    li a0, 33
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmulhu.vx v9, v8, a0
 ; CHECK-NEXT:    vsrl.vi v9, v9, 5
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v9
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i8> undef, i8 -7, i32 0
@@ -179,11 +179,11 @@ define <vscale x 16 x i8> @vremu_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %
 define <vscale x 16 x i8> @vremu_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vremu_vi_nxv16i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 33
+; CHECK-NEXT:    li a0, 33
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vmulhu.vx v10, v8, a0
 ; CHECK-NEXT:    vsrl.vi v10, v10, 5
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v10
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 16 x i8> undef, i8 -7, i32 0
@@ -217,11 +217,11 @@ define <vscale x 32 x i8> @vremu_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %
 define <vscale x 32 x i8> @vremu_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: vremu_vi_nxv32i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 33
+; CHECK-NEXT:    li a0, 33
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vmulhu.vx v12, v8, a0
 ; CHECK-NEXT:    vsrl.vi v12, v12, 5
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v12
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 32 x i8> undef, i8 -7, i32 0
@@ -255,11 +255,11 @@ define <vscale x 64 x i8> @vremu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %
 define <vscale x 64 x i8> @vremu_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: vremu_vi_nxv64i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 33
+; CHECK-NEXT:    li a0, 33
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vmulhu.vx v16, v8, a0
 ; CHECK-NEXT:    vsrl.vi v16, v16, 5
-; CHECK-NEXT:    addi a0, zero, -7
+; CHECK-NEXT:    li a0, -7
 ; CHECK-NEXT:    vnmsac.vx v8, a0, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 64 x i8> undef, i8 -7, i32 0
@@ -298,7 +298,7 @@ define <vscale x 1 x i16> @vremu_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; RV32-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; RV32-NEXT:    vmulhu.vx v9, v8, a0
 ; RV32-NEXT:    vsrl.vi v9, v9, 13
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v9
 ; RV32-NEXT:    ret
 ;
@@ -309,7 +309,7 @@ define <vscale x 1 x i16> @vremu_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; RV64-NEXT:    vmulhu.vx v9, v8, a0
 ; RV64-NEXT:    vsrl.vi v9, v9, 13
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v9
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
@@ -348,7 +348,7 @@ define <vscale x 2 x i16> @vremu_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; RV32-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; RV32-NEXT:    vmulhu.vx v9, v8, a0
 ; RV32-NEXT:    vsrl.vi v9, v9, 13
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v9
 ; RV32-NEXT:    ret
 ;
@@ -359,7 +359,7 @@ define <vscale x 2 x i16> @vremu_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; RV64-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; RV64-NEXT:    vmulhu.vx v9, v8, a0
 ; RV64-NEXT:    vsrl.vi v9, v9, 13
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v9
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
@@ -398,7 +398,7 @@ define <vscale x 4 x i16> @vremu_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; RV32-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; RV32-NEXT:    vmulhu.vx v9, v8, a0
 ; RV32-NEXT:    vsrl.vi v9, v9, 13
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v9
 ; RV32-NEXT:    ret
 ;
@@ -409,7 +409,7 @@ define <vscale x 4 x i16> @vremu_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; RV64-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; RV64-NEXT:    vmulhu.vx v9, v8, a0
 ; RV64-NEXT:    vsrl.vi v9, v9, 13
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v9
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
@@ -448,7 +448,7 @@ define <vscale x 8 x i16> @vremu_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; RV32-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; RV32-NEXT:    vmulhu.vx v10, v8, a0
 ; RV32-NEXT:    vsrl.vi v10, v10, 13
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v10
 ; RV32-NEXT:    ret
 ;
@@ -459,7 +459,7 @@ define <vscale x 8 x i16> @vremu_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; RV64-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; RV64-NEXT:    vmulhu.vx v10, v8, a0
 ; RV64-NEXT:    vsrl.vi v10, v10, 13
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v10
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
@@ -498,7 +498,7 @@ define <vscale x 16 x i16> @vremu_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; RV32-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; RV32-NEXT:    vmulhu.vx v12, v8, a0
 ; RV32-NEXT:    vsrl.vi v12, v12, 13
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v12
 ; RV32-NEXT:    ret
 ;
@@ -509,7 +509,7 @@ define <vscale x 16 x i16> @vremu_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; RV64-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; RV64-NEXT:    vmulhu.vx v12, v8, a0
 ; RV64-NEXT:    vsrl.vi v12, v12, 13
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v12
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
@@ -548,7 +548,7 @@ define <vscale x 32 x i16> @vremu_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; RV32-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; RV32-NEXT:    vmulhu.vx v16, v8, a0
 ; RV32-NEXT:    vsrl.vi v16, v16, 13
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v16
 ; RV32-NEXT:    ret
 ;
@@ -559,7 +559,7 @@ define <vscale x 32 x i16> @vremu_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; RV64-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; RV64-NEXT:    vmulhu.vx v16, v8, a0
 ; RV64-NEXT:    vsrl.vi v16, v16, 13
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v16
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
@@ -598,7 +598,7 @@ define <vscale x 1 x i32> @vremu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; RV32-NEXT:    vmulhu.vx v9, v8, a0
 ; RV32-NEXT:    vsrl.vi v9, v9, 29
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v9
 ; RV32-NEXT:    ret
 ;
@@ -609,7 +609,7 @@ define <vscale x 1 x i32> @vremu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; RV64-NEXT:    vmulhu.vx v9, v8, a0
 ; RV64-NEXT:    vsrl.vi v9, v9, 29
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v9
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
@@ -648,7 +648,7 @@ define <vscale x 2 x i32> @vremu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; RV32-NEXT:    vmulhu.vx v9, v8, a0
 ; RV32-NEXT:    vsrl.vi v9, v9, 29
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v9
 ; RV32-NEXT:    ret
 ;
@@ -659,7 +659,7 @@ define <vscale x 2 x i32> @vremu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; RV64-NEXT:    vmulhu.vx v9, v8, a0
 ; RV64-NEXT:    vsrl.vi v9, v9, 29
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v9
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
@@ -698,7 +698,7 @@ define <vscale x 4 x i32> @vremu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; RV32-NEXT:    vmulhu.vx v10, v8, a0
 ; RV32-NEXT:    vsrl.vi v10, v10, 29
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v10
 ; RV32-NEXT:    ret
 ;
@@ -709,7 +709,7 @@ define <vscale x 4 x i32> @vremu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; RV64-NEXT:    vmulhu.vx v10, v8, a0
 ; RV64-NEXT:    vsrl.vi v10, v10, 29
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v10
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
@@ -748,7 +748,7 @@ define <vscale x 8 x i32> @vremu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; RV32-NEXT:    vmulhu.vx v12, v8, a0
 ; RV32-NEXT:    vsrl.vi v12, v12, 29
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v12
 ; RV32-NEXT:    ret
 ;
@@ -759,7 +759,7 @@ define <vscale x 8 x i32> @vremu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; RV64-NEXT:    vmulhu.vx v12, v8, a0
 ; RV64-NEXT:    vsrl.vi v12, v12, 29
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v12
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
@@ -798,7 +798,7 @@ define <vscale x 16 x i32> @vremu_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; RV32-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; RV32-NEXT:    vmulhu.vx v16, v8, a0
 ; RV32-NEXT:    vsrl.vi v16, v16, 29
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v16
 ; RV32-NEXT:    ret
 ;
@@ -809,7 +809,7 @@ define <vscale x 16 x i32> @vremu_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; RV64-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; RV64-NEXT:    vmulhu.vx v16, v8, a0
 ; RV64-NEXT:    vsrl.vi v16, v16, 29
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v16
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
@@ -860,29 +860,29 @@ define <vscale x 1 x i64> @vremu_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    lui a0, 131072
 ; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    sw a0, 8(sp)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v9, (a0), zero
 ; RV32-NEXT:    vmulhu.vv v9, v8, v9
-; RV32-NEXT:    addi a0, zero, 61
+; RV32-NEXT:    li a0, 61
 ; RV32-NEXT:    vsrl.vx v9, v9, a0
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v9
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vi_nxv1i64_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    slli a0, a0, 61
 ; RV64-NEXT:    addi a0, a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vmulhu.vx v9, v8, a0
-; RV64-NEXT:    addi a0, zero, 61
+; RV64-NEXT:    li a0, 61
 ; RV64-NEXT:    vsrl.vx v9, v9, a0
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v9
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
@@ -908,7 +908,7 @@ define <vscale x 1 x i64> @vremu_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
 define <vscale x 1 x i64> @vremu_vi_nxv1i64_2(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
 ; CHECK-LABEL: vremu_vi_nxv1i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v10, a0
 ; CHECK-NEXT:    vsll.vv v9, v10, v9
@@ -964,29 +964,29 @@ define <vscale x 2 x i64> @vremu_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    lui a0, 131072
 ; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    sw a0, 8(sp)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
 ; RV32-NEXT:    vmulhu.vv v10, v8, v10
-; RV32-NEXT:    addi a0, zero, 61
+; RV32-NEXT:    li a0, 61
 ; RV32-NEXT:    vsrl.vx v10, v10, a0
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v10
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vi_nxv2i64_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    slli a0, a0, 61
 ; RV64-NEXT:    addi a0, a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; RV64-NEXT:    vmulhu.vx v10, v8, a0
-; RV64-NEXT:    addi a0, zero, 61
+; RV64-NEXT:    li a0, 61
 ; RV64-NEXT:    vsrl.vx v10, v10, a0
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v10
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
@@ -1012,7 +1012,7 @@ define <vscale x 2 x i64> @vremu_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
 define <vscale x 2 x i64> @vremu_vi_nxv2i64_2(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
 ; CHECK-LABEL: vremu_vi_nxv2i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vmv.v.x v12, a0
 ; CHECK-NEXT:    vsll.vv v10, v12, v10
@@ -1068,29 +1068,29 @@ define <vscale x 4 x i64> @vremu_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    lui a0, 131072
 ; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    sw a0, 8(sp)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v12, (a0), zero
 ; RV32-NEXT:    vmulhu.vv v12, v8, v12
-; RV32-NEXT:    addi a0, zero, 61
+; RV32-NEXT:    li a0, 61
 ; RV32-NEXT:    vsrl.vx v12, v12, a0
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v12
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vi_nxv4i64_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    slli a0, a0, 61
 ; RV64-NEXT:    addi a0, a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; RV64-NEXT:    vmulhu.vx v12, v8, a0
-; RV64-NEXT:    addi a0, zero, 61
+; RV64-NEXT:    li a0, 61
 ; RV64-NEXT:    vsrl.vx v12, v12, a0
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v12
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
@@ -1116,7 +1116,7 @@ define <vscale x 4 x i64> @vremu_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
 define <vscale x 4 x i64> @vremu_vi_nxv4i64_2(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
 ; CHECK-LABEL: vremu_vi_nxv4i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vmv.v.x v16, a0
 ; CHECK-NEXT:    vsll.vv v12, v16, v12
@@ -1172,29 +1172,29 @@ define <vscale x 8 x i64> @vremu_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    lui a0, 131072
 ; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    sw a0, 8(sp)
 ; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vlse64.v v16, (a0), zero
 ; RV32-NEXT:    vmulhu.vv v16, v8, v16
-; RV32-NEXT:    addi a0, zero, 61
+; RV32-NEXT:    li a0, 61
 ; RV32-NEXT:    vsrl.vx v16, v16, a0
-; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    li a0, -7
 ; RV32-NEXT:    vnmsac.vx v8, a0, v16
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vremu_vi_nxv8i64_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    slli a0, a0, 61
 ; RV64-NEXT:    addi a0, a0, 1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vmulhu.vx v16, v8, a0
-; RV64-NEXT:    addi a0, zero, 61
+; RV64-NEXT:    li a0, 61
 ; RV64-NEXT:    vsrl.vx v16, v16, a0
-; RV64-NEXT:    addi a0, zero, -7
+; RV64-NEXT:    li a0, -7
 ; RV64-NEXT:    vnmsac.vx v8, a0, v16
 ; RV64-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
@@ -1220,7 +1220,7 @@ define <vscale x 8 x i64> @vremu_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
 define <vscale x 8 x i64> @vremu_vi_nxv8i64_2(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
 ; CHECK-LABEL: vremu_vi_nxv8i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv.v.x v24, a0
 ; CHECK-NEXT:    vsll.vv v16, v24, v16

diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
index cd1a4379b38b..dededca7bb72 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
@@ -9,7 +9,7 @@ declare <vscale x 8 x i7> @llvm.vp.urem.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x
 define <vscale x 8 x i7> @vremu_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
 ; CHECK-LABEL: vremu_vx_nxv8i7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 127
+; CHECK-NEXT:    li a2, 127
 ; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a2
 ; CHECK-NEXT:    vmv.v.x v9, a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index 4a6b64ac4879..44c50a7ff607 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -101,7 +101,7 @@ define void @test6(i32* nocapture readonly %A, i32* nocapture %B, i64 %n) {
 ; CHECK-NEXT:    vsetvli a6, a2, e32, m1, ta, mu
 ; CHECK-NEXT:    beqz a6, .LBB5_3
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
-; CHECK-NEXT:    mv a4, zero
+; CHECK-NEXT:    li a4, 0
 ; CHECK-NEXT:  .LBB5_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    slli a5, a4, 2

diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll
index a1b31c90a467..096944c6a219 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll
@@ -461,7 +461,7 @@ define <vscale x 1 x i64> @vshl_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
 define <vscale x 1 x i64> @vshl_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vshl_vx_nxv1i64_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -510,7 +510,7 @@ define <vscale x 2 x i64> @vshl_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
 define <vscale x 2 x i64> @vshl_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vshl_vx_nxv2i64_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -559,7 +559,7 @@ define <vscale x 4 x i64> @vshl_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
 define <vscale x 4 x i64> @vshl_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vshl_vx_nxv4i64_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -608,7 +608,7 @@ define <vscale x 8 x i64> @vshl_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
 define <vscale x 8 x i64> @vshl_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vshl_vx_nxv8i64_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vsll.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
index 8dabc1395187..cdbeab6a1f70 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
@@ -11,7 +11,7 @@ define <vscale x 8 x i7> @vsll_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vmv.v.x v9, a0
-; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vand.vx v9, v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vsll.vv v8, v8, v9, v0.t

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll
index 3754d329cc32..724ffb2f65f2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll
@@ -41,14 +41,14 @@ define <vscale x 8 x i64> @vsplat_nxv8i64_2() {
 define <vscale x 8 x i64> @vsplat_nxv8i64_3() {
 ; RV32V-LABEL: vsplat_nxv8i64_3:
 ; RV32V:       # %bb.0:
-; RV32V-NEXT:    addi a0, zero, 255
+; RV32V-NEXT:    li a0, 255
 ; RV32V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32V-NEXT:    vmv.v.x v8, a0
 ; RV32V-NEXT:    ret
 ;
 ; RV64V-LABEL: vsplat_nxv8i64_3:
 ; RV64V:       # %bb.0:
-; RV64V-NEXT:    addi a0, zero, 255
+; RV64V-NEXT:    li a0, 255
 ; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV64V-NEXT:    vmv.v.x v8, a0
 ; RV64V-NEXT:    ret
@@ -74,7 +74,7 @@ define <vscale x 8 x i64> @vsplat_nxv8i64_4() {
 ;
 ; RV64V-LABEL: vsplat_nxv8i64_4:
 ; RV64V:       # %bb.0:
-; RV64V-NEXT:    addi a0, zero, 251
+; RV64V-NEXT:    li a0, 251
 ; RV64V-NEXT:    slli a0, a0, 24
 ; RV64V-NEXT:    addi a0, a0, -1281
 ; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
@@ -147,14 +147,14 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_7(<vscale x 8 x i64> %v) {
 define <vscale x 8 x i64> @vadd_vx_nxv8i64_8(<vscale x 8 x i64> %v) {
 ; RV32V-LABEL: vadd_vx_nxv8i64_8:
 ; RV32V:       # %bb.0:
-; RV32V-NEXT:    addi a0, zero, 255
+; RV32V-NEXT:    li a0, 255
 ; RV32V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32V-NEXT:    vadd.vx v8, v8, a0
 ; RV32V-NEXT:    ret
 ;
 ; RV64V-LABEL: vadd_vx_nxv8i64_8:
 ; RV64V:       # %bb.0:
-; RV64V-NEXT:    addi a0, zero, 255
+; RV64V-NEXT:    li a0, 255
 ; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV64V-NEXT:    vadd.vx v8, v8, a0
 ; RV64V-NEXT:    ret
@@ -204,7 +204,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_10(<vscale x 8 x i64> %v) {
 ;
 ; RV64V-LABEL: vadd_vx_nxv8i64_10:
 ; RV64V:       # %bb.0:
-; RV64V-NEXT:    addi a0, zero, 251
+; RV64V-NEXT:    li a0, 251
 ; RV64V-NEXT:    slli a0, a0, 24
 ; RV64V-NEXT:    addi a0, a0, -1281
 ; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
@@ -221,7 +221,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_11(<vscale x 8 x i64> %v) {
 ; RV32V:       # %bb.0:
 ; RV32V-NEXT:    addi sp, sp, -16
 ; RV32V-NEXT:    .cfi_def_cfa_offset 16
-; RV32V-NEXT:    addi a0, zero, 1
+; RV32V-NEXT:    li a0, 1
 ; RV32V-NEXT:    sw a0, 12(sp)
 ; RV32V-NEXT:    lui a0, 1028096
 ; RV32V-NEXT:    addi a0, a0, -1281
@@ -235,7 +235,7 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_11(<vscale x 8 x i64> %v) {
 ;
 ; RV64V-LABEL: vadd_vx_nxv8i64_11:
 ; RV64V:       # %bb.0:
-; RV64V-NEXT:    addi a0, zero, 507
+; RV64V-NEXT:    li a0, 507
 ; RV64V-NEXT:    slli a0, a0, 24
 ; RV64V-NEXT:    addi a0, a0, -1281
 ; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll
index 3635e00f4d0f..3c6cf9e089c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll
@@ -651,7 +651,7 @@ define <vscale x 1 x i64> @vsra_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 define <vscale x 1 x i64> @vsra_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vsra_vi_nxv1i64_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -698,7 +698,7 @@ define <vscale x 2 x i64> @vsra_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 define <vscale x 2 x i64> @vsra_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vsra_vi_nxv2i64_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -745,7 +745,7 @@ define <vscale x 4 x i64> @vsra_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 define <vscale x 4 x i64> @vsra_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vsra_vi_nxv4i64_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -792,7 +792,7 @@ define <vscale x 8 x i64> @vsra_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 define <vscale x 8 x i64> @vsra_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vsra_vi_nxv8i64_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vsra.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
index 32a351f3a766..676c1631c2ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
@@ -13,7 +13,7 @@ define <vscale x 8 x i7> @vsra_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <v
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vsra.vi v8, v8, 1
 ; CHECK-NEXT:    vmv.v.x v9, a0
-; CHECK-NEXT:    addi a0, zero, 127
+; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vand.vx v9, v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vsra.vv v8, v8, v9, v0.t

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll
index ca54bd22c220..2bf34ea07b98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll
@@ -461,7 +461,7 @@ define <vscale x 1 x i64> @vsrl_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
 define <vscale x 1 x i64> @vsrl_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vsrl_vx_nxv1i64_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -498,7 +498,7 @@ define <vscale x 2 x i64> @vsrl_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
 define <vscale x 2 x i64> @vsrl_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vsrl_vx_nxv2i64_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -535,7 +535,7 @@ define <vscale x 4 x i64> @vsrl_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
 define <vscale x 4 x i64> @vsrl_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vsrl_vx_nxv4i64_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -572,7 +572,7 @@ define <vscale x 8 x i64> @vsrl_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
 define <vscale x 8 x i64> @vsrl_vx_nxv8i64_1(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vsrl_vx_nxv8i64_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 32
+; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll
index 24c6497a0a64..663a1aaef6f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll
@@ -9,7 +9,7 @@ declare <vscale x 8 x i7> @llvm.vp.lshr.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x
 define <vscale x 8 x i7> @vsrl_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
 ; CHECK-LABEL: vsrl_vx_nxv8i7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a2, zero, 127
+; CHECK-NEXT:    li a2, 127
 ; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vand.vx v8, v8, a2
 ; CHECK-NEXT:    vmv.v.x v9, a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll
index dd3f5c630e0f..bf8c0fd77304 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll
@@ -31,7 +31,7 @@ define <vscale x 1 x i8> @ssub_nxv1i8_vx(<vscale x 1 x i8> %va, i8 %b) {
 define <vscale x 1 x i8> @ssub_nxv1i8_vi(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: ssub_nxv1i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -68,7 +68,7 @@ define <vscale x 2 x i8> @ssub_nxv2i8_vx(<vscale x 2 x i8> %va, i8 %b) {
 define <vscale x 2 x i8> @ssub_nxv2i8_vi(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: ssub_nxv2i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -105,7 +105,7 @@ define <vscale x 4 x i8> @ssub_nxv4i8_vx(<vscale x 4 x i8> %va, i8 %b) {
 define <vscale x 4 x i8> @ssub_nxv4i8_vi(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: ssub_nxv4i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -142,7 +142,7 @@ define <vscale x 8 x i8> @ssub_nxv8i8_vx(<vscale x 8 x i8> %va, i8 %b) {
 define <vscale x 8 x i8> @ssub_nxv8i8_vi(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: ssub_nxv8i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -179,7 +179,7 @@ define <vscale x 16 x i8> @ssub_nxv16i8_vx(<vscale x 16 x i8> %va, i8 %b) {
 define <vscale x 16 x i8> @ssub_nxv16i8_vi(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: ssub_nxv16i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -216,7 +216,7 @@ define <vscale x 32 x i8> @ssub_nxv32i8_vx(<vscale x 32 x i8> %va, i8 %b) {
 define <vscale x 32 x i8> @ssub_nxv32i8_vi(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: ssub_nxv32i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -253,7 +253,7 @@ define <vscale x 64 x i8> @ssub_nxv64i8_vx(<vscale x 64 x i8> %va, i8 %b) {
 define <vscale x 64 x i8> @ssub_nxv64i8_vi(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: ssub_nxv64i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -290,7 +290,7 @@ define <vscale x 1 x i16> @ssub_nxv1i16_vx(<vscale x 1 x i16> %va, i16 %b) {
 define <vscale x 1 x i16> @ssub_nxv1i16_vi(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: ssub_nxv1i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -327,7 +327,7 @@ define <vscale x 2 x i16> @ssub_nxv2i16_vx(<vscale x 2 x i16> %va, i16 %b) {
 define <vscale x 2 x i16> @ssub_nxv2i16_vi(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: ssub_nxv2i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -364,7 +364,7 @@ define <vscale x 4 x i16> @ssub_nxv4i16_vx(<vscale x 4 x i16> %va, i16 %b) {
 define <vscale x 4 x i16> @ssub_nxv4i16_vi(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: ssub_nxv4i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -401,7 +401,7 @@ define <vscale x 8 x i16> @ssub_nxv8i16_vx(<vscale x 8 x i16> %va, i16 %b) {
 define <vscale x 8 x i16> @ssub_nxv8i16_vi(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: ssub_nxv8i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -438,7 +438,7 @@ define <vscale x 16 x i16> @ssub_nxv16i16_vx(<vscale x 16 x i16> %va, i16 %b) {
 define <vscale x 16 x i16> @ssub_nxv16i16_vi(<vscale x 16 x i16> %va) {
 ; CHECK-LABEL: ssub_nxv16i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -475,7 +475,7 @@ define <vscale x 32 x i16> @ssub_nxv32i16_vx(<vscale x 32 x i16> %va, i16 %b) {
 define <vscale x 32 x i16> @ssub_nxv32i16_vi(<vscale x 32 x i16> %va) {
 ; CHECK-LABEL: ssub_nxv32i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -512,7 +512,7 @@ define <vscale x 1 x i32> @ssub_nxv1i32_vx(<vscale x 1 x i32> %va, i32 %b) {
 define <vscale x 1 x i32> @ssub_nxv1i32_vi(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: ssub_nxv1i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -549,7 +549,7 @@ define <vscale x 2 x i32> @ssub_nxv2i32_vx(<vscale x 2 x i32> %va, i32 %b) {
 define <vscale x 2 x i32> @ssub_nxv2i32_vi(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: ssub_nxv2i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -586,7 +586,7 @@ define <vscale x 4 x i32> @ssub_nxv4i32_vx(<vscale x 4 x i32> %va, i32 %b) {
 define <vscale x 4 x i32> @ssub_nxv4i32_vi(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: ssub_nxv4i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -623,7 +623,7 @@ define <vscale x 8 x i32> @ssub_nxv8i32_vx(<vscale x 8 x i32> %va, i32 %b) {
 define <vscale x 8 x i32> @ssub_nxv8i32_vi(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: ssub_nxv8i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -660,7 +660,7 @@ define <vscale x 16 x i32> @ssub_nxv16i32_vx(<vscale x 16 x i32> %va, i32 %b) {
 define <vscale x 16 x i32> @ssub_nxv16i32_vi(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: ssub_nxv16i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -710,7 +710,7 @@ define <vscale x 1 x i64> @ssub_nxv1i64_vx(<vscale x 1 x i64> %va, i64 %b) {
 define <vscale x 1 x i64> @ssub_nxv1i64_vi(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: ssub_nxv1i64_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -760,7 +760,7 @@ define <vscale x 2 x i64> @ssub_nxv2i64_vx(<vscale x 2 x i64> %va, i64 %b) {
 define <vscale x 2 x i64> @ssub_nxv2i64_vi(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: ssub_nxv2i64_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -810,7 +810,7 @@ define <vscale x 4 x i64> @ssub_nxv4i64_vx(<vscale x 4 x i64> %va, i64 %b) {
 define <vscale x 4 x i64> @ssub_nxv4i64_vi(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: ssub_nxv4i64_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -860,7 +860,7 @@ define <vscale x 8 x i64> @ssub_nxv8i64_vx(<vscale x 8 x i64> %va, i64 %b) {
 define <vscale x 8 x i64> @ssub_nxv8i64_vi(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: ssub_nxv8i64_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vssub.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll
index d8a3989cb04e..20be3f2b29c9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll
@@ -31,7 +31,7 @@ define <vscale x 1 x i8> @usub_nxv1i8_vx(<vscale x 1 x i8> %va, i8 %b) {
 define <vscale x 1 x i8> @usub_nxv1i8_vi(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: usub_nxv1i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -68,7 +68,7 @@ define <vscale x 2 x i8> @usub_nxv2i8_vx(<vscale x 2 x i8> %va, i8 %b) {
 define <vscale x 2 x i8> @usub_nxv2i8_vi(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: usub_nxv2i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -105,7 +105,7 @@ define <vscale x 4 x i8> @usub_nxv4i8_vx(<vscale x 4 x i8> %va, i8 %b) {
 define <vscale x 4 x i8> @usub_nxv4i8_vi(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: usub_nxv4i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -142,7 +142,7 @@ define <vscale x 8 x i8> @usub_nxv8i8_vx(<vscale x 8 x i8> %va, i8 %b) {
 define <vscale x 8 x i8> @usub_nxv8i8_vi(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: usub_nxv8i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -179,7 +179,7 @@ define <vscale x 16 x i8> @usub_nxv16i8_vx(<vscale x 16 x i8> %va, i8 %b) {
 define <vscale x 16 x i8> @usub_nxv16i8_vi(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: usub_nxv16i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -216,7 +216,7 @@ define <vscale x 32 x i8> @usub_nxv32i8_vx(<vscale x 32 x i8> %va, i8 %b) {
 define <vscale x 32 x i8> @usub_nxv32i8_vi(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: usub_nxv32i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -253,7 +253,7 @@ define <vscale x 64 x i8> @usub_nxv64i8_vx(<vscale x 64 x i8> %va, i8 %b) {
 define <vscale x 64 x i8> @usub_nxv64i8_vi(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: usub_nxv64i8_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -290,7 +290,7 @@ define <vscale x 1 x i16> @usub_nxv1i16_vx(<vscale x 1 x i16> %va, i16 %b) {
 define <vscale x 1 x i16> @usub_nxv1i16_vi(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: usub_nxv1i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -327,7 +327,7 @@ define <vscale x 2 x i16> @usub_nxv2i16_vx(<vscale x 2 x i16> %va, i16 %b) {
 define <vscale x 2 x i16> @usub_nxv2i16_vi(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: usub_nxv2i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -364,7 +364,7 @@ define <vscale x 4 x i16> @usub_nxv4i16_vx(<vscale x 4 x i16> %va, i16 %b) {
 define <vscale x 4 x i16> @usub_nxv4i16_vi(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: usub_nxv4i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -401,7 +401,7 @@ define <vscale x 8 x i16> @usub_nxv8i16_vx(<vscale x 8 x i16> %va, i16 %b) {
 define <vscale x 8 x i16> @usub_nxv8i16_vi(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: usub_nxv8i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -438,7 +438,7 @@ define <vscale x 16 x i16> @usub_nxv16i16_vx(<vscale x 16 x i16> %va, i16 %b) {
 define <vscale x 16 x i16> @usub_nxv16i16_vi(<vscale x 16 x i16> %va) {
 ; CHECK-LABEL: usub_nxv16i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -475,7 +475,7 @@ define <vscale x 32 x i16> @usub_nxv32i16_vx(<vscale x 32 x i16> %va, i16 %b) {
 define <vscale x 32 x i16> @usub_nxv32i16_vi(<vscale x 32 x i16> %va) {
 ; CHECK-LABEL: usub_nxv32i16_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -512,7 +512,7 @@ define <vscale x 1 x i32> @usub_nxv1i32_vx(<vscale x 1 x i32> %va, i32 %b) {
 define <vscale x 1 x i32> @usub_nxv1i32_vi(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: usub_nxv1i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -549,7 +549,7 @@ define <vscale x 2 x i32> @usub_nxv2i32_vx(<vscale x 2 x i32> %va, i32 %b) {
 define <vscale x 2 x i32> @usub_nxv2i32_vi(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: usub_nxv2i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -586,7 +586,7 @@ define <vscale x 4 x i32> @usub_nxv4i32_vx(<vscale x 4 x i32> %va, i32 %b) {
 define <vscale x 4 x i32> @usub_nxv4i32_vi(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: usub_nxv4i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -623,7 +623,7 @@ define <vscale x 8 x i32> @usub_nxv8i32_vx(<vscale x 8 x i32> %va, i32 %b) {
 define <vscale x 8 x i32> @usub_nxv8i32_vi(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: usub_nxv8i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -660,7 +660,7 @@ define <vscale x 16 x i32> @usub_nxv16i32_vx(<vscale x 16 x i32> %va, i32 %b) {
 define <vscale x 16 x i32> @usub_nxv16i32_vi(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: usub_nxv16i32_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -710,7 +710,7 @@ define <vscale x 1 x i64> @usub_nxv1i64_vx(<vscale x 1 x i64> %va, i64 %b) {
 define <vscale x 1 x i64> @usub_nxv1i64_vi(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: usub_nxv1i64_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -760,7 +760,7 @@ define <vscale x 2 x i64> @usub_nxv2i64_vx(<vscale x 2 x i64> %va, i64 %b) {
 define <vscale x 2 x i64> @usub_nxv2i64_vi(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: usub_nxv2i64_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -810,7 +810,7 @@ define <vscale x 4 x i64> @usub_nxv4i64_vx(<vscale x 4 x i64> %va, i64 %b) {
 define <vscale x 4 x i64> @usub_nxv4i64_vi(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: usub_nxv4i64_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -860,7 +860,7 @@ define <vscale x 8 x i64> @usub_nxv8i64_vx(<vscale x 8 x i64> %va, i64 %b) {
 define <vscale x 8 x i64> @usub_nxv8i64_vi(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: usub_nxv8i64_vi:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vssubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll
index faf2e5d338ae..f9fe81665930 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll
@@ -27,7 +27,7 @@ define <vscale x 1 x i8> @vsub_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
 define <vscale x 1 x i8> @vsub_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv1i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -77,7 +77,7 @@ define <vscale x 2 x i8> @vsub_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
 define <vscale x 2 x i8> @vsub_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv2i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -112,7 +112,7 @@ define <vscale x 4 x i8> @vsub_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
 define <vscale x 4 x i8> @vsub_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv4i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -147,7 +147,7 @@ define <vscale x 8 x i8> @vsub_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
 define <vscale x 8 x i8> @vsub_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -182,7 +182,7 @@ define <vscale x 16 x i8> @vsub_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b
 define <vscale x 16 x i8> @vsub_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv16i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -217,7 +217,7 @@ define <vscale x 32 x i8> @vsub_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b
 define <vscale x 32 x i8> @vsub_vx_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv32i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -252,7 +252,7 @@ define <vscale x 64 x i8> @vsub_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b
 define <vscale x 64 x i8> @vsub_vx_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv64i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -287,7 +287,7 @@ define <vscale x 1 x i16> @vsub_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %
 define <vscale x 1 x i16> @vsub_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv1i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -322,7 +322,7 @@ define <vscale x 2 x i16> @vsub_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %
 define <vscale x 2 x i16> @vsub_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv2i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -357,7 +357,7 @@ define <vscale x 4 x i16> @vsub_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %
 define <vscale x 4 x i16> @vsub_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv4i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -392,7 +392,7 @@ define <vscale x 8 x i16> @vsub_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %
 define <vscale x 8 x i16> @vsub_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv8i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -427,7 +427,7 @@ define <vscale x 16 x i16> @vsub_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signex
 define <vscale x 16 x i16> @vsub_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv16i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -462,7 +462,7 @@ define <vscale x 32 x i16> @vsub_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signex
 define <vscale x 32 x i16> @vsub_vx_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv32i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -497,7 +497,7 @@ define <vscale x 1 x i32> @vsub_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %
 define <vscale x 1 x i32> @vsub_vx_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vsub_vx_nxv1i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -532,7 +532,7 @@ define <vscale x 2 x i32> @vsub_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %
 define <vscale x 2 x i32> @vsub_vx_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vsub_vx_nxv2i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -567,7 +567,7 @@ define <vscale x 4 x i32> @vsub_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %
 define <vscale x 4 x i32> @vsub_vx_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vsub_vx_nxv4i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -602,7 +602,7 @@ define <vscale x 8 x i32> @vsub_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %
 define <vscale x 8 x i32> @vsub_vx_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vsub_vx_nxv8i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -637,7 +637,7 @@ define <vscale x 16 x i32> @vsub_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signex
 define <vscale x 16 x i32> @vsub_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: vsub_vx_nxv16i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -685,7 +685,7 @@ define <vscale x 1 x i64> @vsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 define <vscale x 1 x i64> @vsub_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vsub_vx_nxv1i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -733,7 +733,7 @@ define <vscale x 2 x i64> @vsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 define <vscale x 2 x i64> @vsub_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vsub_vx_nxv2i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -781,7 +781,7 @@ define <vscale x 4 x i64> @vsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 define <vscale x 4 x i64> @vsub_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vsub_vx_nxv4i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -829,7 +829,7 @@ define <vscale x 8 x i64> @vsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 define <vscale x 8 x i64> @vsub_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vsub_vx_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    li a0, 1
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
index 7237ecff82c8..5afb940d9d07 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll
@@ -51,7 +51,7 @@ define <vscale x 1 x i8> @vxor_vi_nxv1i8_1(<vscale x 1 x i8> %va) {
 define <vscale x 1 x i8> @vxor_vi_nxv1i8_2(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vxor_vi_nxv1i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -110,7 +110,7 @@ define <vscale x 2 x i8> @vxor_vi_nxv2i8_1(<vscale x 2 x i8> %va) {
 define <vscale x 2 x i8> @vxor_vi_nxv2i8_2(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vxor_vi_nxv2i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -169,7 +169,7 @@ define <vscale x 4 x i8> @vxor_vi_nxv4i8_1(<vscale x 4 x i8> %va) {
 define <vscale x 4 x i8> @vxor_vi_nxv4i8_2(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vxor_vi_nxv4i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -228,7 +228,7 @@ define <vscale x 8 x i8> @vxor_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
 define <vscale x 8 x i8> @vxor_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vxor_vi_nxv8i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -287,7 +287,7 @@ define <vscale x 16 x i8> @vxor_vi_nxv16i8_1(<vscale x 16 x i8> %va) {
 define <vscale x 16 x i8> @vxor_vi_nxv16i8_2(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vxor_vi_nxv16i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -346,7 +346,7 @@ define <vscale x 32 x i8> @vxor_vi_nxv32i8_1(<vscale x 32 x i8> %va) {
 define <vscale x 32 x i8> @vxor_vi_nxv32i8_2(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: vxor_vi_nxv32i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -405,7 +405,7 @@ define <vscale x 64 x i8> @vxor_vi_nxv64i8_1(<vscale x 64 x i8> %va) {
 define <vscale x 64 x i8> @vxor_vi_nxv64i8_2(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: vxor_vi_nxv64i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -464,7 +464,7 @@ define <vscale x 1 x i16> @vxor_vi_nxv1i16_1(<vscale x 1 x i16> %va) {
 define <vscale x 1 x i16> @vxor_vi_nxv1i16_2(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: vxor_vi_nxv1i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -523,7 +523,7 @@ define <vscale x 2 x i16> @vxor_vi_nxv2i16_1(<vscale x 2 x i16> %va) {
 define <vscale x 2 x i16> @vxor_vi_nxv2i16_2(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: vxor_vi_nxv2i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -582,7 +582,7 @@ define <vscale x 4 x i16> @vxor_vi_nxv4i16_1(<vscale x 4 x i16> %va) {
 define <vscale x 4 x i16> @vxor_vi_nxv4i16_2(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: vxor_vi_nxv4i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -641,7 +641,7 @@ define <vscale x 8 x i16> @vxor_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
 define <vscale x 8 x i16> @vxor_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: vxor_vi_nxv8i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -700,7 +700,7 @@ define <vscale x 16 x i16> @vxor_vi_nxv16i16_1(<vscale x 16 x i16> %va) {
 define <vscale x 16 x i16> @vxor_vi_nxv16i16_2(<vscale x 16 x i16> %va) {
 ; CHECK-LABEL: vxor_vi_nxv16i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -759,7 +759,7 @@ define <vscale x 32 x i16> @vxor_vi_nxv32i16_1(<vscale x 32 x i16> %va) {
 define <vscale x 32 x i16> @vxor_vi_nxv32i16_2(<vscale x 32 x i16> %va) {
 ; CHECK-LABEL: vxor_vi_nxv32i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -818,7 +818,7 @@ define <vscale x 1 x i32> @vxor_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
 define <vscale x 1 x i32> @vxor_vi_nxv1i32_2(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vxor_vi_nxv1i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -877,7 +877,7 @@ define <vscale x 2 x i32> @vxor_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
 define <vscale x 2 x i32> @vxor_vi_nxv2i32_2(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vxor_vi_nxv2i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -936,7 +936,7 @@ define <vscale x 4 x i32> @vxor_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
 define <vscale x 4 x i32> @vxor_vi_nxv4i32_2(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vxor_vi_nxv4i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -995,7 +995,7 @@ define <vscale x 8 x i32> @vxor_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
 define <vscale x 8 x i32> @vxor_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vxor_vi_nxv8i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -1054,7 +1054,7 @@ define <vscale x 16 x i32> @vxor_vi_nxv16i32_1(<vscale x 16 x i32> %va) {
 define <vscale x 16 x i32> @vxor_vi_nxv16i32_2(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: vxor_vi_nxv16i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -1126,7 +1126,7 @@ define <vscale x 1 x i64> @vxor_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
 define <vscale x 1 x i64> @vxor_vi_nxv1i64_2(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vxor_vi_nxv1i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -1198,7 +1198,7 @@ define <vscale x 2 x i64> @vxor_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
 define <vscale x 2 x i64> @vxor_vi_nxv2i64_2(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vxor_vi_nxv2i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -1270,7 +1270,7 @@ define <vscale x 4 x i64> @vxor_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
 define <vscale x 4 x i64> @vxor_vi_nxv4i64_2(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vxor_vi_nxv4i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret
@@ -1342,7 +1342,7 @@ define <vscale x 8 x i64> @vxor_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
 define <vscale x 8 x i64> @vxor_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vxor_vi_nxv8i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vxor.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/sadd_sat.ll b/llvm/test/CodeGen/RISCV/sadd_sat.ll
index 4ced792c37b3..fdab75c63164 100644
--- a/llvm/test/CodeGen/RISCV/sadd_sat.ll
+++ b/llvm/test/CodeGen/RISCV/sadd_sat.ll
@@ -112,7 +112,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; RV64I-NEXT:    beq a1, a2, .LBB1_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    srai a0, a0, 63
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:  .LBB1_2:
@@ -146,7 +146,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; RV64IZbbNOZbt-NEXT:    beq a1, a2, .LBB1_2
 ; RV64IZbbNOZbt-NEXT:  # %bb.1:
 ; RV64IZbbNOZbt-NEXT:    srai a0, a0, 63
-; RV64IZbbNOZbt-NEXT:    addi a1, zero, -1
+; RV64IZbbNOZbt-NEXT:    li a1, -1
 ; RV64IZbbNOZbt-NEXT:    slli a1, a1, 63
 ; RV64IZbbNOZbt-NEXT:    xor a0, a0, a1
 ; RV64IZbbNOZbt-NEXT:  .LBB1_2:
@@ -176,7 +176,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; RV64IZbbZbt-NEXT:    slti a1, a1, 0
 ; RV64IZbbZbt-NEXT:    xor a0, a1, a0
 ; RV64IZbbZbt-NEXT:    srai a1, a2, 63
-; RV64IZbbZbt-NEXT:    addi a3, zero, -1
+; RV64IZbbZbt-NEXT:    li a3, -1
 ; RV64IZbbZbt-NEXT:    slli a3, a3, 63
 ; RV64IZbbZbt-NEXT:    xor a1, a1, a3
 ; RV64IZbbZbt-NEXT:    cmov a0, a0, a1, a2
@@ -251,54 +251,54 @@ define signext i8 @func8(i8 signext %x, i8 signext %y) nounwind {
 ; RV32I-LABEL: func8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    addi a1, zero, 127
+; RV32I-NEXT:    li a1, 127
 ; RV32I-NEXT:    bge a0, a1, .LBB3_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    addi a1, zero, -128
+; RV32I-NEXT:    li a1, -128
 ; RV32I-NEXT:    bge a1, a0, .LBB3_4
 ; RV32I-NEXT:  .LBB3_2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB3_3:
-; RV32I-NEXT:    addi a0, zero, 127
-; RV32I-NEXT:    addi a1, zero, -128
+; RV32I-NEXT:    li a0, 127
+; RV32I-NEXT:    li a1, -128
 ; RV32I-NEXT:    blt a1, a0, .LBB3_2
 ; RV32I-NEXT:  .LBB3_4:
-; RV32I-NEXT:    addi a0, zero, -128
+; RV32I-NEXT:    li a0, -128
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func8:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    addi a1, zero, 127
+; RV64I-NEXT:    li a1, 127
 ; RV64I-NEXT:    bge a0, a1, .LBB3_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    addi a1, zero, -128
+; RV64I-NEXT:    li a1, -128
 ; RV64I-NEXT:    bge a1, a0, .LBB3_4
 ; RV64I-NEXT:  .LBB3_2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB3_3:
-; RV64I-NEXT:    addi a0, zero, 127
-; RV64I-NEXT:    addi a1, zero, -128
+; RV64I-NEXT:    li a0, 127
+; RV64I-NEXT:    li a1, -128
 ; RV64I-NEXT:    blt a1, a0, .LBB3_2
 ; RV64I-NEXT:  .LBB3_4:
-; RV64I-NEXT:    addi a0, zero, -128
+; RV64I-NEXT:    li a0, -128
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func8:
 ; RV32IZbb:       # %bb.0:
 ; RV32IZbb-NEXT:    add a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, 127
+; RV32IZbb-NEXT:    li a1, 127
 ; RV32IZbb-NEXT:    min a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, -128
+; RV32IZbb-NEXT:    li a1, -128
 ; RV32IZbb-NEXT:    max a0, a0, a1
 ; RV32IZbb-NEXT:    ret
 ;
 ; RV64IZbb-LABEL: func8:
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    add a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, 127
+; RV64IZbb-NEXT:    li a1, 127
 ; RV64IZbb-NEXT:    min a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, -128
+; RV64IZbb-NEXT:    li a1, -128
 ; RV64IZbb-NEXT:    max a0, a0, a1
 ; RV64IZbb-NEXT:    ret
   %tmp = call i8 @llvm.sadd.sat.i8(i8 %x, i8 %y);
@@ -309,54 +309,54 @@ define signext i4 @func3(i4 signext %x, i4 signext %y) nounwind {
 ; RV32I-LABEL: func3:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    addi a1, zero, 7
+; RV32I-NEXT:    li a1, 7
 ; RV32I-NEXT:    bge a0, a1, .LBB4_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    addi a1, zero, -8
+; RV32I-NEXT:    li a1, -8
 ; RV32I-NEXT:    bge a1, a0, .LBB4_4
 ; RV32I-NEXT:  .LBB4_2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB4_3:
-; RV32I-NEXT:    addi a0, zero, 7
-; RV32I-NEXT:    addi a1, zero, -8
+; RV32I-NEXT:    li a0, 7
+; RV32I-NEXT:    li a1, -8
 ; RV32I-NEXT:    blt a1, a0, .LBB4_2
 ; RV32I-NEXT:  .LBB4_4:
-; RV32I-NEXT:    addi a0, zero, -8
+; RV32I-NEXT:    li a0, -8
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func3:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    addi a1, zero, 7
+; RV64I-NEXT:    li a1, 7
 ; RV64I-NEXT:    bge a0, a1, .LBB4_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    addi a1, zero, -8
+; RV64I-NEXT:    li a1, -8
 ; RV64I-NEXT:    bge a1, a0, .LBB4_4
 ; RV64I-NEXT:  .LBB4_2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB4_3:
-; RV64I-NEXT:    addi a0, zero, 7
-; RV64I-NEXT:    addi a1, zero, -8
+; RV64I-NEXT:    li a0, 7
+; RV64I-NEXT:    li a1, -8
 ; RV64I-NEXT:    blt a1, a0, .LBB4_2
 ; RV64I-NEXT:  .LBB4_4:
-; RV64I-NEXT:    addi a0, zero, -8
+; RV64I-NEXT:    li a0, -8
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func3:
 ; RV32IZbb:       # %bb.0:
 ; RV32IZbb-NEXT:    add a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, 7
+; RV32IZbb-NEXT:    li a1, 7
 ; RV32IZbb-NEXT:    min a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, -8
+; RV32IZbb-NEXT:    li a1, -8
 ; RV32IZbb-NEXT:    max a0, a0, a1
 ; RV32IZbb-NEXT:    ret
 ;
 ; RV64IZbb-LABEL: func3:
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    add a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, 7
+; RV64IZbb-NEXT:    li a1, 7
 ; RV64IZbb-NEXT:    min a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, -8
+; RV64IZbb-NEXT:    li a1, -8
 ; RV64IZbb-NEXT:    max a0, a0, a1
 ; RV64IZbb-NEXT:    ret
   %tmp = call i4 @llvm.sadd.sat.i4(i4 %x, i4 %y);

diff  --git a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
index eb0988ecbc93..ac59d1087da2 100644
--- a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
@@ -120,7 +120,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; RV64I-NEXT:    beq a2, a1, .LBB1_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    srai a0, a0, 63
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:  .LBB1_2:
@@ -154,7 +154,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; RV64IZbbNOZbt-NEXT:    beq a2, a1, .LBB1_2
 ; RV64IZbbNOZbt-NEXT:  # %bb.1:
 ; RV64IZbbNOZbt-NEXT:    srai a0, a0, 63
-; RV64IZbbNOZbt-NEXT:    addi a1, zero, -1
+; RV64IZbbNOZbt-NEXT:    li a1, -1
 ; RV64IZbbNOZbt-NEXT:    slli a1, a1, 63
 ; RV64IZbbNOZbt-NEXT:    xor a0, a0, a1
 ; RV64IZbbNOZbt-NEXT:  .LBB1_2:
@@ -184,7 +184,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; RV64IZbbZbt-NEXT:    slti a2, a2, 0
 ; RV64IZbbZbt-NEXT:    xor a0, a2, a0
 ; RV64IZbbZbt-NEXT:    srai a2, a1, 63
-; RV64IZbbZbt-NEXT:    addi a3, zero, -1
+; RV64IZbbZbt-NEXT:    li a3, -1
 ; RV64IZbbZbt-NEXT:    slli a3, a3, 63
 ; RV64IZbbZbt-NEXT:    xor a2, a2, a3
 ; RV64IZbbZbt-NEXT:    cmov a0, a0, a2, a1
@@ -282,19 +282,19 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV32I-NEXT:    slli a1, a1, 24
 ; RV32I-NEXT:    srai a1, a1, 24
 ; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    addi a1, zero, 127
+; RV32I-NEXT:    li a1, 127
 ; RV32I-NEXT:    bge a0, a1, .LBB3_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    addi a1, zero, -128
+; RV32I-NEXT:    li a1, -128
 ; RV32I-NEXT:    bge a1, a0, .LBB3_4
 ; RV32I-NEXT:  .LBB3_2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB3_3:
-; RV32I-NEXT:    addi a0, zero, 127
-; RV32I-NEXT:    addi a1, zero, -128
+; RV32I-NEXT:    li a0, 127
+; RV32I-NEXT:    li a1, -128
 ; RV32I-NEXT:    blt a1, a0, .LBB3_2
 ; RV32I-NEXT:  .LBB3_4:
-; RV32I-NEXT:    addi a0, zero, -128
+; RV32I-NEXT:    li a0, -128
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func8:
@@ -305,19 +305,19 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64I-NEXT:    slli a1, a1, 56
 ; RV64I-NEXT:    srai a1, a1, 56
 ; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    addi a1, zero, 127
+; RV64I-NEXT:    li a1, 127
 ; RV64I-NEXT:    bge a0, a1, .LBB3_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    addi a1, zero, -128
+; RV64I-NEXT:    li a1, -128
 ; RV64I-NEXT:    bge a1, a0, .LBB3_4
 ; RV64I-NEXT:  .LBB3_2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB3_3:
-; RV64I-NEXT:    addi a0, zero, 127
-; RV64I-NEXT:    addi a1, zero, -128
+; RV64I-NEXT:    li a0, 127
+; RV64I-NEXT:    li a1, -128
 ; RV64I-NEXT:    blt a1, a0, .LBB3_2
 ; RV64I-NEXT:  .LBB3_4:
-; RV64I-NEXT:    addi a0, zero, -128
+; RV64I-NEXT:    li a0, -128
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func8:
@@ -326,9 +326,9 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV32IZbb-NEXT:    mul a1, a1, a2
 ; RV32IZbb-NEXT:    sext.b a1, a1
 ; RV32IZbb-NEXT:    add a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, 127
+; RV32IZbb-NEXT:    li a1, 127
 ; RV32IZbb-NEXT:    min a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, -128
+; RV32IZbb-NEXT:    li a1, -128
 ; RV32IZbb-NEXT:    max a0, a0, a1
 ; RV32IZbb-NEXT:    ret
 ;
@@ -338,9 +338,9 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64IZbb-NEXT:    mul a1, a1, a2
 ; RV64IZbb-NEXT:    sext.b a1, a1
 ; RV64IZbb-NEXT:    add a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, 127
+; RV64IZbb-NEXT:    li a1, 127
 ; RV64IZbb-NEXT:    min a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, -128
+; RV64IZbb-NEXT:    li a1, -128
 ; RV64IZbb-NEXT:    max a0, a0, a1
 ; RV64IZbb-NEXT:    ret
   %a = mul i8 %y, %z
@@ -357,19 +357,19 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV32I-NEXT:    slli a1, a1, 28
 ; RV32I-NEXT:    srai a1, a1, 28
 ; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    addi a1, zero, 7
+; RV32I-NEXT:    li a1, 7
 ; RV32I-NEXT:    bge a0, a1, .LBB4_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    addi a1, zero, -8
+; RV32I-NEXT:    li a1, -8
 ; RV32I-NEXT:    bge a1, a0, .LBB4_4
 ; RV32I-NEXT:  .LBB4_2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB4_3:
-; RV32I-NEXT:    addi a0, zero, 7
-; RV32I-NEXT:    addi a1, zero, -8
+; RV32I-NEXT:    li a0, 7
+; RV32I-NEXT:    li a1, -8
 ; RV32I-NEXT:    blt a1, a0, .LBB4_2
 ; RV32I-NEXT:  .LBB4_4:
-; RV32I-NEXT:    addi a0, zero, -8
+; RV32I-NEXT:    li a0, -8
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func4:
@@ -380,19 +380,19 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64I-NEXT:    slli a1, a1, 60
 ; RV64I-NEXT:    srai a1, a1, 60
 ; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    addi a1, zero, 7
+; RV64I-NEXT:    li a1, 7
 ; RV64I-NEXT:    bge a0, a1, .LBB4_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    addi a1, zero, -8
+; RV64I-NEXT:    li a1, -8
 ; RV64I-NEXT:    bge a1, a0, .LBB4_4
 ; RV64I-NEXT:  .LBB4_2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB4_3:
-; RV64I-NEXT:    addi a0, zero, 7
-; RV64I-NEXT:    addi a1, zero, -8
+; RV64I-NEXT:    li a0, 7
+; RV64I-NEXT:    li a1, -8
 ; RV64I-NEXT:    blt a1, a0, .LBB4_2
 ; RV64I-NEXT:  .LBB4_4:
-; RV64I-NEXT:    addi a0, zero, -8
+; RV64I-NEXT:    li a0, -8
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func4:
@@ -403,9 +403,9 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV32IZbb-NEXT:    slli a1, a1, 28
 ; RV32IZbb-NEXT:    srai a1, a1, 28
 ; RV32IZbb-NEXT:    add a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, 7
+; RV32IZbb-NEXT:    li a1, 7
 ; RV32IZbb-NEXT:    min a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, -8
+; RV32IZbb-NEXT:    li a1, -8
 ; RV32IZbb-NEXT:    max a0, a0, a1
 ; RV32IZbb-NEXT:    ret
 ;
@@ -417,9 +417,9 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64IZbb-NEXT:    slli a1, a1, 60
 ; RV64IZbb-NEXT:    srai a1, a1, 60
 ; RV64IZbb-NEXT:    add a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, 7
+; RV64IZbb-NEXT:    li a1, 7
 ; RV64IZbb-NEXT:    min a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, -8
+; RV64IZbb-NEXT:    li a1, -8
 ; RV64IZbb-NEXT:    max a0, a0, a1
 ; RV64IZbb-NEXT:    ret
   %a = mul i4 %y, %z

diff  --git a/llvm/test/CodeGen/RISCV/select-cc.ll b/llvm/test/CodeGen/RISCV/select-cc.ll
index bffba3aaa81b..8bc441b7ac1b 100644
--- a/llvm/test/CodeGen/RISCV/select-cc.ll
+++ b/llvm/test/CodeGen/RISCV/select-cc.ll
@@ -105,7 +105,7 @@ define signext i32 @foo(i32 signext %a, i32 *%b) nounwind {
 ; RV32IBT-NEXT:    slti a2, a5, 1
 ; RV32IBT-NEXT:    lw a1, 0(a1)
 ; RV32IBT-NEXT:    cmov a0, a2, a0, a5
-; RV32IBT-NEXT:    addi a2, zero, -1
+; RV32IBT-NEXT:    li a2, -1
 ; RV32IBT-NEXT:    slt a2, a2, a5
 ; RV32IBT-NEXT:    cmov a0, a2, a0, a1
 ; RV32IBT-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/select-const.ll b/llvm/test/CodeGen/RISCV/select-const.ll
index c98c3d13c09f..bc43758c4d8a 100644
--- a/llvm/test/CodeGen/RISCV/select-const.ll
+++ b/llvm/test/CodeGen/RISCV/select-const.ll
@@ -63,49 +63,49 @@ define signext i32 @select_const_int_easy(i1 zeroext %a) nounwind {
 define signext i32 @select_const_int_one_away(i1 zeroext %a) nounwind {
 ; RV32I-LABEL: select_const_int_one_away:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a1, zero, 4
+; RV32I-NEXT:    li a1, 4
 ; RV32I-NEXT:    sub a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV32IF-LABEL: select_const_int_one_away:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    addi a1, zero, 4
+; RV32IF-NEXT:    li a1, 4
 ; RV32IF-NEXT:    sub a0, a1, a0
 ; RV32IF-NEXT:    ret
 ;
 ; RV32IBT-LABEL: select_const_int_one_away:
 ; RV32IBT:       # %bb.0:
-; RV32IBT-NEXT:    addi a1, zero, 4
+; RV32IBT-NEXT:    li a1, 4
 ; RV32IBT-NEXT:    sub a0, a1, a0
 ; RV32IBT-NEXT:    ret
 ;
 ; RV32IFBT-LABEL: select_const_int_one_away:
 ; RV32IFBT:       # %bb.0:
-; RV32IFBT-NEXT:    addi a1, zero, 4
+; RV32IFBT-NEXT:    li a1, 4
 ; RV32IFBT-NEXT:    sub a0, a1, a0
 ; RV32IFBT-NEXT:    ret
 ;
 ; RV64I-LABEL: select_const_int_one_away:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a1, zero, 4
+; RV64I-NEXT:    li a1, 4
 ; RV64I-NEXT:    sub a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_const_int_one_away:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    addi a1, zero, 4
+; RV64IFD-NEXT:    li a1, 4
 ; RV64IFD-NEXT:    sub a0, a1, a0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV64IBT-LABEL: select_const_int_one_away:
 ; RV64IBT:       # %bb.0:
-; RV64IBT-NEXT:    addi a1, zero, 4
+; RV64IBT-NEXT:    li a1, 4
 ; RV64IBT-NEXT:    sub a0, a1, a0
 ; RV64IBT-NEXT:    ret
 ;
 ; RV64IFDBT-LABEL: select_const_int_one_away:
 ; RV64IFDBT:       # %bb.0:
-; RV64IFDBT-NEXT:    addi a1, zero, 4
+; RV64IFDBT-NEXT:    li a1, 4
 ; RV64IFDBT-NEXT:    sub a0, a1, a0
 ; RV64IFDBT-NEXT:    ret
   %1 = select i1 %a, i32 3, i32 4
@@ -160,68 +160,68 @@ define signext i32 @select_const_int_harder(i1 zeroext %a) nounwind {
 ; RV32I-LABEL: select_const_int_harder:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    addi a0, zero, 6
+; RV32I-NEXT:    li a0, 6
 ; RV32I-NEXT:    bnez a1, .LBB3_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    addi a0, zero, 38
+; RV32I-NEXT:    li a0, 38
 ; RV32I-NEXT:  .LBB3_2:
 ; RV32I-NEXT:    ret
 ;
 ; RV32IF-LABEL: select_const_int_harder:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    mv a1, a0
-; RV32IF-NEXT:    addi a0, zero, 6
+; RV32IF-NEXT:    li a0, 6
 ; RV32IF-NEXT:    bnez a1, .LBB3_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    addi a0, zero, 38
+; RV32IF-NEXT:    li a0, 38
 ; RV32IF-NEXT:  .LBB3_2:
 ; RV32IF-NEXT:    ret
 ;
 ; RV32IBT-LABEL: select_const_int_harder:
 ; RV32IBT:       # %bb.0:
-; RV32IBT-NEXT:    addi a1, zero, 38
-; RV32IBT-NEXT:    addi a2, zero, 6
+; RV32IBT-NEXT:    li a1, 38
+; RV32IBT-NEXT:    li a2, 6
 ; RV32IBT-NEXT:    cmov a0, a0, a2, a1
 ; RV32IBT-NEXT:    ret
 ;
 ; RV32IFBT-LABEL: select_const_int_harder:
 ; RV32IFBT:       # %bb.0:
-; RV32IFBT-NEXT:    addi a1, zero, 38
-; RV32IFBT-NEXT:    addi a2, zero, 6
+; RV32IFBT-NEXT:    li a1, 38
+; RV32IFBT-NEXT:    li a2, 6
 ; RV32IFBT-NEXT:    cmov a0, a0, a2, a1
 ; RV32IFBT-NEXT:    ret
 ;
 ; RV64I-LABEL: select_const_int_harder:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    addi a0, zero, 6
+; RV64I-NEXT:    li a0, 6
 ; RV64I-NEXT:    bnez a1, .LBB3_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    addi a0, zero, 38
+; RV64I-NEXT:    li a0, 38
 ; RV64I-NEXT:  .LBB3_2:
 ; RV64I-NEXT:    ret
 ;
 ; RV64IFD-LABEL: select_const_int_harder:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    mv a1, a0
-; RV64IFD-NEXT:    addi a0, zero, 6
+; RV64IFD-NEXT:    li a0, 6
 ; RV64IFD-NEXT:    bnez a1, .LBB3_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    addi a0, zero, 38
+; RV64IFD-NEXT:    li a0, 38
 ; RV64IFD-NEXT:  .LBB3_2:
 ; RV64IFD-NEXT:    ret
 ;
 ; RV64IBT-LABEL: select_const_int_harder:
 ; RV64IBT:       # %bb.0:
-; RV64IBT-NEXT:    addi a1, zero, 38
-; RV64IBT-NEXT:    addi a2, zero, 6
+; RV64IBT-NEXT:    li a1, 38
+; RV64IBT-NEXT:    li a2, 6
 ; RV64IBT-NEXT:    cmov a0, a0, a2, a1
 ; RV64IBT-NEXT:    ret
 ;
 ; RV64IFDBT-LABEL: select_const_int_harder:
 ; RV64IFDBT:       # %bb.0:
-; RV64IFDBT-NEXT:    addi a1, zero, 38
-; RV64IFDBT-NEXT:    addi a2, zero, 6
+; RV64IFDBT-NEXT:    li a1, 38
+; RV64IFDBT-NEXT:    li a2, 6
 ; RV64IFDBT-NEXT:    cmov a0, a0, a2, a1
 ; RV64IFDBT-NEXT:    ret
   %1 = select i1 %a, i32 6, i32 38

diff  --git a/llvm/test/CodeGen/RISCV/select-constant-xor.ll b/llvm/test/CodeGen/RISCV/select-constant-xor.ll
index 6af6614a23ba..2e38ed1aa375 100644
--- a/llvm/test/CodeGen/RISCV/select-constant-xor.ll
+++ b/llvm/test/CodeGen/RISCV/select-constant-xor.ll
@@ -48,7 +48,7 @@ define i64 @selecti64i64(i64 %a) {
 define i32 @selecti64i32(i64 %a) {
 ; RV32-LABEL: selecti64i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, zero, -1
+; RV32-NEXT:    li a0, -1
 ; RV32-NEXT:    slt a0, a0, a1
 ; RV32-NEXT:    lui a1, 524288
 ; RV32-NEXT:    sub a0, a1, a0

diff  --git a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
index 2b7f275bf6d6..495555c67e87 100644
--- a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
+++ b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
@@ -15,7 +15,7 @@
 define i64 @cmovcc64(i32 signext %a, i64 %b, i64 %c) nounwind {
 ; RV32I-LABEL: cmovcc64:
 ; RV32I:       # %bb.0: # %entry
-; RV32I-NEXT:    addi a5, zero, 123
+; RV32I-NEXT:    li a5, 123
 ; RV32I-NEXT:    beq a0, a5, .LBB0_2
 ; RV32I-NEXT:  # %bb.1: # %entry
 ; RV32I-NEXT:    mv a1, a3
@@ -34,7 +34,7 @@ define i64 @cmovcc64(i32 signext %a, i64 %b, i64 %c) nounwind {
 ;
 ; RV64I-LABEL: cmovcc64:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    addi a3, zero, 123
+; RV64I-NEXT:    li a3, 123
 ; RV64I-NEXT:    beq a0, a3, .LBB0_2
 ; RV64I-NEXT:  # %bb.1: # %entry
 ; RV64I-NEXT:    mv a1, a2
@@ -119,7 +119,7 @@ define i128 @cmovcc128(i64 signext %a, i128 %b, i128 %c) nounwind {
 ;
 ; RV64I-LABEL: cmovcc128:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    addi a5, zero, 123
+; RV64I-NEXT:    li a5, 123
 ; RV64I-NEXT:    beq a0, a5, .LBB1_2
 ; RV64I-NEXT:  # %bb.1: # %entry
 ; RV64I-NEXT:    mv a1, a3
@@ -412,7 +412,7 @@ entry:
 define i32 @cmovccdep(i32 signext %a, i32 %b, i32 %c, i32 %d) nounwind {
 ; RV32I-LABEL: cmovccdep:
 ; RV32I:       # %bb.0: # %entry
-; RV32I-NEXT:    addi a4, zero, 123
+; RV32I-NEXT:    li a4, 123
 ; RV32I-NEXT:    bne a0, a4, .LBB6_3
 ; RV32I-NEXT:  # %bb.1: # %entry
 ; RV32I-NEXT:    mv a2, a1
@@ -439,7 +439,7 @@ define i32 @cmovccdep(i32 signext %a, i32 %b, i32 %c, i32 %d) nounwind {
 ;
 ; RV64I-LABEL: cmovccdep:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    addi a4, zero, 123
+; RV64I-NEXT:    li a4, 123
 ; RV64I-NEXT:    bne a0, a4, .LBB6_3
 ; RV64I-NEXT:  # %bb.1: # %entry
 ; RV64I-NEXT:    mv a2, a1

diff  --git a/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll b/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
index 82c59ad16615..a55a086158a2 100644
--- a/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
+++ b/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
@@ -37,7 +37,7 @@ define i32 @neg_sel_special_constant(i32 signext %a) {
 ;
 ; RV64-LABEL: neg_sel_special_constant:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, 1
+; RV64-NEXT:    li a1, 1
 ; RV64-NEXT:    slli a1, a1, 31
 ; RV64-NEXT:    and a0, a0, a1
 ; RV64-NEXT:    srli a0, a0, 22
@@ -81,10 +81,10 @@ define i32 @pos_sel_constants(i32 signext %a) {
 ; CHECK-LABEL: pos_sel_constants:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mv a1, a0
-; CHECK-NEXT:    addi a0, zero, 5
+; CHECK-NEXT:    li a0, 5
 ; CHECK-NEXT:    bgez a1, .LBB4_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, zero
+; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:  .LBB4_2:
 ; CHECK-NEXT:    ret
   %tmp.1 = icmp sgt i32 %a, -1
@@ -106,7 +106,7 @@ define i32 @pos_sel_special_constant(i32 signext %a) {
 ;
 ; RV64-LABEL: pos_sel_special_constant:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a1, zero, -1
+; RV64-NEXT:    li a1, -1
 ; RV64-NEXT:    slt a0, a1, a0
 ; RV64-NEXT:    slli a0, a0, 9
 ; RV64-NEXT:    ret
@@ -121,7 +121,7 @@ define i32 @pos_sel_variable_and_zero(i32 signext %a, i32 signext %b) {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    bgez a0, .LBB6_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:  .LBB6_2:
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    ret
@@ -130,7 +130,7 @@ define i32 @pos_sel_variable_and_zero(i32 signext %a, i32 signext %b) {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    bgez a0, .LBB6_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:  .LBB6_2:
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ret
@@ -158,7 +158,7 @@ define i32 @not_neg_sel_same_variable(i32 signext %a) {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    bgtz a0, .LBB7_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:  .LBB7_2:
 ; RV32I-NEXT:    ret
 ;
@@ -166,7 +166,7 @@ define i32 @not_neg_sel_same_variable(i32 signext %a) {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    bgtz a0, .LBB7_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:  .LBB7_2:
 ; RV64I-NEXT:    ret
 ;
@@ -191,7 +191,7 @@ define i32 @sub_clamp_zero(i32 signext %x, i32 signext %y) {
 ; RV32I-NEXT:    sub a0, a0, a1
 ; RV32I-NEXT:    bgtz a0, .LBB8_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:  .LBB8_2:
 ; RV32I-NEXT:    ret
 ;
@@ -200,7 +200,7 @@ define i32 @sub_clamp_zero(i32 signext %x, i32 signext %y) {
 ; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    bgtz a0, .LBB8_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:  .LBB8_2:
 ; RV64I-NEXT:    ret
 ;
@@ -225,10 +225,10 @@ define i8 @sel_shift_bool_i8(i1 %t) {
 ; CHECK-LABEL: sel_shift_bool_i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    andi a1, a0, 1
-; CHECK-NEXT:    addi a0, zero, -128
+; CHECK-NEXT:    li a0, -128
 ; CHECK-NEXT:    bnez a1, .LBB9_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, zero
+; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:  .LBB9_2:
 ; CHECK-NEXT:    ret
   %shl = select i1 %t, i8 128, i8 0
@@ -260,7 +260,7 @@ define i64 @sel_shift_bool_i64(i1 %t) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    andi a0, a0, 1
 ; RV32-NEXT:    slli a0, a0, 16
-; RV32-NEXT:    mv a1, zero
+; RV32-NEXT:    li a1, 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: sel_shift_bool_i64:

diff  --git a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
index 8540e5b1f797..c2a980aa16ea 100644
--- a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
+++ b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
@@ -211,7 +211,7 @@ define i64 @zext_i1_to_i64(i1 %a) nounwind {
 ; RV32I-LABEL: zext_i1_to_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zext_i1_to_i64:
@@ -254,7 +254,7 @@ define i64 @zext_i8_to_i64(i8 %a) nounwind {
 ; RV32I-LABEL: zext_i8_to_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 255
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zext_i8_to_i64:
@@ -289,7 +289,7 @@ define i64 @zext_i16_to_i64(i16 %a) nounwind {
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zext_i16_to_i64:
@@ -305,7 +305,7 @@ define i64 @zext_i16_to_i64(i16 %a) nounwind {
 define i64 @zext_i32_to_i64(i32 %a) nounwind {
 ; RV32I-LABEL: zext_i32_to_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zext_i32_to_i64:

diff  --git a/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll b/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll
index d7d901e6cf80..cb57757d815b 100644
--- a/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll
+++ b/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll
@@ -166,11 +166,11 @@ define i64 @sll_redundant_mask_zeros_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    bltz a4, .LBB9_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sll a1, a0, a4
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB9_2:
 ; RV32I-NEXT:    sll a1, a1, a2
-; RV32I-NEXT:    addi a4, zero, 31
+; RV32I-NEXT:    li a4, 31
 ; RV32I-NEXT:    sub a3, a4, a3
 ; RV32I-NEXT:    srli a4, a0, 1
 ; RV32I-NEXT:    srl a3, a4, a3
@@ -198,11 +198,11 @@ define i64 @srl_redundant_mask_zeros_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    bltz a4, .LBB10_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srl a0, a1, a4
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB10_2:
 ; RV32I-NEXT:    srl a0, a0, a2
-; RV32I-NEXT:    addi a4, zero, 31
+; RV32I-NEXT:    li a4, 31
 ; RV32I-NEXT:    sub a3, a4, a3
 ; RV32I-NEXT:    slli a4, a1, 1
 ; RV32I-NEXT:    sll a3, a4, a3
@@ -234,7 +234,7 @@ define i64 @sra_redundant_mask_zeros_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB11_2:
 ; RV32I-NEXT:    srl a0, a0, a2
-; RV32I-NEXT:    addi a4, zero, 31
+; RV32I-NEXT:    li a4, 31
 ; RV32I-NEXT:    sub a3, a4, a3
 ; RV32I-NEXT:    slli a4, a1, 1
 ; RV32I-NEXT:    sll a3, a4, a3

diff  --git a/llvm/test/CodeGen/RISCV/shifts.ll b/llvm/test/CodeGen/RISCV/shifts.ll
index 5b67ca0f5616..d77a76bab40f 100644
--- a/llvm/test/CodeGen/RISCV/shifts.ll
+++ b/llvm/test/CodeGen/RISCV/shifts.ll
@@ -17,11 +17,11 @@ define i64 @lshr64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    bltz a3, .LBB0_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srl a0, a1, a3
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB0_2:
 ; RV32I-NEXT:    srl a0, a0, a2
-; RV32I-NEXT:    addi a3, zero, 31
+; RV32I-NEXT:    li a3, 31
 ; RV32I-NEXT:    sub a3, a3, a2
 ; RV32I-NEXT:    slli a4, a1, 1
 ; RV32I-NEXT:    sll a3, a4, a3
@@ -66,7 +66,7 @@ define i64 @ashr64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB2_2:
 ; RV32I-NEXT:    srl a0, a0, a2
-; RV32I-NEXT:    addi a3, zero, 31
+; RV32I-NEXT:    li a3, 31
 ; RV32I-NEXT:    sub a3, a3, a2
 ; RV32I-NEXT:    slli a4, a1, 1
 ; RV32I-NEXT:    sll a3, a4, a3
@@ -107,11 +107,11 @@ define i64 @shl64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    bltz a3, .LBB4_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sll a1, a0, a3
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB4_2:
 ; RV32I-NEXT:    sll a1, a1, a2
-; RV32I-NEXT:    addi a3, zero, 31
+; RV32I-NEXT:    li a3, 31
 ; RV32I-NEXT:    sub a3, a3, a2
 ; RV32I-NEXT:    srli a4, a0, 1
 ; RV32I-NEXT:    srl a3, a4, a3
@@ -153,11 +153,11 @@ define i128 @lshr128(i128 %a, i128 %b) nounwind {
 ; RV32I-NEXT:    lw a2, 0(a2)
 ; RV32I-NEXT:    lw t0, 8(a1)
 ; RV32I-NEXT:    lw t4, 12(a1)
-; RV32I-NEXT:    addi a6, zero, 64
+; RV32I-NEXT:    li a6, 64
 ; RV32I-NEXT:    sub t1, a6, a2
-; RV32I-NEXT:    addi a3, zero, 32
+; RV32I-NEXT:    li a3, 32
 ; RV32I-NEXT:    sub t5, a3, a2
-; RV32I-NEXT:    addi t2, zero, 31
+; RV32I-NEXT:    li t2, 31
 ; RV32I-NEXT:    bltz t5, .LBB6_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sll a3, t0, t5
@@ -180,7 +180,7 @@ define i128 @lshr128(i128 %a, i128 %b) nounwind {
 ; RV32I-NEXT:    addi t3, a2, -64
 ; RV32I-NEXT:    bltz a4, .LBB6_7
 ; RV32I-NEXT:  # %bb.6:
-; RV32I-NEXT:    mv a7, zero
+; RV32I-NEXT:    li a7, 0
 ; RV32I-NEXT:    bgeu a2, a6, .LBB6_8
 ; RV32I-NEXT:    j .LBB6_9
 ; RV32I-NEXT:  .LBB6_7:
@@ -218,7 +218,7 @@ define i128 @lshr128(i128 %a, i128 %b) nounwind {
 ; RV32I-NEXT:    bgeu a2, a6, .LBB6_18
 ; RV32I-NEXT:    j .LBB6_19
 ; RV32I-NEXT:  .LBB6_17:
-; RV32I-NEXT:    addi a4, zero, 95
+; RV32I-NEXT:    li a4, 95
 ; RV32I-NEXT:    sub a4, a4, a2
 ; RV32I-NEXT:    sll a4, a3, a4
 ; RV32I-NEXT:    srl a1, t0, t3
@@ -243,18 +243,18 @@ define i128 @lshr128(i128 %a, i128 %b) nounwind {
 ; RV32I-NEXT:    or a3, a1, a3
 ; RV32I-NEXT:    bltu a2, a6, .LBB6_25
 ; RV32I-NEXT:  .LBB6_24:
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:  .LBB6_25:
 ; RV32I-NEXT:    bltz t6, .LBB6_27
 ; RV32I-NEXT:  # %bb.26:
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    bgeu a2, a6, .LBB6_28
 ; RV32I-NEXT:    j .LBB6_29
 ; RV32I-NEXT:  .LBB6_27:
 ; RV32I-NEXT:    srl a4, t4, a2
 ; RV32I-NEXT:    bltu a2, a6, .LBB6_29
 ; RV32I-NEXT:  .LBB6_28:
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:  .LBB6_29:
 ; RV32I-NEXT:    sw a4, 12(a0)
 ; RV32I-NEXT:    sw a3, 8(a0)
@@ -270,11 +270,11 @@ define i128 @lshr128(i128 %a, i128 %b) nounwind {
 ; RV64I-NEXT:    bltz a3, .LBB6_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    srl a0, a1, a3
-; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB6_2:
 ; RV64I-NEXT:    srl a0, a0, a2
-; RV64I-NEXT:    addi a3, zero, 63
+; RV64I-NEXT:    li a3, 63
 ; RV64I-NEXT:    sub a3, a3, a2
 ; RV64I-NEXT:    slli a4, a1, 1
 ; RV64I-NEXT:    sll a3, a4, a3
@@ -294,11 +294,11 @@ define i128 @ashr128(i128 %a, i128 %b) nounwind {
 ; RV32I-NEXT:    lw a2, 0(a2)
 ; RV32I-NEXT:    lw t2, 8(a1)
 ; RV32I-NEXT:    lw t5, 12(a1)
-; RV32I-NEXT:    addi a6, zero, 64
+; RV32I-NEXT:    li a6, 64
 ; RV32I-NEXT:    sub t1, a6, a2
-; RV32I-NEXT:    addi a3, zero, 32
+; RV32I-NEXT:    li a3, 32
 ; RV32I-NEXT:    sub t6, a3, a2
-; RV32I-NEXT:    addi t4, zero, 31
+; RV32I-NEXT:    li t4, 31
 ; RV32I-NEXT:    bltz t6, .LBB7_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sll s0, t2, t6
@@ -360,7 +360,7 @@ define i128 @ashr128(i128 %a, i128 %b) nounwind {
 ; RV32I-NEXT:    bgeu a2, a6, .LBB7_18
 ; RV32I-NEXT:    j .LBB7_19
 ; RV32I-NEXT:  .LBB7_17:
-; RV32I-NEXT:    addi a4, zero, 95
+; RV32I-NEXT:    li a4, 95
 ; RV32I-NEXT:    sub a4, a4, a2
 ; RV32I-NEXT:    sll a4, s0, a4
 ; RV32I-NEXT:    srl a1, t2, t3
@@ -417,7 +417,7 @@ define i128 @ashr128(i128 %a, i128 %b) nounwind {
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB7_2:
 ; RV64I-NEXT:    srl a0, a0, a2
-; RV64I-NEXT:    addi a3, zero, 63
+; RV64I-NEXT:    li a3, 63
 ; RV64I-NEXT:    sub a3, a3, a2
 ; RV64I-NEXT:    slli a4, a1, 1
 ; RV64I-NEXT:    sll a3, a4, a3
@@ -436,11 +436,11 @@ define i128 @shl128(i128 %a, i128 %b) nounwind {
 ; RV32I-NEXT:    lw a2, 0(a2)
 ; RV32I-NEXT:    lw t0, 4(a1)
 ; RV32I-NEXT:    lw t4, 0(a1)
-; RV32I-NEXT:    addi a6, zero, 64
+; RV32I-NEXT:    li a6, 64
 ; RV32I-NEXT:    sub t1, a6, a2
-; RV32I-NEXT:    addi a3, zero, 32
+; RV32I-NEXT:    li a3, 32
 ; RV32I-NEXT:    sub t5, a3, a2
-; RV32I-NEXT:    addi t2, zero, 31
+; RV32I-NEXT:    li t2, 31
 ; RV32I-NEXT:    bltz t5, .LBB8_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srl a3, t0, t5
@@ -463,7 +463,7 @@ define i128 @shl128(i128 %a, i128 %b) nounwind {
 ; RV32I-NEXT:    addi t3, a2, -64
 ; RV32I-NEXT:    bltz a4, .LBB8_7
 ; RV32I-NEXT:  # %bb.6:
-; RV32I-NEXT:    mv a7, zero
+; RV32I-NEXT:    li a7, 0
 ; RV32I-NEXT:    bgeu a2, a6, .LBB8_8
 ; RV32I-NEXT:    j .LBB8_9
 ; RV32I-NEXT:  .LBB8_7:
@@ -501,7 +501,7 @@ define i128 @shl128(i128 %a, i128 %b) nounwind {
 ; RV32I-NEXT:    bgeu a2, a6, .LBB8_18
 ; RV32I-NEXT:    j .LBB8_19
 ; RV32I-NEXT:  .LBB8_17:
-; RV32I-NEXT:    addi a4, zero, 95
+; RV32I-NEXT:    li a4, 95
 ; RV32I-NEXT:    sub a4, a4, a2
 ; RV32I-NEXT:    srl a4, a3, a4
 ; RV32I-NEXT:    sll a1, t0, t3
@@ -526,18 +526,18 @@ define i128 @shl128(i128 %a, i128 %b) nounwind {
 ; RV32I-NEXT:    or a3, a1, a3
 ; RV32I-NEXT:    bltu a2, a6, .LBB8_25
 ; RV32I-NEXT:  .LBB8_24:
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:  .LBB8_25:
 ; RV32I-NEXT:    bltz t6, .LBB8_27
 ; RV32I-NEXT:  # %bb.26:
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:    bgeu a2, a6, .LBB8_28
 ; RV32I-NEXT:    j .LBB8_29
 ; RV32I-NEXT:  .LBB8_27:
 ; RV32I-NEXT:    sll a4, t4, a2
 ; RV32I-NEXT:    bltu a2, a6, .LBB8_29
 ; RV32I-NEXT:  .LBB8_28:
-; RV32I-NEXT:    mv a4, zero
+; RV32I-NEXT:    li a4, 0
 ; RV32I-NEXT:  .LBB8_29:
 ; RV32I-NEXT:    sw a4, 0(a0)
 ; RV32I-NEXT:    sw a3, 4(a0)
@@ -553,11 +553,11 @@ define i128 @shl128(i128 %a, i128 %b) nounwind {
 ; RV64I-NEXT:    bltz a3, .LBB8_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    sll a1, a0, a3
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB8_2:
 ; RV64I-NEXT:    sll a1, a1, a2
-; RV64I-NEXT:    addi a3, zero, 63
+; RV64I-NEXT:    li a3, 63
 ; RV64I-NEXT:    sub a3, a3, a2
 ; RV64I-NEXT:    srli a4, a0, 1
 ; RV64I-NEXT:    srl a3, a4, a3

diff  --git a/llvm/test/CodeGen/RISCV/shrinkwrap.ll b/llvm/test/CodeGen/RISCV/shrinkwrap.ll
index 7ae677ba1f24..178cd03777bc 100644
--- a/llvm/test/CodeGen/RISCV/shrinkwrap.ll
+++ b/llvm/test/CodeGen/RISCV/shrinkwrap.ll
@@ -15,7 +15,7 @@ define void @eliminate_restore(i32 %n) nounwind {
 ; RV32I-SW-NO:       # %bb.0:
 ; RV32I-SW-NO-NEXT:    addi sp, sp, -16
 ; RV32I-SW-NO-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-SW-NO-NEXT:    addi a1, zero, 32
+; RV32I-SW-NO-NEXT:    li a1, 32
 ; RV32I-SW-NO-NEXT:    bgeu a1, a0, .LBB0_2
 ; RV32I-SW-NO-NEXT:  # %bb.1: # %if.end
 ; RV32I-SW-NO-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -26,7 +26,7 @@ define void @eliminate_restore(i32 %n) nounwind {
 ;
 ; RV32I-SW-LABEL: eliminate_restore:
 ; RV32I-SW:       # %bb.0:
-; RV32I-SW-NEXT:    addi a1, zero, 32
+; RV32I-SW-NEXT:    li a1, 32
 ; RV32I-SW-NEXT:    bgeu a1, a0, .LBB0_2
 ; RV32I-SW-NEXT:  # %bb.1: # %if.end
 ; RV32I-SW-NEXT:    ret
@@ -37,7 +37,7 @@ define void @eliminate_restore(i32 %n) nounwind {
 ;
 ; RV32I-SW-SR-LABEL: eliminate_restore:
 ; RV32I-SW-SR:       # %bb.0:
-; RV32I-SW-SR-NEXT:    addi a1, zero, 32
+; RV32I-SW-SR-NEXT:    li a1, 32
 ; RV32I-SW-SR-NEXT:    bgeu a1, a0, .LBB0_2
 ; RV32I-SW-SR-NEXT:  # %bb.1: # %if.end
 ; RV32I-SW-SR-NEXT:    ret
@@ -48,7 +48,7 @@ define void @eliminate_restore(i32 %n) nounwind {
 ; RV64I-SW-LABEL: eliminate_restore:
 ; RV64I-SW:       # %bb.0:
 ; RV64I-SW-NEXT:    sext.w a0, a0
-; RV64I-SW-NEXT:    addi a1, zero, 32
+; RV64I-SW-NEXT:    li a1, 32
 ; RV64I-SW-NEXT:    bgeu a1, a0, .LBB0_2
 ; RV64I-SW-NEXT:  # %bb.1: # %if.end
 ; RV64I-SW-NEXT:    ret
@@ -76,7 +76,7 @@ define void @conditional_alloca(i32 %n) nounwind {
 ; RV32I-SW-NO-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-SW-NO-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-SW-NO-NEXT:    addi s0, sp, 16
-; RV32I-SW-NO-NEXT:    addi a1, zero, 32
+; RV32I-SW-NO-NEXT:    li a1, 32
 ; RV32I-SW-NO-NEXT:    bltu a1, a0, .LBB1_2
 ; RV32I-SW-NO-NEXT:  # %bb.1: # %if.then
 ; RV32I-SW-NO-NEXT:    addi a0, a0, 15
@@ -93,7 +93,7 @@ define void @conditional_alloca(i32 %n) nounwind {
 ;
 ; RV32I-SW-LABEL: conditional_alloca:
 ; RV32I-SW:       # %bb.0:
-; RV32I-SW-NEXT:    addi a1, zero, 32
+; RV32I-SW-NEXT:    li a1, 32
 ; RV32I-SW-NEXT:    bltu a1, a0, .LBB1_2
 ; RV32I-SW-NEXT:  # %bb.1: # %if.then
 ; RV32I-SW-NEXT:    addi sp, sp, -16
@@ -114,7 +114,7 @@ define void @conditional_alloca(i32 %n) nounwind {
 ;
 ; RV32I-SW-SR-LABEL: conditional_alloca:
 ; RV32I-SW-SR:       # %bb.0:
-; RV32I-SW-SR-NEXT:    addi a1, zero, 32
+; RV32I-SW-SR-NEXT:    li a1, 32
 ; RV32I-SW-SR-NEXT:    bltu a1, a0, .LBB1_2
 ; RV32I-SW-SR-NEXT:  # %bb.1: # %if.then
 ; RV32I-SW-SR-NEXT:    call t0, __riscv_save_1
@@ -132,7 +132,7 @@ define void @conditional_alloca(i32 %n) nounwind {
 ; RV64I-SW-LABEL: conditional_alloca:
 ; RV64I-SW:       # %bb.0:
 ; RV64I-SW-NEXT:    sext.w a1, a0
-; RV64I-SW-NEXT:    addi a2, zero, 32
+; RV64I-SW-NEXT:    li a2, 32
 ; RV64I-SW-NEXT:    bltu a2, a1, .LBB1_2
 ; RV64I-SW-NEXT:  # %bb.1: # %if.then
 ; RV64I-SW-NEXT:    addi sp, sp, -16

diff  --git a/llvm/test/CodeGen/RISCV/sink-icmp.ll b/llvm/test/CodeGen/RISCV/sink-icmp.ll
index 4597b623d25a..5af19175a004 100644
--- a/llvm/test/CodeGen/RISCV/sink-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/sink-icmp.ll
@@ -14,7 +14,7 @@ define signext i16 @func(i16* %a, i16* %b) {
 ; RV32-NEXT:  # %bb.2: # %.LBB0_2
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB0_3: # %return
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: func:
@@ -26,7 +26,7 @@ define signext i16 @func(i16* %a, i16* %b) {
 ; RV64-NEXT:  # %bb.2: # %.LBB0_2
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB0_3: # %return
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
 entry:
   %0 = load i16, i16* %a

diff  --git a/llvm/test/CodeGen/RISCV/split-offsets.ll b/llvm/test/CodeGen/RISCV/split-offsets.ll
index 9f92edcce167..2587cc11bad7 100644
--- a/llvm/test/CodeGen/RISCV/split-offsets.ll
+++ b/llvm/test/CodeGen/RISCV/split-offsets.ll
@@ -16,9 +16,9 @@ define void @test1([65536 x i32]** %sp, [65536 x i32]* %t, i32 %n) {
 ; RV32I-NEXT:    addi a2, a2, -1920
 ; RV32I-NEXT:    add a1, a1, a2
 ; RV32I-NEXT:    add a0, a0, a2
-; RV32I-NEXT:    addi a2, zero, 2
+; RV32I-NEXT:    li a2, 2
 ; RV32I-NEXT:    sw a2, 0(a0)
-; RV32I-NEXT:    addi a3, zero, 1
+; RV32I-NEXT:    li a3, 1
 ; RV32I-NEXT:    sw a3, 4(a0)
 ; RV32I-NEXT:    sw a3, 0(a1)
 ; RV32I-NEXT:    sw a2, 4(a1)
@@ -31,9 +31,9 @@ define void @test1([65536 x i32]** %sp, [65536 x i32]* %t, i32 %n) {
 ; RV64I-NEXT:    addiw a2, a2, -1920
 ; RV64I-NEXT:    add a1, a1, a2
 ; RV64I-NEXT:    add a0, a0, a2
-; RV64I-NEXT:    addi a2, zero, 2
+; RV64I-NEXT:    li a2, 2
 ; RV64I-NEXT:    sw a2, 0(a0)
-; RV64I-NEXT:    addi a3, zero, 1
+; RV64I-NEXT:    li a3, 1
 ; RV64I-NEXT:    sw a3, 4(a0)
 ; RV64I-NEXT:    sw a3, 0(a1)
 ; RV64I-NEXT:    sw a2, 4(a1)
@@ -55,7 +55,7 @@ entry:
 define void @test2([65536 x i32]** %sp, [65536 x i32]* %t, i32 %n) {
 ; RV32I-LABEL: test2:
 ; RV32I:       # %bb.0: # %entry
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    lui a0, 20
 ; RV32I-NEXT:    addi a5, a0, -1920
@@ -76,7 +76,7 @@ define void @test2([65536 x i32]** %sp, [65536 x i32]* %t, i32 %n) {
 ;
 ; RV64I-LABEL: test2:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    mv a3, zero
+; RV64I-NEXT:    li a3, 0
 ; RV64I-NEXT:    ld a4, 0(a0)
 ; RV64I-NEXT:    lui a0, 20
 ; RV64I-NEXT:    addiw a5, a0, -1920

diff  --git a/llvm/test/CodeGen/RISCV/split-sp-adjust.ll b/llvm/test/CodeGen/RISCV/split-sp-adjust.ll
index f99e8d722679..ecc2815d4864 100644
--- a/llvm/test/CodeGen/RISCV/split-sp-adjust.ll
+++ b/llvm/test/CodeGen/RISCV/split-sp-adjust.ll
@@ -11,7 +11,7 @@ define i32 @SplitSP() nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    addi a0, sp, 16
 ; RV32I-NEXT:    call foo@plt
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 2032
@@ -31,7 +31,7 @@ define i32 @NoSplitSP() nounwind {
 ; RV32I-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    addi a0, sp, 4
 ; RV32I-NEXT:    call foo@plt
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 2032
 ; RV32I-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/srem-lkk.ll b/llvm/test/CodeGen/RISCV/srem-lkk.ll
index ac23823de269..28da10d45ac6 100644
--- a/llvm/test/CodeGen/RISCV/srem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-lkk.ll
@@ -13,7 +13,7 @@ define i32 @fold_srem_positive_odd(i32 %x) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -28,7 +28,7 @@ define i32 @fold_srem_positive_odd(i32 %x) nounwind {
 ; RV32IM-NEXT:    srli a2, a1, 31
 ; RV32IM-NEXT:    srai a1, a1, 6
 ; RV32IM-NEXT:    add a1, a1, a2
-; RV32IM-NEXT:    addi a2, zero, 95
+; RV32IM-NEXT:    li a2, 95
 ; RV32IM-NEXT:    mul a1, a1, a2
 ; RV32IM-NEXT:    sub a0, a0, a1
 ; RV32IM-NEXT:    ret
@@ -38,7 +38,7 @@ define i32 @fold_srem_positive_odd(i32 %x) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -55,7 +55,7 @@ define i32 @fold_srem_positive_odd(i32 %x) nounwind {
 ; RV64IM-NEXT:    srliw a2, a1, 31
 ; RV64IM-NEXT:    sraiw a1, a1, 6
 ; RV64IM-NEXT:    addw a1, a1, a2
-; RV64IM-NEXT:    addi a2, zero, 95
+; RV64IM-NEXT:    li a2, 95
 ; RV64IM-NEXT:    mulw a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
@@ -69,7 +69,7 @@ define i32 @fold_srem_positive_even(i32 %x) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 1060
+; RV32I-NEXT:    li a1, 1060
 ; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -83,7 +83,7 @@ define i32 @fold_srem_positive_even(i32 %x) nounwind {
 ; RV32IM-NEXT:    srli a2, a1, 31
 ; RV32IM-NEXT:    srai a1, a1, 8
 ; RV32IM-NEXT:    add a1, a1, a2
-; RV32IM-NEXT:    addi a2, zero, 1060
+; RV32IM-NEXT:    li a2, 1060
 ; RV32IM-NEXT:    mul a1, a1, a2
 ; RV32IM-NEXT:    sub a0, a0, a1
 ; RV32IM-NEXT:    ret
@@ -93,7 +93,7 @@ define i32 @fold_srem_positive_even(i32 %x) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    addi a1, zero, 1060
+; RV64I-NEXT:    li a1, 1060
 ; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -108,7 +108,7 @@ define i32 @fold_srem_positive_even(i32 %x) nounwind {
 ; RV64IM-NEXT:    srli a2, a1, 63
 ; RV64IM-NEXT:    srai a1, a1, 40
 ; RV64IM-NEXT:    addw a1, a1, a2
-; RV64IM-NEXT:    addi a2, zero, 1060
+; RV64IM-NEXT:    li a2, 1060
 ; RV64IM-NEXT:    mulw a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
@@ -122,7 +122,7 @@ define i32 @fold_srem_negative_odd(i32 %x) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, -723
+; RV32I-NEXT:    li a1, -723
 ; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -136,7 +136,7 @@ define i32 @fold_srem_negative_odd(i32 %x) nounwind {
 ; RV32IM-NEXT:    srli a2, a1, 31
 ; RV32IM-NEXT:    srai a1, a1, 8
 ; RV32IM-NEXT:    add a1, a1, a2
-; RV32IM-NEXT:    addi a2, zero, -723
+; RV32IM-NEXT:    li a2, -723
 ; RV32IM-NEXT:    mul a1, a1, a2
 ; RV32IM-NEXT:    sub a0, a0, a1
 ; RV32IM-NEXT:    ret
@@ -146,7 +146,7 @@ define i32 @fold_srem_negative_odd(i32 %x) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    addi a1, zero, -723
+; RV64I-NEXT:    li a1, -723
 ; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -161,7 +161,7 @@ define i32 @fold_srem_negative_odd(i32 %x) nounwind {
 ; RV64IM-NEXT:    srli a2, a1, 63
 ; RV64IM-NEXT:    srai a1, a1, 40
 ; RV64IM-NEXT:    addw a1, a1, a2
-; RV64IM-NEXT:    addi a2, zero, -723
+; RV64IM-NEXT:    li a2, -723
 ; RV64IM-NEXT:    mulw a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
@@ -236,10 +236,10 @@ define i32 @combine_srem_sdiv(i32 %x) nounwind {
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __divsi3@plt
 ; RV32I-NEXT:    add a0, s1, a0
@@ -258,7 +258,7 @@ define i32 @combine_srem_sdiv(i32 %x) nounwind {
 ; RV32IM-NEXT:    srli a2, a1, 31
 ; RV32IM-NEXT:    srai a1, a1, 6
 ; RV32IM-NEXT:    add a1, a1, a2
-; RV32IM-NEXT:    addi a2, zero, 95
+; RV32IM-NEXT:    li a2, 95
 ; RV32IM-NEXT:    mul a2, a1, a2
 ; RV32IM-NEXT:    sub a0, a0, a2
 ; RV32IM-NEXT:    add a0, a0, a1
@@ -271,11 +271,11 @@ define i32 @combine_srem_sdiv(i32 %x) nounwind {
 ; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sext.w s0, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __divdi3@plt
 ; RV64I-NEXT:    addw a0, s1, a0
@@ -296,7 +296,7 @@ define i32 @combine_srem_sdiv(i32 %x) nounwind {
 ; RV64IM-NEXT:    srliw a2, a1, 31
 ; RV64IM-NEXT:    sraiw a1, a1, 6
 ; RV64IM-NEXT:    addw a1, a1, a2
-; RV64IM-NEXT:    addi a2, zero, 95
+; RV64IM-NEXT:    li a2, 95
 ; RV64IM-NEXT:    mulw a2, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a2
 ; RV64IM-NEXT:    addw a0, a0, a1
@@ -352,7 +352,7 @@ define i32 @dont_fold_srem_power_of_two(i32 %x) nounwind {
 define i32 @dont_fold_srem_one(i32 %x) nounwind {
 ; CHECK-LABEL: dont_fold_srem_one:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    mv a0, zero
+; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, 1
   ret i32 %1
@@ -409,8 +409,8 @@ define i64 @dont_fold_srem_i64(i64 %x) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 98
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 98
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __moddi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -420,8 +420,8 @@ define i64 @dont_fold_srem_i64(i64 %x) nounwind {
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -16
 ; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IM-NEXT:    addi a2, zero, 98
-; RV32IM-NEXT:    mv a3, zero
+; RV32IM-NEXT:    li a2, 98
+; RV32IM-NEXT:    li a3, 0
 ; RV32IM-NEXT:    call __moddi3@plt
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
@@ -431,7 +431,7 @@ define i64 @dont_fold_srem_i64(i64 %x) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, zero, 98
+; RV64I-NEXT:    li a1, 98
 ; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -451,7 +451,7 @@ define i64 @dont_fold_srem_i64(i64 %x) nounwind {
 ; RV64IM-NEXT:    srli a2, a1, 63
 ; RV64IM-NEXT:    srai a1, a1, 5
 ; RV64IM-NEXT:    add a1, a1, a2
-; RV64IM-NEXT:    addi a2, zero, 98
+; RV64IM-NEXT:    li a2, 98
 ; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    sub a0, a0, a1
 ; RV64IM-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 1be1937466fa..ed663acd0b09 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -122,7 +122,7 @@ define i1 @test_srem_even(i4 %X) nounwind {
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    slli a0, a0, 28
 ; RV32-NEXT:    srai a0, a0, 28
-; RV32-NEXT:    addi a1, zero, 6
+; RV32-NEXT:    li a1, 6
 ; RV32-NEXT:    call __modsi3@plt
 ; RV32-NEXT:    addi a0, a0, -1
 ; RV32-NEXT:    seqz a0, a0
@@ -136,7 +136,7 @@ define i1 @test_srem_even(i4 %X) nounwind {
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    slli a0, a0, 60
 ; RV64-NEXT:    srai a0, a0, 60
-; RV64-NEXT:    addi a1, zero, 6
+; RV64-NEXT:    li a1, 6
 ; RV64-NEXT:    call __moddi3@plt
 ; RV64-NEXT:    addi a0, a0, -1
 ; RV64-NEXT:    seqz a0, a0
@@ -153,7 +153,7 @@ define i1 @test_srem_even(i4 %X) nounwind {
 ; RV32M-NEXT:    mulh a1, a0, a1
 ; RV32M-NEXT:    srli a2, a1, 31
 ; RV32M-NEXT:    add a1, a1, a2
-; RV32M-NEXT:    addi a2, zero, 6
+; RV32M-NEXT:    li a2, 6
 ; RV32M-NEXT:    mul a1, a1, a2
 ; RV32M-NEXT:    sub a0, a0, a1
 ; RV32M-NEXT:    addi a0, a0, -1
@@ -175,7 +175,7 @@ define i1 @test_srem_even(i4 %X) nounwind {
 ; RV64M-NEXT:    mulh a1, a0, a1
 ; RV64M-NEXT:    srli a2, a1, 63
 ; RV64M-NEXT:    add a1, a1, a2
-; RV64M-NEXT:    addi a2, zero, 6
+; RV64M-NEXT:    li a2, 6
 ; RV64M-NEXT:    mul a1, a1, a2
 ; RV64M-NEXT:    sub a0, a0, a1
 ; RV64M-NEXT:    addi a0, a0, -1
@@ -191,7 +191,7 @@ define i1 @test_srem_even(i4 %X) nounwind {
 ; RV32MV-NEXT:    mulh a1, a0, a1
 ; RV32MV-NEXT:    srli a2, a1, 31
 ; RV32MV-NEXT:    add a1, a1, a2
-; RV32MV-NEXT:    addi a2, zero, 6
+; RV32MV-NEXT:    li a2, 6
 ; RV32MV-NEXT:    mul a1, a1, a2
 ; RV32MV-NEXT:    sub a0, a0, a1
 ; RV32MV-NEXT:    addi a0, a0, -1
@@ -213,7 +213,7 @@ define i1 @test_srem_even(i4 %X) nounwind {
 ; RV64MV-NEXT:    mulh a1, a0, a1
 ; RV64MV-NEXT:    srli a2, a1, 63
 ; RV64MV-NEXT:    add a1, a1, a2
-; RV64MV-NEXT:    addi a2, zero, 6
+; RV64MV-NEXT:    li a2, 6
 ; RV64MV-NEXT:    mul a1, a1, a2
 ; RV64MV-NEXT:    sub a0, a0, a1
 ; RV64MV-NEXT:    addi a0, a0, -1
@@ -338,22 +338,22 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32-NEXT:    srli a1, a2, 1
 ; RV32-NEXT:    andi a1, a1, 1
 ; RV32-NEXT:    neg a1, a1
-; RV32-NEXT:    addi a2, zero, 7
-; RV32-NEXT:    mv a3, zero
+; RV32-NEXT:    li a2, 7
+; RV32-NEXT:    li a3, 0
 ; RV32-NEXT:    call __moddi3@plt
 ; RV32-NEXT:    mv s5, a0
 ; RV32-NEXT:    mv s6, a1
-; RV32-NEXT:    addi a2, zero, -5
-; RV32-NEXT:    addi a3, zero, -1
+; RV32-NEXT:    li a2, -5
+; RV32-NEXT:    li a3, -1
 ; RV32-NEXT:    mv a0, s3
 ; RV32-NEXT:    mv a1, s1
 ; RV32-NEXT:    call __moddi3@plt
 ; RV32-NEXT:    mv s1, a0
 ; RV32-NEXT:    mv s3, a1
-; RV32-NEXT:    addi a2, zero, 6
+; RV32-NEXT:    li a2, 6
 ; RV32-NEXT:    mv a0, s4
 ; RV32-NEXT:    mv a1, s2
-; RV32-NEXT:    mv a3, zero
+; RV32-NEXT:    li a3, 0
 ; RV32-NEXT:    call __moddi3@plt
 ; RV32-NEXT:    xori a2, s1, 2
 ; RV32-NEXT:    or a2, a2, s3
@@ -405,7 +405,7 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64-NEXT:    lwu a1, 8(s0)
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    addi s4, zero, -1
+; RV64-NEXT:    li s4, -1
 ; RV64-NEXT:    srli a1, s4, 24
 ; RV64-NEXT:    and a0, a0, a1
 ; RV64-NEXT:    ld a1, 0(s0)
@@ -418,11 +418,11 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64-NEXT:    srai a0, a0, 31
 ; RV64-NEXT:    slli a1, a1, 31
 ; RV64-NEXT:    srai s2, a1, 31
-; RV64-NEXT:    addi a1, zero, 7
-; RV64-NEXT:    addi s5, zero, 7
+; RV64-NEXT:    li a1, 7
+; RV64-NEXT:    li s5, 7
 ; RV64-NEXT:    call __moddi3@plt
 ; RV64-NEXT:    mv s3, a0
-; RV64-NEXT:    addi a1, zero, -5
+; RV64-NEXT:    li a1, -5
 ; RV64-NEXT:    mv a0, s1
 ; RV64-NEXT:    call __moddi3@plt
 ; RV64-NEXT:    mv s1, a0
@@ -510,22 +510,22 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32M-NEXT:    srli a1, a2, 1
 ; RV32M-NEXT:    andi a1, a1, 1
 ; RV32M-NEXT:    neg a1, a1
-; RV32M-NEXT:    addi a2, zero, 7
-; RV32M-NEXT:    mv a3, zero
+; RV32M-NEXT:    li a2, 7
+; RV32M-NEXT:    li a3, 0
 ; RV32M-NEXT:    call __moddi3@plt
 ; RV32M-NEXT:    mv s5, a0
 ; RV32M-NEXT:    mv s6, a1
-; RV32M-NEXT:    addi a2, zero, -5
-; RV32M-NEXT:    addi a3, zero, -1
+; RV32M-NEXT:    li a2, -5
+; RV32M-NEXT:    li a3, -1
 ; RV32M-NEXT:    mv a0, s3
 ; RV32M-NEXT:    mv a1, s1
 ; RV32M-NEXT:    call __moddi3@plt
 ; RV32M-NEXT:    mv s1, a0
 ; RV32M-NEXT:    mv s3, a1
-; RV32M-NEXT:    addi a2, zero, 6
+; RV32M-NEXT:    li a2, 6
 ; RV32M-NEXT:    mv a0, s4
 ; RV32M-NEXT:    mv a1, s2
-; RV32M-NEXT:    mv a3, zero
+; RV32M-NEXT:    li a3, 0
 ; RV32M-NEXT:    call __moddi3@plt
 ; RV32M-NEXT:    xori a2, s1, 2
 ; RV32M-NEXT:    or a2, a2, s3
@@ -568,7 +568,7 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64M-NEXT:    lwu a2, 8(a0)
 ; RV64M-NEXT:    slli a1, a1, 32
 ; RV64M-NEXT:    or a2, a2, a1
-; RV64M-NEXT:    addi a6, zero, -1
+; RV64M-NEXT:    li a6, -1
 ; RV64M-NEXT:    srli a3, a6, 24
 ; RV64M-NEXT:    and a2, a2, a3
 ; RV64M-NEXT:    ld a3, 0(a0)
@@ -640,7 +640,7 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64M-NEXT:    neg a1, a1
 ; RV64M-NEXT:    neg a4, a2
 ; RV64M-NEXT:    neg a3, a3
-; RV64M-NEXT:    addi a5, zero, 7
+; RV64M-NEXT:    li a5, 7
 ; RV64M-NEXT:    slli a5, a5, 32
 ; RV64M-NEXT:    and a4, a4, a5
 ; RV64M-NEXT:    srli a4, a4, 32
@@ -688,26 +688,26 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32MV-NEXT:    neg s5, a2
 ; RV32MV-NEXT:    andi a1, a1, 1
 ; RV32MV-NEXT:    neg a1, a1
-; RV32MV-NEXT:    addi a2, zero, 6
-; RV32MV-NEXT:    mv a3, zero
+; RV32MV-NEXT:    li a2, 6
+; RV32MV-NEXT:    li a3, 0
 ; RV32MV-NEXT:    call __moddi3@plt
 ; RV32MV-NEXT:    sw a1, 36(sp)
 ; RV32MV-NEXT:    sw a0, 32(sp)
-; RV32MV-NEXT:    addi a2, zero, -5
-; RV32MV-NEXT:    addi a3, zero, -1
+; RV32MV-NEXT:    li a2, -5
+; RV32MV-NEXT:    li a3, -1
 ; RV32MV-NEXT:    mv a0, s4
 ; RV32MV-NEXT:    mv a1, s5
 ; RV32MV-NEXT:    call __moddi3@plt
 ; RV32MV-NEXT:    sw a1, 52(sp)
 ; RV32MV-NEXT:    sw a0, 48(sp)
-; RV32MV-NEXT:    addi a2, zero, 7
+; RV32MV-NEXT:    li a2, 7
 ; RV32MV-NEXT:    mv a0, s2
 ; RV32MV-NEXT:    mv a1, s3
-; RV32MV-NEXT:    mv a3, zero
+; RV32MV-NEXT:    li a3, 0
 ; RV32MV-NEXT:    call __moddi3@plt
 ; RV32MV-NEXT:    sw a1, 44(sp)
 ; RV32MV-NEXT:    sw a0, 40(sp)
-; RV32MV-NEXT:    addi a0, zero, 85
+; RV32MV-NEXT:    li a0, 85
 ; RV32MV-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; RV32MV-NEXT:    vmv.s.x v0, a0
 ; RV32MV-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
@@ -772,7 +772,7 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64MV-NEXT:    lwu a2, 8(a0)
 ; RV64MV-NEXT:    slli a1, a1, 32
 ; RV64MV-NEXT:    or a2, a2, a1
-; RV64MV-NEXT:    addi a6, zero, -1
+; RV64MV-NEXT:    li a6, -1
 ; RV64MV-NEXT:    ld a3, 0(a0)
 ; RV64MV-NEXT:    srli a4, a6, 24
 ; RV64MV-NEXT:    and a2, a2, a4
@@ -796,7 +796,7 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64MV-NEXT:    mulh a5, a3, a5
 ; RV64MV-NEXT:    srli a1, a5, 63
 ; RV64MV-NEXT:    add a1, a5, a1
-; RV64MV-NEXT:    addi a5, zero, 6
+; RV64MV-NEXT:    li a5, 6
 ; RV64MV-NEXT:    mul a1, a1, a5
 ; RV64MV-NEXT:    sub a1, a3, a1
 ; RV64MV-NEXT:    sd a1, 32(sp)

diff  --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
index 42a7ee5c2396..64ab2bf118b4 100644
--- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
@@ -24,19 +24,19 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    lh s0, 4(a1)
 ; RV32I-NEXT:    lh a2, 0(a1)
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s4, a0
-; RV32I-NEXT:    addi a1, zero, -124
+; RV32I-NEXT:    li a1, -124
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s5, a0
-; RV32I-NEXT:    addi a1, zero, 98
+; RV32I-NEXT:    li a1, 98
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a1, zero, -1003
+; RV32I-NEXT:    li a1, -1003
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    sh a0, 6(s1)
@@ -66,7 +66,7 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV32IM-NEXT:    srli a2, a5, 31
 ; RV32IM-NEXT:    srli a5, a5, 6
 ; RV32IM-NEXT:    add a2, a5, a2
-; RV32IM-NEXT:    addi a5, zero, 95
+; RV32IM-NEXT:    li a5, 95
 ; RV32IM-NEXT:    mul a2, a2, a5
 ; RV32IM-NEXT:    sub a2, a4, a2
 ; RV32IM-NEXT:    lui a4, 507375
@@ -76,7 +76,7 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV32IM-NEXT:    srli a5, a4, 31
 ; RV32IM-NEXT:    srli a4, a4, 6
 ; RV32IM-NEXT:    add a4, a4, a5
-; RV32IM-NEXT:    addi a5, zero, -124
+; RV32IM-NEXT:    li a5, -124
 ; RV32IM-NEXT:    mul a4, a4, a5
 ; RV32IM-NEXT:    sub a1, a1, a4
 ; RV32IM-NEXT:    lui a4, 342392
@@ -85,7 +85,7 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV32IM-NEXT:    srli a5, a4, 31
 ; RV32IM-NEXT:    srli a4, a4, 5
 ; RV32IM-NEXT:    add a4, a4, a5
-; RV32IM-NEXT:    addi a5, zero, 98
+; RV32IM-NEXT:    li a5, 98
 ; RV32IM-NEXT:    mul a4, a4, a5
 ; RV32IM-NEXT:    sub a3, a3, a4
 ; RV32IM-NEXT:    lui a4, 780943
@@ -94,7 +94,7 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV32IM-NEXT:    srli a5, a4, 31
 ; RV32IM-NEXT:    srli a4, a4, 8
 ; RV32IM-NEXT:    add a4, a4, a5
-; RV32IM-NEXT:    addi a5, zero, -1003
+; RV32IM-NEXT:    li a5, -1003
 ; RV32IM-NEXT:    mul a4, a4, a5
 ; RV32IM-NEXT:    sub a4, a6, a4
 ; RV32IM-NEXT:    sh a4, 6(a0)
@@ -118,19 +118,19 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    lh s0, 8(a1)
 ; RV64I-NEXT:    lh a2, 0(a1)
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s4, a0
-; RV64I-NEXT:    addi a1, zero, -124
+; RV64I-NEXT:    li a1, -124
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s5, a0
-; RV64I-NEXT:    addi a1, zero, 98
+; RV64I-NEXT:    li a1, 98
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    addi a1, zero, -1003
+; RV64I-NEXT:    li a1, -1003
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    sh a0, 6(s1)
@@ -166,7 +166,7 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a2, a5, 63
 ; RV64IM-NEXT:    srli a5, a5, 6
 ; RV64IM-NEXT:    addw a2, a5, a2
-; RV64IM-NEXT:    addi a5, zero, 95
+; RV64IM-NEXT:    li a5, 95
 ; RV64IM-NEXT:    mulw a2, a2, a5
 ; RV64IM-NEXT:    subw a1, a1, a2
 ; RV64IM-NEXT:    lui a2, 777976
@@ -181,7 +181,7 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a5, a2, 63
 ; RV64IM-NEXT:    srli a2, a2, 6
 ; RV64IM-NEXT:    addw a2, a2, a5
-; RV64IM-NEXT:    addi a5, zero, -124
+; RV64IM-NEXT:    li a5, -124
 ; RV64IM-NEXT:    mulw a2, a2, a5
 ; RV64IM-NEXT:    subw a2, a4, a2
 ; RV64IM-NEXT:    lui a4, 2675
@@ -196,7 +196,7 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a5, a4, 63
 ; RV64IM-NEXT:    srli a4, a4, 5
 ; RV64IM-NEXT:    addw a4, a4, a5
-; RV64IM-NEXT:    addi a5, zero, 98
+; RV64IM-NEXT:    li a5, 98
 ; RV64IM-NEXT:    mulw a4, a4, a5
 ; RV64IM-NEXT:    subw a3, a3, a4
 ; RV64IM-NEXT:    lui a4, 1040212
@@ -211,7 +211,7 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a5, a4, 63
 ; RV64IM-NEXT:    srli a4, a4, 7
 ; RV64IM-NEXT:    addw a4, a4, a5
-; RV64IM-NEXT:    addi a5, zero, -1003
+; RV64IM-NEXT:    li a5, -1003
 ; RV64IM-NEXT:    mulw a4, a4, a5
 ; RV64IM-NEXT:    subw a4, a6, a4
 ; RV64IM-NEXT:    sh a4, 6(a0)
@@ -239,19 +239,19 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    lh s0, 4(a1)
 ; RV32I-NEXT:    lh a2, 0(a1)
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s4, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s5, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    call __modsi3@plt
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    call __modsi3 at plt
 ; RV32I-NEXT:    sh a0, 6(s1)
@@ -281,7 +281,7 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind {
 ; RV32IM-NEXT:    srli a7, a2, 31
 ; RV32IM-NEXT:    srli a2, a2, 6
 ; RV32IM-NEXT:    add a2, a2, a7
-; RV32IM-NEXT:    addi a7, zero, 95
+; RV32IM-NEXT:    li a7, 95
 ; RV32IM-NEXT:    mul a2, a2, a7
 ; RV32IM-NEXT:    sub t0, a4, a2
 ; RV32IM-NEXT:    mulh a4, a1, a5
@@ -326,19 +326,19 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    lh s0, 8(a1)
 ; RV64I-NEXT:    lh a2, 0(a1)
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    call __moddi3 at plt
 ; RV64I-NEXT:    mv s4, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __moddi3 at plt
 ; RV64I-NEXT:    mv s5, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    call __moddi3 at plt
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __moddi3 at plt
 ; RV64I-NEXT:    sh a0, 6(s1)
@@ -374,7 +374,7 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a3, a2, 63
 ; RV64IM-NEXT:    srli a2, a2, 6
 ; RV64IM-NEXT:    addw a2, a2, a3
-; RV64IM-NEXT:    addi a3, zero, 95
+; RV64IM-NEXT:    li a3, 95
 ; RV64IM-NEXT:    mulw a2, a2, a3
 ; RV64IM-NEXT:    subw t0, a1, a2
 ; RV64IM-NEXT:    mulh a2, a4, a5
@@ -429,35 +429,35 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    lh s4, 8(a1)
 ; RV32I-NEXT:    lh s1, 12(a1)
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __modsi3 at plt
 ; RV32I-NEXT:    mv s5, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s4
 ; RV32I-NEXT:    call __modsi3 at plt
 ; RV32I-NEXT:    mv s6, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    call __modsi3 at plt
 ; RV32I-NEXT:    mv s7, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    call __modsi3 at plt
 ; RV32I-NEXT:    mv s8, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __divsi3 at plt
 ; RV32I-NEXT:    mv s9, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s4
 ; RV32I-NEXT:    call __divsi3 at plt
 ; RV32I-NEXT:    mv s4, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    call __divsi3 at plt
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    call __divsi3 at plt
 ; RV32I-NEXT:    add a0, s8, a0
@@ -495,7 +495,7 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV32IM-NEXT:    srli a7, a2, 31
 ; RV32IM-NEXT:    srai a2, a2, 6
 ; RV32IM-NEXT:    add t0, a2, a7
-; RV32IM-NEXT:    addi a7, zero, 95
+; RV32IM-NEXT:    li a7, 95
 ; RV32IM-NEXT:    mul a2, t0, a7
 ; RV32IM-NEXT:    sub t1, a4, a2
 ; RV32IM-NEXT:    mulh a4, a1, a5
@@ -548,35 +548,35 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    lh s4, 16(a1)
 ; RV64I-NEXT:    lh s1, 24(a1)
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __moddi3 at plt
 ; RV64I-NEXT:    mv s5, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s4
 ; RV64I-NEXT:    call __moddi3 at plt
 ; RV64I-NEXT:    mv s6, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    call __moddi3 at plt
 ; RV64I-NEXT:    mv s7, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __moddi3 at plt
 ; RV64I-NEXT:    mv s8, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __divdi3 at plt
 ; RV64I-NEXT:    mv s9, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s4
 ; RV64I-NEXT:    call __divdi3 at plt
 ; RV64I-NEXT:    mv s4, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    call __divdi3 at plt
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __divdi3 at plt
 ; RV64I-NEXT:    addw a0, s8, a0
@@ -620,7 +620,7 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a3, a2, 63
 ; RV64IM-NEXT:    srai a2, a2, 6
 ; RV64IM-NEXT:    addw t3, a2, a3
-; RV64IM-NEXT:    addi t0, zero, 95
+; RV64IM-NEXT:    li t0, 95
 ; RV64IM-NEXT:    mulw a3, t3, t0
 ; RV64IM-NEXT:    subw t1, a1, a3
 ; RV64IM-NEXT:    mulh a3, a4, a5
@@ -686,7 +686,7 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    add a1, a3, a1
 ; RV32I-NEXT:    andi a1, a1, -8
 ; RV32I-NEXT:    sub s1, a3, a1
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    call __modsi3 at plt
 ; RV32I-NEXT:    sh a0, 6(s0)
 ; RV32I-NEXT:    sh s1, 4(s0)
@@ -713,7 +713,7 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV32IM-NEXT:    srli a6, a5, 31
 ; RV32IM-NEXT:    srli a5, a5, 6
 ; RV32IM-NEXT:    add a6, a5, a6
-; RV32IM-NEXT:    addi a5, zero, 95
+; RV32IM-NEXT:    li a5, 95
 ; RV32IM-NEXT:    mul a5, a6, a5
 ; RV32IM-NEXT:    sub a4, a4, a5
 ; RV32IM-NEXT:    srli a5, a1, 26
@@ -759,7 +759,7 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    add a1, a3, a1
 ; RV64I-NEXT:    andi a1, a1, -8
 ; RV64I-NEXT:    subw s1, a3, a1
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    call __moddi3 at plt
 ; RV64I-NEXT:    sh a0, 6(s0)
 ; RV64I-NEXT:    sh s1, 4(s0)
@@ -792,7 +792,7 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a2, a5, 63
 ; RV64IM-NEXT:    srli a5, a5, 6
 ; RV64IM-NEXT:    addw a2, a5, a2
-; RV64IM-NEXT:    addi a5, zero, 95
+; RV64IM-NEXT:    li a5, 95
 ; RV64IM-NEXT:    mulw a2, a2, a5
 ; RV64IM-NEXT:    subw a1, a1, a2
 ; RV64IM-NEXT:    srli a2, a4, 58
@@ -830,11 +830,11 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    lh s1, 8(a1)
 ; RV32I-NEXT:    lh a2, 4(a1)
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a1, zero, 654
+; RV32I-NEXT:    li a1, 654
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    call __modsi3 at plt
 ; RV32I-NEXT:    mv s3, a0
-; RV32I-NEXT:    addi a1, zero, 23
+; RV32I-NEXT:    li a1, 23
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __modsi3 at plt
 ; RV32I-NEXT:    mv s1, a0
@@ -866,7 +866,7 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV32IM-NEXT:    srli a5, a4, 31
 ; RV32IM-NEXT:    srli a4, a4, 9
 ; RV32IM-NEXT:    add a4, a4, a5
-; RV32IM-NEXT:    addi a5, zero, 654
+; RV32IM-NEXT:    li a5, 654
 ; RV32IM-NEXT:    mul a4, a4, a5
 ; RV32IM-NEXT:    sub a3, a3, a4
 ; RV32IM-NEXT:    lui a4, 729444
@@ -876,7 +876,7 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV32IM-NEXT:    srli a5, a4, 31
 ; RV32IM-NEXT:    srli a4, a4, 4
 ; RV32IM-NEXT:    add a4, a4, a5
-; RV32IM-NEXT:    addi a5, zero, 23
+; RV32IM-NEXT:    li a5, 23
 ; RV32IM-NEXT:    mul a4, a4, a5
 ; RV32IM-NEXT:    sub a1, a1, a4
 ; RV32IM-NEXT:    lui a4, 395996
@@ -907,11 +907,11 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    lh s1, 16(a1)
 ; RV64I-NEXT:    lh a2, 8(a1)
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    addi a1, zero, 654
+; RV64I-NEXT:    li a1, 654
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    call __moddi3 at plt
 ; RV64I-NEXT:    mv s3, a0
-; RV64I-NEXT:    addi a1, zero, 23
+; RV64I-NEXT:    li a1, 23
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __moddi3 at plt
 ; RV64I-NEXT:    mv s1, a0
@@ -949,7 +949,7 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a5, a4, 63
 ; RV64IM-NEXT:    srli a4, a4, 4
 ; RV64IM-NEXT:    addw a4, a4, a5
-; RV64IM-NEXT:    addi a5, zero, 23
+; RV64IM-NEXT:    li a5, 23
 ; RV64IM-NEXT:    mulw a4, a4, a5
 ; RV64IM-NEXT:    subw a1, a1, a4
 ; RV64IM-NEXT:    lui a4, 6413
@@ -964,7 +964,7 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a5, a4, 63
 ; RV64IM-NEXT:    srli a4, a4, 8
 ; RV64IM-NEXT:    addw a4, a4, a5
-; RV64IM-NEXT:    addi a5, zero, 654
+; RV64IM-NEXT:    li a5, 654
 ; RV64IM-NEXT:    mulw a4, a4, a5
 ; RV64IM-NEXT:    subw a3, a3, a4
 ; RV64IM-NEXT:    lui a4, 12375
@@ -1011,7 +1011,7 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    lui a3, 8
 ; RV32I-NEXT:    and a1, a1, a3
 ; RV32I-NEXT:    sub s3, a2, a1
-; RV32I-NEXT:    addi a1, zero, 23
+; RV32I-NEXT:    li a1, 23
 ; RV32I-NEXT:    call __modsi3 at plt
 ; RV32I-NEXT:    mv s1, a0
 ; RV32I-NEXT:    lui a0, 1
@@ -1042,7 +1042,7 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV32IM-NEXT:    srli a5, a4, 31
 ; RV32IM-NEXT:    srli a4, a4, 4
 ; RV32IM-NEXT:    add a4, a4, a5
-; RV32IM-NEXT:    addi a5, zero, 23
+; RV32IM-NEXT:    li a5, 23
 ; RV32IM-NEXT:    mul a4, a4, a5
 ; RV32IM-NEXT:    sub a3, a3, a4
 ; RV32IM-NEXT:    lui a4, 395996
@@ -1083,7 +1083,7 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    lui a3, 8
 ; RV64I-NEXT:    and a1, a1, a3
 ; RV64I-NEXT:    subw s3, a2, a1
-; RV64I-NEXT:    addi a1, zero, 23
+; RV64I-NEXT:    li a1, 23
 ; RV64I-NEXT:    call __moddi3 at plt
 ; RV64I-NEXT:    mv s1, a0
 ; RV64I-NEXT:    lui a0, 1
@@ -1120,7 +1120,7 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a5, a4, 63
 ; RV64IM-NEXT:    srli a4, a4, 4
 ; RV64IM-NEXT:    addw a4, a4, a5
-; RV64IM-NEXT:    addi a5, zero, 23
+; RV64IM-NEXT:    li a5, 23
 ; RV64IM-NEXT:    mulw a4, a4, a5
 ; RV64IM-NEXT:    subw a1, a1, a4
 ; RV64IM-NEXT:    lui a4, 12375
@@ -1178,23 +1178,23 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a1)
 ; RV32I-NEXT:    lw a1, 4(a1)
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a2, zero, 1
+; RV32I-NEXT:    li a2, 1
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __moddi3 at plt
 ; RV32I-NEXT:    mv s7, a0
 ; RV32I-NEXT:    mv s8, a1
-; RV32I-NEXT:    addi a2, zero, 654
+; RV32I-NEXT:    li a2, 654
 ; RV32I-NEXT:    mv a0, s6
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __moddi3 at plt
 ; RV32I-NEXT:    mv s6, a0
 ; RV32I-NEXT:    mv s9, a1
-; RV32I-NEXT:    addi a2, zero, 23
+; RV32I-NEXT:    li a2, 23
 ; RV32I-NEXT:    mv a0, s4
 ; RV32I-NEXT:    mv a1, s5
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __moddi3 at plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv s1, a1
@@ -1202,7 +1202,7 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV32I-NEXT:    addi a2, a0, 1327
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __moddi3 at plt
 ; RV32I-NEXT:    sw a1, 28(s0)
 ; RV32I-NEXT:    sw a0, 24(s0)
@@ -1249,23 +1249,23 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV32IM-NEXT:    lw a3, 0(a1)
 ; RV32IM-NEXT:    lw a1, 4(a1)
 ; RV32IM-NEXT:    mv s0, a0
-; RV32IM-NEXT:    addi a2, zero, 1
+; RV32IM-NEXT:    li a2, 1
 ; RV32IM-NEXT:    mv a0, a3
-; RV32IM-NEXT:    mv a3, zero
+; RV32IM-NEXT:    li a3, 0
 ; RV32IM-NEXT:    call __moddi3 at plt
 ; RV32IM-NEXT:    mv s7, a0
 ; RV32IM-NEXT:    mv s8, a1
-; RV32IM-NEXT:    addi a2, zero, 654
+; RV32IM-NEXT:    li a2, 654
 ; RV32IM-NEXT:    mv a0, s6
 ; RV32IM-NEXT:    mv a1, s1
-; RV32IM-NEXT:    mv a3, zero
+; RV32IM-NEXT:    li a3, 0
 ; RV32IM-NEXT:    call __moddi3 at plt
 ; RV32IM-NEXT:    mv s6, a0
 ; RV32IM-NEXT:    mv s9, a1
-; RV32IM-NEXT:    addi a2, zero, 23
+; RV32IM-NEXT:    li a2, 23
 ; RV32IM-NEXT:    mv a0, s4
 ; RV32IM-NEXT:    mv a1, s5
-; RV32IM-NEXT:    mv a3, zero
+; RV32IM-NEXT:    li a3, 0
 ; RV32IM-NEXT:    call __moddi3 at plt
 ; RV32IM-NEXT:    mv s4, a0
 ; RV32IM-NEXT:    mv s1, a1
@@ -1273,7 +1273,7 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV32IM-NEXT:    addi a2, a0, 1327
 ; RV32IM-NEXT:    mv a0, s2
 ; RV32IM-NEXT:    mv a1, s3
-; RV32IM-NEXT:    mv a3, zero
+; RV32IM-NEXT:    li a3, 0
 ; RV32IM-NEXT:    call __moddi3 at plt
 ; RV32IM-NEXT:    sw a1, 28(s0)
 ; RV32IM-NEXT:    sw a0, 24(s0)
@@ -1309,11 +1309,11 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV64I-NEXT:    ld s1, 16(a1)
 ; RV64I-NEXT:    ld a2, 8(a1)
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    addi a1, zero, 654
+; RV64I-NEXT:    li a1, 654
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    call __moddi3 at plt
 ; RV64I-NEXT:    mv s3, a0
-; RV64I-NEXT:    addi a1, zero, 23
+; RV64I-NEXT:    li a1, 23
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __moddi3 at plt
 ; RV64I-NEXT:    mv s1, a0
@@ -1351,7 +1351,7 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV64IM-NEXT:    srli a5, a4, 63
 ; RV64IM-NEXT:    srai a4, a4, 4
 ; RV64IM-NEXT:    add a4, a4, a5
-; RV64IM-NEXT:    addi a5, zero, 23
+; RV64IM-NEXT:    li a5, 23
 ; RV64IM-NEXT:    mul a4, a4, a5
 ; RV64IM-NEXT:    sub a1, a1, a4
 ; RV64IM-NEXT:    lui a4, 6413
@@ -1366,7 +1366,7 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
 ; RV64IM-NEXT:    srli a5, a4, 63
 ; RV64IM-NEXT:    srai a4, a4, 8
 ; RV64IM-NEXT:    add a4, a4, a5
-; RV64IM-NEXT:    addi a5, zero, 654
+; RV64IM-NEXT:    li a5, 654
 ; RV64IM-NEXT:    mul a4, a4, a5
 ; RV64IM-NEXT:    sub a3, a3, a4
 ; RV64IM-NEXT:    lui a4, 12375

diff --git a/llvm/test/CodeGen/RISCV/ssub_sat.ll b/llvm/test/CodeGen/RISCV/ssub_sat.ll
index 94acffac7a81..6662516388e2 100644
--- a/llvm/test/CodeGen/RISCV/ssub_sat.ll
+++ b/llvm/test/CodeGen/RISCV/ssub_sat.ll
@@ -111,7 +111,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; RV64I-NEXT:    beq a3, a1, .LBB1_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    srai a0, a0, 63
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:  .LBB1_2:
@@ -145,7 +145,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; RV64IZbbNOZbt-NEXT:    beq a3, a1, .LBB1_2
 ; RV64IZbbNOZbt-NEXT:  # %bb.1:
 ; RV64IZbbNOZbt-NEXT:    srai a0, a0, 63
-; RV64IZbbNOZbt-NEXT:    addi a1, zero, -1
+; RV64IZbbNOZbt-NEXT:    li a1, -1
 ; RV64IZbbNOZbt-NEXT:    slli a1, a1, 63
 ; RV64IZbbNOZbt-NEXT:    xor a0, a0, a1
 ; RV64IZbbNOZbt-NEXT:  .LBB1_2:
@@ -175,7 +175,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; RV64IZbbZbt-NEXT:    slt a0, a1, a0
 ; RV64IZbbZbt-NEXT:    xor a0, a2, a0
 ; RV64IZbbZbt-NEXT:    srai a2, a1, 63
-; RV64IZbbZbt-NEXT:    addi a3, zero, -1
+; RV64IZbbZbt-NEXT:    li a3, -1
 ; RV64IZbbZbt-NEXT:    slli a3, a3, 63
 ; RV64IZbbZbt-NEXT:    xor a2, a2, a3
 ; RV64IZbbZbt-NEXT:    cmov a0, a0, a2, a1
@@ -250,54 +250,54 @@ define signext i8 @func8(i8 signext %x, i8 signext %y) nounwind {
 ; RV32I-LABEL: func8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    addi a1, zero, 127
+; RV32I-NEXT:    li a1, 127
 ; RV32I-NEXT:    bge a0, a1, .LBB3_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    addi a1, zero, -128
+; RV32I-NEXT:    li a1, -128
 ; RV32I-NEXT:    bge a1, a0, .LBB3_4
 ; RV32I-NEXT:  .LBB3_2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB3_3:
-; RV32I-NEXT:    addi a0, zero, 127
-; RV32I-NEXT:    addi a1, zero, -128
+; RV32I-NEXT:    li a0, 127
+; RV32I-NEXT:    li a1, -128
 ; RV32I-NEXT:    blt a1, a0, .LBB3_2
 ; RV32I-NEXT:  .LBB3_4:
-; RV32I-NEXT:    addi a0, zero, -128
+; RV32I-NEXT:    li a0, -128
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func8:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    addi a1, zero, 127
+; RV64I-NEXT:    li a1, 127
 ; RV64I-NEXT:    bge a0, a1, .LBB3_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    addi a1, zero, -128
+; RV64I-NEXT:    li a1, -128
 ; RV64I-NEXT:    bge a1, a0, .LBB3_4
 ; RV64I-NEXT:  .LBB3_2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB3_3:
-; RV64I-NEXT:    addi a0, zero, 127
-; RV64I-NEXT:    addi a1, zero, -128
+; RV64I-NEXT:    li a0, 127
+; RV64I-NEXT:    li a1, -128
 ; RV64I-NEXT:    blt a1, a0, .LBB3_2
 ; RV64I-NEXT:  .LBB3_4:
-; RV64I-NEXT:    addi a0, zero, -128
+; RV64I-NEXT:    li a0, -128
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func8:
 ; RV32IZbb:       # %bb.0:
 ; RV32IZbb-NEXT:    sub a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, 127
+; RV32IZbb-NEXT:    li a1, 127
 ; RV32IZbb-NEXT:    min a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, -128
+; RV32IZbb-NEXT:    li a1, -128
 ; RV32IZbb-NEXT:    max a0, a0, a1
 ; RV32IZbb-NEXT:    ret
 ;
 ; RV64IZbb-LABEL: func8:
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    sub a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, 127
+; RV64IZbb-NEXT:    li a1, 127
 ; RV64IZbb-NEXT:    min a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, -128
+; RV64IZbb-NEXT:    li a1, -128
 ; RV64IZbb-NEXT:    max a0, a0, a1
 ; RV64IZbb-NEXT:    ret
   %tmp = call i8 @llvm.ssub.sat.i8(i8 %x, i8 %y);
@@ -308,54 +308,54 @@ define signext i4 @func3(i4 signext %x, i4 signext %y) nounwind {
 ; RV32I-LABEL: func3:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    addi a1, zero, 7
+; RV32I-NEXT:    li a1, 7
 ; RV32I-NEXT:    bge a0, a1, .LBB4_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    addi a1, zero, -8
+; RV32I-NEXT:    li a1, -8
 ; RV32I-NEXT:    bge a1, a0, .LBB4_4
 ; RV32I-NEXT:  .LBB4_2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB4_3:
-; RV32I-NEXT:    addi a0, zero, 7
-; RV32I-NEXT:    addi a1, zero, -8
+; RV32I-NEXT:    li a0, 7
+; RV32I-NEXT:    li a1, -8
 ; RV32I-NEXT:    blt a1, a0, .LBB4_2
 ; RV32I-NEXT:  .LBB4_4:
-; RV32I-NEXT:    addi a0, zero, -8
+; RV32I-NEXT:    li a0, -8
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func3:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    addi a1, zero, 7
+; RV64I-NEXT:    li a1, 7
 ; RV64I-NEXT:    bge a0, a1, .LBB4_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    addi a1, zero, -8
+; RV64I-NEXT:    li a1, -8
 ; RV64I-NEXT:    bge a1, a0, .LBB4_4
 ; RV64I-NEXT:  .LBB4_2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB4_3:
-; RV64I-NEXT:    addi a0, zero, 7
-; RV64I-NEXT:    addi a1, zero, -8
+; RV64I-NEXT:    li a0, 7
+; RV64I-NEXT:    li a1, -8
 ; RV64I-NEXT:    blt a1, a0, .LBB4_2
 ; RV64I-NEXT:  .LBB4_4:
-; RV64I-NEXT:    addi a0, zero, -8
+; RV64I-NEXT:    li a0, -8
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func3:
 ; RV32IZbb:       # %bb.0:
 ; RV32IZbb-NEXT:    sub a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, 7
+; RV32IZbb-NEXT:    li a1, 7
 ; RV32IZbb-NEXT:    min a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, -8
+; RV32IZbb-NEXT:    li a1, -8
 ; RV32IZbb-NEXT:    max a0, a0, a1
 ; RV32IZbb-NEXT:    ret
 ;
 ; RV64IZbb-LABEL: func3:
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    sub a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, 7
+; RV64IZbb-NEXT:    li a1, 7
 ; RV64IZbb-NEXT:    min a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, -8
+; RV64IZbb-NEXT:    li a1, -8
 ; RV64IZbb-NEXT:    max a0, a0, a1
 ; RV64IZbb-NEXT:    ret
   %tmp = call i4 @llvm.ssub.sat.i4(i4 %x, i4 %y);

diff --git a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
index fa93cc0e7e88..3ff18083e8d5 100644
--- a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
@@ -119,7 +119,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; RV64I-NEXT:    beq a3, a1, .LBB1_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    srai a0, a0, 63
-; RV64I-NEXT:    addi a1, zero, -1
+; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:  .LBB1_2:
@@ -153,7 +153,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; RV64IZbbNOZbt-NEXT:    beq a3, a1, .LBB1_2
 ; RV64IZbbNOZbt-NEXT:  # %bb.1:
 ; RV64IZbbNOZbt-NEXT:    srai a0, a0, 63
-; RV64IZbbNOZbt-NEXT:    addi a1, zero, -1
+; RV64IZbbNOZbt-NEXT:    li a1, -1
 ; RV64IZbbNOZbt-NEXT:    slli a1, a1, 63
 ; RV64IZbbNOZbt-NEXT:    xor a0, a0, a1
 ; RV64IZbbNOZbt-NEXT:  .LBB1_2:
@@ -183,7 +183,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; RV64IZbbZbt-NEXT:    slt a0, a2, a0
 ; RV64IZbbZbt-NEXT:    xor a0, a1, a0
 ; RV64IZbbZbt-NEXT:    srai a1, a2, 63
-; RV64IZbbZbt-NEXT:    addi a3, zero, -1
+; RV64IZbbZbt-NEXT:    li a3, -1
 ; RV64IZbbZbt-NEXT:    slli a3, a3, 63
 ; RV64IZbbZbt-NEXT:    xor a1, a1, a3
 ; RV64IZbbZbt-NEXT:    cmov a0, a0, a1, a2
@@ -281,19 +281,19 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV32I-NEXT:    slli a1, a1, 24
 ; RV32I-NEXT:    srai a1, a1, 24
 ; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    addi a1, zero, 127
+; RV32I-NEXT:    li a1, 127
 ; RV32I-NEXT:    bge a0, a1, .LBB3_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    addi a1, zero, -128
+; RV32I-NEXT:    li a1, -128
 ; RV32I-NEXT:    bge a1, a0, .LBB3_4
 ; RV32I-NEXT:  .LBB3_2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB3_3:
-; RV32I-NEXT:    addi a0, zero, 127
-; RV32I-NEXT:    addi a1, zero, -128
+; RV32I-NEXT:    li a0, 127
+; RV32I-NEXT:    li a1, -128
 ; RV32I-NEXT:    blt a1, a0, .LBB3_2
 ; RV32I-NEXT:  .LBB3_4:
-; RV32I-NEXT:    addi a0, zero, -128
+; RV32I-NEXT:    li a0, -128
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func8:
@@ -304,19 +304,19 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64I-NEXT:    slli a1, a1, 56
 ; RV64I-NEXT:    srai a1, a1, 56
 ; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    addi a1, zero, 127
+; RV64I-NEXT:    li a1, 127
 ; RV64I-NEXT:    bge a0, a1, .LBB3_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    addi a1, zero, -128
+; RV64I-NEXT:    li a1, -128
 ; RV64I-NEXT:    bge a1, a0, .LBB3_4
 ; RV64I-NEXT:  .LBB3_2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB3_3:
-; RV64I-NEXT:    addi a0, zero, 127
-; RV64I-NEXT:    addi a1, zero, -128
+; RV64I-NEXT:    li a0, 127
+; RV64I-NEXT:    li a1, -128
 ; RV64I-NEXT:    blt a1, a0, .LBB3_2
 ; RV64I-NEXT:  .LBB3_4:
-; RV64I-NEXT:    addi a0, zero, -128
+; RV64I-NEXT:    li a0, -128
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func8:
@@ -325,9 +325,9 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV32IZbb-NEXT:    mul a1, a1, a2
 ; RV32IZbb-NEXT:    sext.b a1, a1
 ; RV32IZbb-NEXT:    sub a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, 127
+; RV32IZbb-NEXT:    li a1, 127
 ; RV32IZbb-NEXT:    min a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, -128
+; RV32IZbb-NEXT:    li a1, -128
 ; RV32IZbb-NEXT:    max a0, a0, a1
 ; RV32IZbb-NEXT:    ret
 ;
@@ -337,9 +337,9 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64IZbb-NEXT:    mul a1, a1, a2
 ; RV64IZbb-NEXT:    sext.b a1, a1
 ; RV64IZbb-NEXT:    sub a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, 127
+; RV64IZbb-NEXT:    li a1, 127
 ; RV64IZbb-NEXT:    min a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, -128
+; RV64IZbb-NEXT:    li a1, -128
 ; RV64IZbb-NEXT:    max a0, a0, a1
 ; RV64IZbb-NEXT:    ret
   %a = mul i8 %y, %z
@@ -356,19 +356,19 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV32I-NEXT:    slli a1, a1, 28
 ; RV32I-NEXT:    srai a1, a1, 28
 ; RV32I-NEXT:    sub a0, a0, a1
-; RV32I-NEXT:    addi a1, zero, 7
+; RV32I-NEXT:    li a1, 7
 ; RV32I-NEXT:    bge a0, a1, .LBB4_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    addi a1, zero, -8
+; RV32I-NEXT:    li a1, -8
 ; RV32I-NEXT:    bge a1, a0, .LBB4_4
 ; RV32I-NEXT:  .LBB4_2:
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB4_3:
-; RV32I-NEXT:    addi a0, zero, 7
-; RV32I-NEXT:    addi a1, zero, -8
+; RV32I-NEXT:    li a0, 7
+; RV32I-NEXT:    li a1, -8
 ; RV32I-NEXT:    blt a1, a0, .LBB4_2
 ; RV32I-NEXT:  .LBB4_4:
-; RV32I-NEXT:    addi a0, zero, -8
+; RV32I-NEXT:    li a0, -8
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func4:
@@ -379,19 +379,19 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64I-NEXT:    slli a1, a1, 60
 ; RV64I-NEXT:    srai a1, a1, 60
 ; RV64I-NEXT:    sub a0, a0, a1
-; RV64I-NEXT:    addi a1, zero, 7
+; RV64I-NEXT:    li a1, 7
 ; RV64I-NEXT:    bge a0, a1, .LBB4_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    addi a1, zero, -8
+; RV64I-NEXT:    li a1, -8
 ; RV64I-NEXT:    bge a1, a0, .LBB4_4
 ; RV64I-NEXT:  .LBB4_2:
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB4_3:
-; RV64I-NEXT:    addi a0, zero, 7
-; RV64I-NEXT:    addi a1, zero, -8
+; RV64I-NEXT:    li a0, 7
+; RV64I-NEXT:    li a1, -8
 ; RV64I-NEXT:    blt a1, a0, .LBB4_2
 ; RV64I-NEXT:  .LBB4_4:
-; RV64I-NEXT:    addi a0, zero, -8
+; RV64I-NEXT:    li a0, -8
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func4:
@@ -402,9 +402,9 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV32IZbb-NEXT:    slli a1, a1, 28
 ; RV32IZbb-NEXT:    srai a1, a1, 28
 ; RV32IZbb-NEXT:    sub a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, 7
+; RV32IZbb-NEXT:    li a1, 7
 ; RV32IZbb-NEXT:    min a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, -8
+; RV32IZbb-NEXT:    li a1, -8
 ; RV32IZbb-NEXT:    max a0, a0, a1
 ; RV32IZbb-NEXT:    ret
 ;
@@ -416,9 +416,9 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64IZbb-NEXT:    slli a1, a1, 60
 ; RV64IZbb-NEXT:    srai a1, a1, 60
 ; RV64IZbb-NEXT:    sub a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, 7
+; RV64IZbb-NEXT:    li a1, 7
 ; RV64IZbb-NEXT:    min a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, -8
+; RV64IZbb-NEXT:    li a1, -8
 ; RV64IZbb-NEXT:    max a0, a0, a1
 ; RV64IZbb-NEXT:    ret
   %a = mul i4 %y, %z

diff --git a/llvm/test/CodeGen/RISCV/stack-slot-size.ll b/llvm/test/CodeGen/RISCV/stack-slot-size.ll
index 7edbec2da97f..9a9cc63fcbb8 100644
--- a/llvm/test/CodeGen/RISCV/stack-slot-size.ll
+++ b/llvm/test/CodeGen/RISCV/stack-slot-size.ll
@@ -18,7 +18,7 @@ define i32 @caller129() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
 ; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a0, zero, 42
+; RV32I-NEXT:    li a0, 42
 ; RV32I-NEXT:    sw a0, 24(sp)
 ; RV32I-NEXT:    sw zero, 16(sp)
 ; RV32I-NEXT:    sw zero, 12(sp)
@@ -36,7 +36,7 @@ define i32 @caller129() nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
 ; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a0, zero, 42
+; RV64I-NEXT:    li a0, 42
 ; RV64I-NEXT:    sw a0, 36(sp)
 ; RV64I-NEXT:    sd zero, 16(sp)
 ; RV64I-NEXT:    sd zero, 8(sp)
@@ -59,7 +59,7 @@ define i32 @caller160() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
 ; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a0, zero, 42
+; RV32I-NEXT:    li a0, 42
 ; RV32I-NEXT:    sw a0, 24(sp)
 ; RV32I-NEXT:    sw zero, 16(sp)
 ; RV32I-NEXT:    sw zero, 12(sp)
@@ -77,7 +77,7 @@ define i32 @caller160() nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
 ; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a0, zero, 42
+; RV64I-NEXT:    li a0, 42
 ; RV64I-NEXT:    sw a0, 36(sp)
 ; RV64I-NEXT:    sd zero, 16(sp)
 ; RV64I-NEXT:    sd zero, 8(sp)
@@ -100,7 +100,7 @@ define i32 @caller161() nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
 ; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a0, zero, 42
+; RV32I-NEXT:    li a0, 42
 ; RV32I-NEXT:    sw a0, 24(sp)
 ; RV32I-NEXT:    sw zero, 20(sp)
 ; RV32I-NEXT:    sw zero, 16(sp)
@@ -119,7 +119,7 @@ define i32 @caller161() nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -48
 ; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a0, zero, 42
+; RV64I-NEXT:    li a0, 42
 ; RV64I-NEXT:    sw a0, 36(sp)
 ; RV64I-NEXT:    sd zero, 16(sp)
 ; RV64I-NEXT:    sd zero, 8(sp)

diff --git a/llvm/test/CodeGen/RISCV/uadd_sat.ll b/llvm/test/CodeGen/RISCV/uadd_sat.ll
index 8f817b3e4972..09ac927934dd 100644
--- a/llvm/test/CodeGen/RISCV/uadd_sat.ll
+++ b/llvm/test/CodeGen/RISCV/uadd_sat.ll
@@ -15,7 +15,7 @@ define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    add a1, a0, a1
-; RV32I-NEXT:    addi a0, zero, -1
+; RV32I-NEXT:    li a0, -1
 ; RV32I-NEXT:    bltu a1, a2, .LBB0_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
@@ -26,7 +26,7 @@ define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a2, a0
 ; RV64I-NEXT:    addw a1, a0, a1
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    bltu a1, a2, .LBB0_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
@@ -61,8 +61,8 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sltu a4, a3, a1
 ; RV32I-NEXT:  .LBB1_2:
-; RV32I-NEXT:    addi a0, zero, -1
-; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    li a0, -1
+; RV32I-NEXT:    li a1, -1
 ; RV32I-NEXT:    bnez a4, .LBB1_4
 ; RV32I-NEXT:  # %bb.3:
 ; RV32I-NEXT:    mv a0, a2
@@ -74,7 +74,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a2, a0
 ; RV64I-NEXT:    add a1, a0, a1
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    bltu a1, a2, .LBB1_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
@@ -91,8 +91,8 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; RV32IZbb-NEXT:  # %bb.1:
 ; RV32IZbb-NEXT:    sltu a4, a3, a1
 ; RV32IZbb-NEXT:  .LBB1_2:
-; RV32IZbb-NEXT:    addi a0, zero, -1
-; RV32IZbb-NEXT:    addi a1, zero, -1
+; RV32IZbb-NEXT:    li a0, -1
+; RV32IZbb-NEXT:    li a1, -1
 ; RV32IZbb-NEXT:    bnez a4, .LBB1_4
 ; RV32IZbb-NEXT:  # %bb.3:
 ; RV32IZbb-NEXT:    mv a0, a2
@@ -156,34 +156,34 @@ define zeroext i8 @func8(i8 zeroext %x, i8 zeroext %y) nounwind {
 ; RV32I-LABEL: func8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    addi a1, zero, 255
+; RV32I-NEXT:    li a1, 255
 ; RV32I-NEXT:    bltu a0, a1, .LBB3_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    addi a0, zero, 255
+; RV32I-NEXT:    li a0, 255
 ; RV32I-NEXT:  .LBB3_2:
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func8:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    addi a1, zero, 255
+; RV64I-NEXT:    li a1, 255
 ; RV64I-NEXT:    bltu a0, a1, .LBB3_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    addi a0, zero, 255
+; RV64I-NEXT:    li a0, 255
 ; RV64I-NEXT:  .LBB3_2:
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func8:
 ; RV32IZbb:       # %bb.0:
 ; RV32IZbb-NEXT:    add a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, 255
+; RV32IZbb-NEXT:    li a1, 255
 ; RV32IZbb-NEXT:    minu a0, a0, a1
 ; RV32IZbb-NEXT:    ret
 ;
 ; RV64IZbb-LABEL: func8:
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    add a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, 255
+; RV64IZbb-NEXT:    li a1, 255
 ; RV64IZbb-NEXT:    minu a0, a0, a1
 ; RV64IZbb-NEXT:    ret
   %tmp = call i8 @llvm.uadd.sat.i8(i8 %x, i8 %y);
@@ -194,34 +194,34 @@ define zeroext i4 @func3(i4 zeroext %x, i4 zeroext %y) nounwind {
 ; RV32I-LABEL: func3:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    addi a1, zero, 15
+; RV32I-NEXT:    li a1, 15
 ; RV32I-NEXT:    bltu a0, a1, .LBB4_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    addi a0, zero, 15
+; RV32I-NEXT:    li a0, 15
 ; RV32I-NEXT:  .LBB4_2:
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func3:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    addi a1, zero, 15
+; RV64I-NEXT:    li a1, 15
 ; RV64I-NEXT:    bltu a0, a1, .LBB4_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    addi a0, zero, 15
+; RV64I-NEXT:    li a0, 15
 ; RV64I-NEXT:  .LBB4_2:
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func3:
 ; RV32IZbb:       # %bb.0:
 ; RV32IZbb-NEXT:    add a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, 15
+; RV32IZbb-NEXT:    li a1, 15
 ; RV32IZbb-NEXT:    minu a0, a0, a1
 ; RV32IZbb-NEXT:    ret
 ;
 ; RV64IZbb-LABEL: func3:
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    add a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, 15
+; RV64IZbb-NEXT:    li a1, 15
 ; RV64IZbb-NEXT:    minu a0, a0, a1
 ; RV64IZbb-NEXT:    ret
   %tmp = call i4 @llvm.uadd.sat.i4(i4 %x, i4 %y);

diff --git a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
index 251e53adfe8f..3655564f16dc 100644
--- a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
@@ -16,7 +16,7 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ; RV32I-NEXT:    mv a3, a0
 ; RV32I-NEXT:    mul a0, a1, a2
 ; RV32I-NEXT:    add a1, a3, a0
-; RV32I-NEXT:    addi a0, zero, -1
+; RV32I-NEXT:    li a0, -1
 ; RV32I-NEXT:    bltu a1, a3, .LBB0_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
@@ -28,7 +28,7 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ; RV64I-NEXT:    mulw a1, a1, a2
 ; RV64I-NEXT:    addw a1, a0, a1
 ; RV64I-NEXT:    sext.w a2, a0
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    bltu a1, a2, .LBB0_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
@@ -67,8 +67,8 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sltu a4, a3, a1
 ; RV32I-NEXT:  .LBB1_2:
-; RV32I-NEXT:    addi a0, zero, -1
-; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    li a0, -1
+; RV32I-NEXT:    li a1, -1
 ; RV32I-NEXT:    bnez a4, .LBB1_4
 ; RV32I-NEXT:  # %bb.3:
 ; RV32I-NEXT:    mv a0, a2
@@ -80,7 +80,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    add a2, a0, a2
-; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    li a0, -1
 ; RV64I-NEXT:    bltu a2, a1, .LBB1_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a2
@@ -97,8 +97,8 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; RV32IZbb-NEXT:  # %bb.1:
 ; RV32IZbb-NEXT:    sltu a4, a3, a1
 ; RV32IZbb-NEXT:  .LBB1_2:
-; RV32IZbb-NEXT:    addi a0, zero, -1
-; RV32IZbb-NEXT:    addi a1, zero, -1
+; RV32IZbb-NEXT:    li a0, -1
+; RV32IZbb-NEXT:    li a1, -1
 ; RV32IZbb-NEXT:    bnez a4, .LBB1_4
 ; RV32IZbb-NEXT:  # %bb.3:
 ; RV32IZbb-NEXT:    mv a0, a2
@@ -179,10 +179,10 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV32I-NEXT:    mul a1, a1, a2
 ; RV32I-NEXT:    andi a1, a1, 255
 ; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    addi a1, zero, 255
+; RV32I-NEXT:    li a1, 255
 ; RV32I-NEXT:    bltu a0, a1, .LBB3_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    addi a0, zero, 255
+; RV32I-NEXT:    li a0, 255
 ; RV32I-NEXT:  .LBB3_2:
 ; RV32I-NEXT:    ret
 ;
@@ -192,10 +192,10 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64I-NEXT:    mul a1, a1, a2
 ; RV64I-NEXT:    andi a1, a1, 255
 ; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    addi a1, zero, 255
+; RV64I-NEXT:    li a1, 255
 ; RV64I-NEXT:    bltu a0, a1, .LBB3_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    addi a0, zero, 255
+; RV64I-NEXT:    li a0, 255
 ; RV64I-NEXT:  .LBB3_2:
 ; RV64I-NEXT:    ret
 ;
@@ -205,7 +205,7 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV32IZbb-NEXT:    mul a1, a1, a2
 ; RV32IZbb-NEXT:    andi a1, a1, 255
 ; RV32IZbb-NEXT:    add a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, 255
+; RV32IZbb-NEXT:    li a1, 255
 ; RV32IZbb-NEXT:    minu a0, a0, a1
 ; RV32IZbb-NEXT:    ret
 ;
@@ -215,7 +215,7 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64IZbb-NEXT:    mul a1, a1, a2
 ; RV64IZbb-NEXT:    andi a1, a1, 255
 ; RV64IZbb-NEXT:    add a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, 255
+; RV64IZbb-NEXT:    li a1, 255
 ; RV64IZbb-NEXT:    minu a0, a0, a1
 ; RV64IZbb-NEXT:    ret
   %a = mul i8 %y, %z
@@ -230,10 +230,10 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV32I-NEXT:    mul a1, a1, a2
 ; RV32I-NEXT:    andi a1, a1, 15
 ; RV32I-NEXT:    add a0, a0, a1
-; RV32I-NEXT:    addi a1, zero, 15
+; RV32I-NEXT:    li a1, 15
 ; RV32I-NEXT:    bltu a0, a1, .LBB4_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    addi a0, zero, 15
+; RV32I-NEXT:    li a0, 15
 ; RV32I-NEXT:  .LBB4_2:
 ; RV32I-NEXT:    ret
 ;
@@ -243,10 +243,10 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64I-NEXT:    mul a1, a1, a2
 ; RV64I-NEXT:    andi a1, a1, 15
 ; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    addi a1, zero, 15
+; RV64I-NEXT:    li a1, 15
 ; RV64I-NEXT:    bltu a0, a1, .LBB4_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    addi a0, zero, 15
+; RV64I-NEXT:    li a0, 15
 ; RV64I-NEXT:  .LBB4_2:
 ; RV64I-NEXT:    ret
 ;
@@ -256,7 +256,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV32IZbb-NEXT:    mul a1, a1, a2
 ; RV32IZbb-NEXT:    andi a1, a1, 15
 ; RV32IZbb-NEXT:    add a0, a0, a1
-; RV32IZbb-NEXT:    addi a1, zero, 15
+; RV32IZbb-NEXT:    li a1, 15
 ; RV32IZbb-NEXT:    minu a0, a0, a1
 ; RV32IZbb-NEXT:    ret
 ;
@@ -266,7 +266,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64IZbb-NEXT:    mul a1, a1, a2
 ; RV64IZbb-NEXT:    andi a1, a1, 15
 ; RV64IZbb-NEXT:    add a0, a0, a1
-; RV64IZbb-NEXT:    addi a1, zero, 15
+; RV64IZbb-NEXT:    li a1, 15
 ; RV64IZbb-NEXT:    minu a0, a0, a1
 ; RV64IZbb-NEXT:    ret
   %a = mul i4 %y, %z

diff --git a/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll b/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll
index 93cb61b24d5e..34b235bc353c 100644
--- a/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll
+++ b/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll
@@ -1039,7 +1039,7 @@ define i32 @out_constant_varx_42(i32 %x, i32 %y, i32 %mask) {
 ; RV32ZBB-LABEL: out_constant_varx_42:
 ; RV32ZBB:       # %bb.0:
 ; RV32ZBB-NEXT:    and a0, a2, a0
-; RV32ZBB-NEXT:    addi a1, zero, 42
+; RV32ZBB-NEXT:    li a1, 42
 ; RV32ZBB-NEXT:    andn a1, a1, a2
 ; RV32ZBB-NEXT:    or a0, a0, a1
 ; RV32ZBB-NEXT:    ret
@@ -1047,7 +1047,7 @@ define i32 @out_constant_varx_42(i32 %x, i32 %y, i32 %mask) {
 ; RV64ZBB-LABEL: out_constant_varx_42:
 ; RV64ZBB:       # %bb.0:
 ; RV64ZBB-NEXT:    and a0, a2, a0
-; RV64ZBB-NEXT:    addi a1, zero, 42
+; RV64ZBB-NEXT:    li a1, 42
 ; RV64ZBB-NEXT:    andn a1, a1, a2
 ; RV64ZBB-NEXT:    or a0, a0, a1
 ; RV64ZBB-NEXT:    ret
@@ -1375,7 +1375,7 @@ define i32 @out_constant_42_vary_invmask(i32 %x, i32 %y, i32 %mask) {
 ;
 ; RV32ZBB-LABEL: out_constant_42_vary_invmask:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    addi a0, zero, 42
+; RV32ZBB-NEXT:    li a0, 42
 ; RV32ZBB-NEXT:    andn a0, a0, a2
 ; RV32ZBB-NEXT:    and a1, a2, a1
 ; RV32ZBB-NEXT:    or a0, a0, a1
@@ -1383,7 +1383,7 @@ define i32 @out_constant_42_vary_invmask(i32 %x, i32 %y, i32 %mask) {
 ;
 ; RV64ZBB-LABEL: out_constant_42_vary_invmask:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a0, zero, 42
+; RV64ZBB-NEXT:    li a0, 42
 ; RV64ZBB-NEXT:    andn a0, a0, a2
 ; RV64ZBB-NEXT:    and a1, a2, a1
 ; RV64ZBB-NEXT:    or a0, a0, a1

diff --git a/llvm/test/CodeGen/RISCV/urem-lkk.ll b/llvm/test/CodeGen/RISCV/urem-lkk.ll
index 3f5a0c4b5dd0..5824db068590 100644
--- a/llvm/test/CodeGen/RISCV/urem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-lkk.ll
@@ -13,7 +13,7 @@ define i32 @fold_urem_positive_odd(i32 %x) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    call __umodsi3 at plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -28,7 +28,7 @@ define i32 @fold_urem_positive_odd(i32 %x) nounwind {
 ; RV32IM-NEXT:    srli a2, a2, 1
 ; RV32IM-NEXT:    add a1, a2, a1
 ; RV32IM-NEXT:    srli a1, a1, 6
-; RV32IM-NEXT:    addi a2, zero, 95
+; RV32IM-NEXT:    li a2, 95
 ; RV32IM-NEXT:    mul a1, a1, a2
 ; RV32IM-NEXT:    sub a0, a0, a1
 ; RV32IM-NEXT:    ret
@@ -39,7 +39,7 @@ define i32 @fold_urem_positive_odd(i32 %x) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    call __umoddi3 at plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -57,7 +57,7 @@ define i32 @fold_urem_positive_odd(i32 %x) nounwind {
 ; RV64IM-NEXT:    srliw a2, a2, 1
 ; RV64IM-NEXT:    add a1, a2, a1
 ; RV64IM-NEXT:    srli a1, a1, 6
-; RV64IM-NEXT:    addi a2, zero, 95
+; RV64IM-NEXT:    li a2, 95
 ; RV64IM-NEXT:    mulw a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
@@ -71,7 +71,7 @@ define i32 @fold_urem_positive_even(i32 %x) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a1, zero, 1060
+; RV32I-NEXT:    li a1, 1060
 ; RV32I-NEXT:    call __umodsi3 at plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -83,7 +83,7 @@ define i32 @fold_urem_positive_even(i32 %x) nounwind {
 ; RV32IM-NEXT:    addi a1, a1, -61
 ; RV32IM-NEXT:    mulhu a1, a0, a1
 ; RV32IM-NEXT:    srli a1, a1, 10
-; RV32IM-NEXT:    addi a2, zero, 1060
+; RV32IM-NEXT:    li a2, 1060
 ; RV32IM-NEXT:    mul a1, a1, a2
 ; RV32IM-NEXT:    sub a0, a0, a1
 ; RV32IM-NEXT:    ret
@@ -94,7 +94,7 @@ define i32 @fold_urem_positive_even(i32 %x) nounwind {
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    addi a1, zero, 1060
+; RV64I-NEXT:    li a1, 1060
 ; RV64I-NEXT:    call __umoddi3 at plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -108,7 +108,7 @@ define i32 @fold_urem_positive_even(i32 %x) nounwind {
 ; RV64IM-NEXT:    slli a2, a2, 32
 ; RV64IM-NEXT:    mulhu a1, a1, a2
 ; RV64IM-NEXT:    srli a1, a1, 42
-; RV64IM-NEXT:    addi a2, zero, 1060
+; RV64IM-NEXT:    li a2, 1060
 ; RV64IM-NEXT:    mulw a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
@@ -126,10 +126,10 @@ define i32 @combine_urem_udiv(i32 %x) nounwind {
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    call __umodsi3 at plt
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __udivsi3 at plt
 ; RV32I-NEXT:    add a0, s1, a0
@@ -148,7 +148,7 @@ define i32 @combine_urem_udiv(i32 %x) nounwind {
 ; RV32IM-NEXT:    srli a2, a2, 1
 ; RV32IM-NEXT:    add a1, a2, a1
 ; RV32IM-NEXT:    srli a1, a1, 6
-; RV32IM-NEXT:    addi a2, zero, 95
+; RV32IM-NEXT:    li a2, 95
 ; RV32IM-NEXT:    mul a2, a1, a2
 ; RV32IM-NEXT:    sub a0, a0, a2
 ; RV32IM-NEXT:    add a0, a0, a1
@@ -162,11 +162,11 @@ define i32 @combine_urem_udiv(i32 %x) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli s0, a0, 32
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __umoddi3 at plt
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __udivdi3 at plt
 ; RV64I-NEXT:    add a0, s1, a0
@@ -188,7 +188,7 @@ define i32 @combine_urem_udiv(i32 %x) nounwind {
 ; RV64IM-NEXT:    srliw a2, a2, 1
 ; RV64IM-NEXT:    add a1, a2, a1
 ; RV64IM-NEXT:    srli a1, a1, 6
-; RV64IM-NEXT:    addi a2, zero, 95
+; RV64IM-NEXT:    li a2, 95
 ; RV64IM-NEXT:    mulw a2, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a2
 ; RV64IM-NEXT:    addw a0, a0, a1
@@ -213,7 +213,7 @@ define i32 @dont_fold_urem_power_of_two(i32 %x) nounwind {
 define i32 @dont_fold_urem_one(i32 %x) nounwind {
 ; CHECK-LABEL: dont_fold_urem_one:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    mv a0, zero
+; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:    ret
   %1 = urem i32 %x, 1
   ret i32 %1
@@ -234,8 +234,8 @@ define i64 @dont_fold_urem_i64(i64 %x) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    addi a2, zero, 98
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a2, 98
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __umoddi3 at plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -245,8 +245,8 @@ define i64 @dont_fold_urem_i64(i64 %x) nounwind {
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    addi sp, sp, -16
 ; RV32IM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IM-NEXT:    addi a2, zero, 98
-; RV32IM-NEXT:    mv a3, zero
+; RV32IM-NEXT:    li a2, 98
+; RV32IM-NEXT:    li a3, 0
 ; RV32IM-NEXT:    call __umoddi3 at plt
 ; RV32IM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IM-NEXT:    addi sp, sp, 16
@@ -256,7 +256,7 @@ define i64 @dont_fold_urem_i64(i64 %x) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    addi a1, zero, 98
+; RV64I-NEXT:    li a1, 98
 ; RV64I-NEXT:    call __umoddi3 at plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -275,7 +275,7 @@ define i64 @dont_fold_urem_i64(i64 %x) nounwind {
 ; RV64IM-NEXT:    addi a2, a2, 1505
 ; RV64IM-NEXT:    mulhu a1, a1, a2
 ; RV64IM-NEXT:    srli a1, a1, 4
-; RV64IM-NEXT:    addi a2, zero, 98
+; RV64IM-NEXT:    li a2, 98
 ; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    sub a0, a0, a1
 ; RV64IM-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
index 786ca0bd2cc1..560af257d422 100644
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -213,7 +213,7 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    neg a0, a0
 ; RV32-NEXT:    andi a0, a0, 15
-; RV32-NEXT:    addi a1, zero, 3
+; RV32-NEXT:    li a1, 3
 ; RV32-NEXT:    sltu a0, a1, a0
 ; RV32-NEXT:    ret
 ;
@@ -223,7 +223,7 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 ; RV64-NEXT:    add a0, a1, a0
 ; RV64-NEXT:    neg a0, a0
 ; RV64-NEXT:    andi a0, a0, 15
-; RV64-NEXT:    addi a1, zero, 3
+; RV64-NEXT:    li a1, 3
 ; RV64-NEXT:    sltu a0, a1, a0
 ; RV64-NEXT:    ret
 ;
@@ -233,7 +233,7 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 ; RV32M-NEXT:    add a0, a1, a0
 ; RV32M-NEXT:    neg a0, a0
 ; RV32M-NEXT:    andi a0, a0, 15
-; RV32M-NEXT:    addi a1, zero, 3
+; RV32M-NEXT:    li a1, 3
 ; RV32M-NEXT:    sltu a0, a1, a0
 ; RV32M-NEXT:    ret
 ;
@@ -243,7 +243,7 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 ; RV64M-NEXT:    add a0, a1, a0
 ; RV64M-NEXT:    neg a0, a0
 ; RV64M-NEXT:    andi a0, a0, 15
-; RV64M-NEXT:    addi a1, zero, 3
+; RV64M-NEXT:    li a1, 3
 ; RV64M-NEXT:    sltu a0, a1, a0
 ; RV64M-NEXT:    ret
 ;
@@ -253,7 +253,7 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 ; RV32MV-NEXT:    add a0, a1, a0
 ; RV32MV-NEXT:    neg a0, a0
 ; RV32MV-NEXT:    andi a0, a0, 15
-; RV32MV-NEXT:    addi a1, zero, 3
+; RV32MV-NEXT:    li a1, 3
 ; RV32MV-NEXT:    sltu a0, a1, a0
 ; RV32MV-NEXT:    ret
 ;
@@ -263,7 +263,7 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 ; RV64MV-NEXT:    add a0, a1, a0
 ; RV64MV-NEXT:    neg a0, a0
 ; RV64MV-NEXT:    andi a0, a0, 15
-; RV64MV-NEXT:    addi a1, zero, 3
+; RV64MV-NEXT:    li a1, 3
 ; RV64MV-NEXT:    sltu a0, a1, a0
 ; RV64MV-NEXT:    ret
   %urem = urem i4 %X, 5
@@ -276,10 +276,10 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    addi a1, zero, 307
+; RV32-NEXT:    li a1, 307
 ; RV32-NEXT:    call __mulsi3 at plt
 ; RV32-NEXT:    andi a0, a0, 511
-; RV32-NEXT:    addi a1, zero, 1
+; RV32-NEXT:    li a1, 1
 ; RV32-NEXT:    sltu a0, a1, a0
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
@@ -289,10 +289,10 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    addi sp, sp, -16
 ; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    addi a1, zero, 307
+; RV64-NEXT:    li a1, 307
 ; RV64-NEXT:    call __muldi3 at plt
 ; RV64-NEXT:    andi a0, a0, 511
-; RV64-NEXT:    addi a1, zero, 1
+; RV64-NEXT:    li a1, 1
 ; RV64-NEXT:    sltu a0, a1, a0
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
@@ -300,37 +300,37 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
 ;
 ; RV32M-LABEL: test_urem_negative_odd:
 ; RV32M:       # %bb.0:
-; RV32M-NEXT:    addi a1, zero, 307
+; RV32M-NEXT:    li a1, 307
 ; RV32M-NEXT:    mul a0, a0, a1
 ; RV32M-NEXT:    andi a0, a0, 511
-; RV32M-NEXT:    addi a1, zero, 1
+; RV32M-NEXT:    li a1, 1
 ; RV32M-NEXT:    sltu a0, a1, a0
 ; RV32M-NEXT:    ret
 ;
 ; RV64M-LABEL: test_urem_negative_odd:
 ; RV64M:       # %bb.0:
-; RV64M-NEXT:    addi a1, zero, 307
+; RV64M-NEXT:    li a1, 307
 ; RV64M-NEXT:    mul a0, a0, a1
 ; RV64M-NEXT:    andi a0, a0, 511
-; RV64M-NEXT:    addi a1, zero, 1
+; RV64M-NEXT:    li a1, 1
 ; RV64M-NEXT:    sltu a0, a1, a0
 ; RV64M-NEXT:    ret
 ;
 ; RV32MV-LABEL: test_urem_negative_odd:
 ; RV32MV:       # %bb.0:
-; RV32MV-NEXT:    addi a1, zero, 307
+; RV32MV-NEXT:    li a1, 307
 ; RV32MV-NEXT:    mul a0, a0, a1
 ; RV32MV-NEXT:    andi a0, a0, 511
-; RV32MV-NEXT:    addi a1, zero, 1
+; RV32MV-NEXT:    li a1, 1
 ; RV32MV-NEXT:    sltu a0, a1, a0
 ; RV32MV-NEXT:    ret
 ;
 ; RV64MV-LABEL: test_urem_negative_odd:
 ; RV64MV:       # %bb.0:
-; RV64MV-NEXT:    addi a1, zero, 307
+; RV64MV-NEXT:    li a1, 307
 ; RV64MV-NEXT:    mul a0, a0, a1
 ; RV64MV-NEXT:    andi a0, a0, 511
-; RV64MV-NEXT:    addi a1, zero, 1
+; RV64MV-NEXT:    li a1, 1
 ; RV64MV-NEXT:    sltu a0, a1, a0
 ; RV64MV-NEXT:    ret
   %urem = urem i9 %X, -5
@@ -355,28 +355,28 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV32-NEXT:    or s1, a2, a0
 ; RV32-NEXT:    srli s2, a1, 11
 ; RV32-NEXT:    andi a0, a1, 2047
-; RV32-NEXT:    addi a1, zero, 683
+; RV32-NEXT:    li a1, 683
 ; RV32-NEXT:    call __mulsi3 at plt
 ; RV32-NEXT:    slli a1, a0, 10
 ; RV32-NEXT:    andi a0, a0, 2046
 ; RV32-NEXT:    srli a0, a0, 1
 ; RV32-NEXT:    or a0, a0, a1
 ; RV32-NEXT:    andi a0, a0, 2047
-; RV32-NEXT:    addi a1, zero, 341
+; RV32-NEXT:    li a1, 341
 ; RV32-NEXT:    sltu s3, a1, a0
-; RV32-NEXT:    addi a1, zero, 819
+; RV32-NEXT:    li a1, 819
 ; RV32-NEXT:    mv a0, s1
 ; RV32-NEXT:    call __mulsi3 at plt
 ; RV32-NEXT:    addi a0, a0, -1638
 ; RV32-NEXT:    andi a0, a0, 2047
-; RV32-NEXT:    addi a1, zero, 1
+; RV32-NEXT:    li a1, 1
 ; RV32-NEXT:    sltu s1, a1, a0
-; RV32-NEXT:    addi a1, zero, 1463
+; RV32-NEXT:    li a1, 1463
 ; RV32-NEXT:    mv a0, s2
 ; RV32-NEXT:    call __mulsi3 at plt
 ; RV32-NEXT:    addi a0, a0, -1463
 ; RV32-NEXT:    andi a0, a0, 2047
-; RV32-NEXT:    addi a1, zero, 292
+; RV32-NEXT:    li a1, 292
 ; RV32-NEXT:    sltu a0, a1, a0
 ; RV32-NEXT:    neg a1, s3
 ; RV32-NEXT:    neg a0, a0
@@ -415,28 +415,28 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64-NEXT:    srli s2, a0, 11
 ; RV64-NEXT:    srli s1, a0, 22
 ; RV64-NEXT:    andi a0, a0, 2047
-; RV64-NEXT:    addi a1, zero, 683
+; RV64-NEXT:    li a1, 683
 ; RV64-NEXT:    call __muldi3 at plt
 ; RV64-NEXT:    slli a1, a0, 10
 ; RV64-NEXT:    andi a0, a0, 2046
 ; RV64-NEXT:    srli a0, a0, 1
 ; RV64-NEXT:    or a0, a0, a1
 ; RV64-NEXT:    andi a0, a0, 2047
-; RV64-NEXT:    addi a1, zero, 341
+; RV64-NEXT:    li a1, 341
 ; RV64-NEXT:    sltu s3, a1, a0
-; RV64-NEXT:    addi a1, zero, 819
+; RV64-NEXT:    li a1, 819
 ; RV64-NEXT:    mv a0, s1
 ; RV64-NEXT:    call __muldi3 at plt
 ; RV64-NEXT:    addi a0, a0, -1638
 ; RV64-NEXT:    andi a0, a0, 2047
-; RV64-NEXT:    addi a1, zero, 1
+; RV64-NEXT:    li a1, 1
 ; RV64-NEXT:    sltu s1, a1, a0
-; RV64-NEXT:    addi a1, zero, 1463
+; RV64-NEXT:    li a1, 1463
 ; RV64-NEXT:    mv a0, s2
 ; RV64-NEXT:    call __muldi3 at plt
 ; RV64-NEXT:    addi a0, a0, -1463
 ; RV64-NEXT:    andi a0, a0, 2047
-; RV64-NEXT:    addi a1, zero, 292
+; RV64-NEXT:    li a1, 292
 ; RV64-NEXT:    sltu a0, a1, a0
 ; RV64-NEXT:    neg a1, s3
 ; RV64-NEXT:    neg a0, a0
@@ -447,7 +447,7 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64-NEXT:    slli a1, s1, 22
 ; RV64-NEXT:    sub a0, a0, a1
 ; RV64-NEXT:    sw a0, 0(s0)
-; RV64-NEXT:    addi a1, zero, -1
+; RV64-NEXT:    li a1, -1
 ; RV64-NEXT:    srli a1, a1, 31
 ; RV64-NEXT:    and a0, a0, a1
 ; RV64-NEXT:    srli a0, a0, 32
@@ -469,26 +469,26 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV32M-NEXT:    or a1, a3, a1
 ; RV32M-NEXT:    srli a3, a2, 11
 ; RV32M-NEXT:    andi a2, a2, 2047
-; RV32M-NEXT:    addi a4, zero, 683
+; RV32M-NEXT:    li a4, 683
 ; RV32M-NEXT:    mul a2, a2, a4
 ; RV32M-NEXT:    slli a4, a2, 10
 ; RV32M-NEXT:    andi a2, a2, 2046
 ; RV32M-NEXT:    srli a2, a2, 1
 ; RV32M-NEXT:    or a2, a2, a4
 ; RV32M-NEXT:    andi a2, a2, 2047
-; RV32M-NEXT:    addi a4, zero, 341
+; RV32M-NEXT:    li a4, 341
 ; RV32M-NEXT:    sltu a2, a4, a2
-; RV32M-NEXT:    addi a4, zero, 819
+; RV32M-NEXT:    li a4, 819
 ; RV32M-NEXT:    mul a1, a1, a4
 ; RV32M-NEXT:    addi a1, a1, -1638
 ; RV32M-NEXT:    andi a1, a1, 2047
-; RV32M-NEXT:    addi a4, zero, 1
+; RV32M-NEXT:    li a4, 1
 ; RV32M-NEXT:    sltu a1, a4, a1
-; RV32M-NEXT:    addi a4, zero, 1463
+; RV32M-NEXT:    li a4, 1463
 ; RV32M-NEXT:    mul a3, a3, a4
 ; RV32M-NEXT:    addi a3, a3, -1463
 ; RV32M-NEXT:    andi a3, a3, 2047
-; RV32M-NEXT:    addi a4, zero, 292
+; RV32M-NEXT:    li a4, 292
 ; RV32M-NEXT:    sltu a3, a4, a3
 ; RV32M-NEXT:    neg a2, a2
 ; RV32M-NEXT:    neg a3, a3
@@ -514,26 +514,26 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64M-NEXT:    srli a2, a1, 11
 ; RV64M-NEXT:    srli a3, a1, 22
 ; RV64M-NEXT:    andi a1, a1, 2047
-; RV64M-NEXT:    addi a4, zero, 683
+; RV64M-NEXT:    li a4, 683
 ; RV64M-NEXT:    mul a1, a1, a4
 ; RV64M-NEXT:    slli a4, a1, 10
 ; RV64M-NEXT:    andi a1, a1, 2046
 ; RV64M-NEXT:    srli a1, a1, 1
 ; RV64M-NEXT:    or a1, a1, a4
 ; RV64M-NEXT:    andi a1, a1, 2047
-; RV64M-NEXT:    addi a4, zero, 341
+; RV64M-NEXT:    li a4, 341
 ; RV64M-NEXT:    sltu a1, a4, a1
-; RV64M-NEXT:    addi a4, zero, 819
+; RV64M-NEXT:    li a4, 819
 ; RV64M-NEXT:    mul a3, a3, a4
 ; RV64M-NEXT:    addi a3, a3, -1638
 ; RV64M-NEXT:    andi a3, a3, 2047
-; RV64M-NEXT:    addi a4, zero, 1
+; RV64M-NEXT:    li a4, 1
 ; RV64M-NEXT:    sltu a3, a4, a3
-; RV64M-NEXT:    addi a4, zero, 1463
+; RV64M-NEXT:    li a4, 1463
 ; RV64M-NEXT:    mul a2, a2, a4
 ; RV64M-NEXT:    addi a2, a2, -1463
 ; RV64M-NEXT:    andi a2, a2, 2047
-; RV64M-NEXT:    addi a4, zero, 292
+; RV64M-NEXT:    li a4, 292
 ; RV64M-NEXT:    sltu a2, a4, a2
 ; RV64M-NEXT:    neg a1, a1
 ; RV64M-NEXT:    neg a2, a2
@@ -544,7 +544,7 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64M-NEXT:    slli a2, a3, 22
 ; RV64M-NEXT:    sub a1, a1, a2
 ; RV64M-NEXT:    sw a1, 0(a0)
-; RV64M-NEXT:    addi a2, zero, -1
+; RV64M-NEXT:    li a2, -1
 ; RV64M-NEXT:    srli a2, a2, 31
 ; RV64M-NEXT:    and a1, a1, a2
 ; RV64M-NEXT:    srli a1, a1, 32
@@ -570,7 +570,7 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV32MV-NEXT:    addi a1, sp, 8
 ; RV32MV-NEXT:    vle16.v v8, (a1)
 ; RV32MV-NEXT:    vmv.v.i v9, 10
-; RV32MV-NEXT:    addi a1, zero, 9
+; RV32MV-NEXT:    li a1, 9
 ; RV32MV-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
 ; RV32MV-NEXT:    vmv.s.x v9, a1
 ; RV32MV-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
@@ -583,11 +583,11 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV32MV-NEXT:    vadd.vv v10, v8, v8
 ; RV32MV-NEXT:    vsll.vv v9, v10, v9
 ; RV32MV-NEXT:    vmv.v.i v10, 0
-; RV32MV-NEXT:    addi a1, zero, 1
+; RV32MV-NEXT:    li a1, 1
 ; RV32MV-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
 ; RV32MV-NEXT:    vmv1r.v v11, v10
 ; RV32MV-NEXT:    vmv.s.x v11, a1
-; RV32MV-NEXT:    addi a1, zero, 2047
+; RV32MV-NEXT:    li a1, 2047
 ; RV32MV-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV32MV-NEXT:    vand.vx v8, v8, a1
 ; RV32MV-NEXT:    lui a2, %hi(.LCPI4_1)
@@ -635,7 +635,7 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64MV-NEXT:    addi a1, sp, 8
 ; RV64MV-NEXT:    vle16.v v8, (a1)
 ; RV64MV-NEXT:    vmv.v.i v9, 10
-; RV64MV-NEXT:    addi a1, zero, 9
+; RV64MV-NEXT:    li a1, 9
 ; RV64MV-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
 ; RV64MV-NEXT:    vmv.s.x v9, a1
 ; RV64MV-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
@@ -648,11 +648,11 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64MV-NEXT:    vadd.vv v10, v8, v8
 ; RV64MV-NEXT:    vsll.vv v9, v10, v9
 ; RV64MV-NEXT:    vmv.v.i v10, 0
-; RV64MV-NEXT:    addi a1, zero, 1
+; RV64MV-NEXT:    li a1, 1
 ; RV64MV-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
 ; RV64MV-NEXT:    vmv1r.v v11, v10
 ; RV64MV-NEXT:    vmv.s.x v11, a1
-; RV64MV-NEXT:    addi a1, zero, 2047
+; RV64MV-NEXT:    li a1, 2047
 ; RV64MV-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; RV64MV-NEXT:    vand.vx v8, v8, a1
 ; RV64MV-NEXT:    lui a2, %hi(.LCPI4_1)
@@ -676,7 +676,7 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64MV-NEXT:    slli a2, a2, 22
 ; RV64MV-NEXT:    or a1, a1, a2
 ; RV64MV-NEXT:    sw a1, 0(a0)
-; RV64MV-NEXT:    addi a2, zero, -1
+; RV64MV-NEXT:    li a2, -1
 ; RV64MV-NEXT:    srli a2, a2, 31
 ; RV64MV-NEXT:    and a1, a1, a2
 ; RV64MV-NEXT:    srli a1, a1, 32

diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
index 17b68b99fc0b..3feeb5ce324c 100644
--- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
@@ -25,19 +25,19 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    lhu s0, 4(a1)
 ; RV32I-NEXT:    lhu a2, 0(a1)
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s4, a0
-; RV32I-NEXT:    addi a1, zero, 124
+; RV32I-NEXT:    li a1, 124
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s5, a0
-; RV32I-NEXT:    addi a1, zero, 98
+; RV32I-NEXT:    li a1, 98
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a1, zero, 1003
+; RV32I-NEXT:    li a1, 1003
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    sh a0, 6(s1)
@@ -67,7 +67,7 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV32IM-NEXT:    srli a2, a2, 1
 ; RV32IM-NEXT:    add a2, a2, a5
 ; RV32IM-NEXT:    srli a2, a2, 6
-; RV32IM-NEXT:    addi a5, zero, 95
+; RV32IM-NEXT:    li a5, 95
 ; RV32IM-NEXT:    mul a2, a2, a5
 ; RV32IM-NEXT:    sub a2, a4, a2
 ; RV32IM-NEXT:    srli a4, a1, 2
@@ -75,21 +75,21 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV32IM-NEXT:    addi a5, a5, 529
 ; RV32IM-NEXT:    mulhu a4, a4, a5
 ; RV32IM-NEXT:    srli a4, a4, 2
-; RV32IM-NEXT:    addi a5, zero, 124
+; RV32IM-NEXT:    li a5, 124
 ; RV32IM-NEXT:    mul a4, a4, a5
 ; RV32IM-NEXT:    sub a1, a1, a4
 ; RV32IM-NEXT:    lui a4, 342392
 ; RV32IM-NEXT:    addi a4, a4, 669
 ; RV32IM-NEXT:    mulhu a4, a3, a4
 ; RV32IM-NEXT:    srli a4, a4, 5
-; RV32IM-NEXT:    addi a5, zero, 98
+; RV32IM-NEXT:    li a5, 98
 ; RV32IM-NEXT:    mul a4, a4, a5
 ; RV32IM-NEXT:    sub a3, a3, a4
 ; RV32IM-NEXT:    lui a4, 267633
 ; RV32IM-NEXT:    addi a4, a4, -1809
 ; RV32IM-NEXT:    mulhu a4, a6, a4
 ; RV32IM-NEXT:    srli a4, a4, 8
-; RV32IM-NEXT:    addi a5, zero, 1003
+; RV32IM-NEXT:    li a5, 1003
 ; RV32IM-NEXT:    mul a4, a4, a5
 ; RV32IM-NEXT:    sub a4, a6, a4
 ; RV32IM-NEXT:    sh a4, 6(a0)
@@ -113,19 +113,19 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    lhu s0, 8(a1)
 ; RV64I-NEXT:    lhu a2, 0(a1)
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s4, a0
-; RV64I-NEXT:    addi a1, zero, 124
+; RV64I-NEXT:    li a1, 124
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s5, a0
-; RV64I-NEXT:    addi a1, zero, 98
+; RV64I-NEXT:    li a1, 98
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    addi a1, zero, 1003
+; RV64I-NEXT:    li a1, 1003
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    sh a0, 6(s1)
@@ -161,7 +161,7 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a2, a2, 1
 ; RV64IM-NEXT:    add a2, a2, a5
 ; RV64IM-NEXT:    srli a2, a2, 6
-; RV64IM-NEXT:    addi a5, zero, 95
+; RV64IM-NEXT:    li a5, 95
 ; RV64IM-NEXT:    mulw a2, a2, a5
 ; RV64IM-NEXT:    subw a1, a1, a2
 ; RV64IM-NEXT:    srli a2, a4, 2
@@ -175,7 +175,7 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    addi a5, a5, 133
 ; RV64IM-NEXT:    mulhu a2, a2, a5
 ; RV64IM-NEXT:    srli a2, a2, 3
-; RV64IM-NEXT:    addi a5, zero, 124
+; RV64IM-NEXT:    li a5, 124
 ; RV64IM-NEXT:    mulw a2, a2, a5
 ; RV64IM-NEXT:    subw a2, a4, a2
 ; RV64IM-NEXT:    srli a4, a3, 1
@@ -189,7 +189,7 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    addi a5, a5, 1505
 ; RV64IM-NEXT:    mulhu a4, a4, a5
 ; RV64IM-NEXT:    srli a4, a4, 4
-; RV64IM-NEXT:    addi a5, zero, 98
+; RV64IM-NEXT:    li a5, 98
 ; RV64IM-NEXT:    mulw a4, a4, a5
 ; RV64IM-NEXT:    subw a3, a3, a4
 ; RV64IM-NEXT:    lui a4, 8364
@@ -202,7 +202,7 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    addi a4, a4, 1213
 ; RV64IM-NEXT:    mulhu a4, a6, a4
 ; RV64IM-NEXT:    srli a4, a4, 7
-; RV64IM-NEXT:    addi a5, zero, 1003
+; RV64IM-NEXT:    li a5, 1003
 ; RV64IM-NEXT:    mulw a4, a4, a5
 ; RV64IM-NEXT:    subw a4, a6, a4
 ; RV64IM-NEXT:    sh a4, 6(a0)
@@ -230,19 +230,19 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    lhu s0, 4(a1)
 ; RV32I-NEXT:    lhu a2, 0(a1)
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s4, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s5, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    sh a0, 6(s1)
@@ -272,7 +272,7 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) nounwind {
 ; RV32IM-NEXT:    srli a3, a3, 1
 ; RV32IM-NEXT:    add a2, a3, a2
 ; RV32IM-NEXT:    srli a2, a2, 6
-; RV32IM-NEXT:    addi a3, zero, 95
+; RV32IM-NEXT:    li a3, 95
 ; RV32IM-NEXT:    mul a2, a2, a3
 ; RV32IM-NEXT:    sub t0, a4, a2
 ; RV32IM-NEXT:    mulhu a4, a1, a5
@@ -317,19 +317,19 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    lhu s0, 8(a1)
 ; RV64I-NEXT:    lhu a2, 0(a1)
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s4, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s5, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    sh a0, 6(s1)
@@ -365,7 +365,7 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a3, a3, 1
 ; RV64IM-NEXT:    add a2, a3, a2
 ; RV64IM-NEXT:    srli a2, a2, 6
-; RV64IM-NEXT:    addi a3, zero, 95
+; RV64IM-NEXT:    li a3, 95
 ; RV64IM-NEXT:    mulw a2, a2, a3
 ; RV64IM-NEXT:    subw t0, a1, a2
 ; RV64IM-NEXT:    mulhu a2, a4, a5
@@ -420,35 +420,35 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    lhu s4, 8(a1)
 ; RV32I-NEXT:    lhu s1, 12(a1)
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s5, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s4
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s6, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s7, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s8, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    mv s9, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s4
 ; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    mv s4, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    mv s1, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    add a0, s8, a0
@@ -486,7 +486,7 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV32IM-NEXT:    srli a3, a3, 1
 ; RV32IM-NEXT:    add a2, a3, a2
 ; RV32IM-NEXT:    srli t3, a2, 6
-; RV32IM-NEXT:    addi t0, zero, 95
+; RV32IM-NEXT:    li t0, 95
 ; RV32IM-NEXT:    mul a3, t3, t0
 ; RV32IM-NEXT:    sub t1, a4, a3
 ; RV32IM-NEXT:    mulhu a4, a1, a5
@@ -539,35 +539,35 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    lhu s4, 16(a1)
 ; RV64I-NEXT:    lhu s1, 24(a1)
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s5, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s4
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s6, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s7, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s8, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    mv s9, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s4
 ; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    mv s4, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    mv s1, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    addw a0, s8, a0
@@ -611,7 +611,7 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a3, a3, 1
 ; RV64IM-NEXT:    add a2, a3, a2
 ; RV64IM-NEXT:    srli t3, a2, 6
-; RV64IM-NEXT:    addi t0, zero, 95
+; RV64IM-NEXT:    li t0, 95
 ; RV64IM-NEXT:    mulw a3, t3, t0
 ; RV64IM-NEXT:    subw t1, a1, a3
 ; RV64IM-NEXT:    mulhu a3, a4, a5
@@ -665,7 +665,7 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    lhu s1, 0(a1)
 ; RV32I-NEXT:    lhu a2, 12(a1)
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a1, zero, 95
+; RV32I-NEXT:    li a1, 95
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    andi a1, s1, 63
@@ -696,7 +696,7 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV32IM-NEXT:    srli a2, a2, 1
 ; RV32IM-NEXT:    add a2, a2, a5
 ; RV32IM-NEXT:    srli a2, a2, 6
-; RV32IM-NEXT:    addi a5, zero, 95
+; RV32IM-NEXT:    li a5, 95
 ; RV32IM-NEXT:    mul a2, a2, a5
 ; RV32IM-NEXT:    sub a2, a4, a2
 ; RV32IM-NEXT:    andi a1, a1, 63
@@ -721,7 +721,7 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    lhu s1, 0(a1)
 ; RV64I-NEXT:    lhu a2, 24(a1)
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    addi a1, zero, 95
+; RV64I-NEXT:    li a1, 95
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    andi a1, s1, 63
@@ -758,7 +758,7 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a2, a2, 1
 ; RV64IM-NEXT:    add a2, a2, a5
 ; RV64IM-NEXT:    srli a2, a2, 6
-; RV64IM-NEXT:    addi a5, zero, 95
+; RV64IM-NEXT:    li a5, 95
 ; RV64IM-NEXT:    mulw a2, a2, a5
 ; RV64IM-NEXT:    subw a1, a1, a2
 ; RV64IM-NEXT:    andi a2, a4, 63
@@ -787,11 +787,11 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
 ; RV32I-NEXT:    lhu s1, 8(a1)
 ; RV32I-NEXT:    lhu a2, 4(a1)
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a1, zero, 654
+; RV32I-NEXT:    li a1, 654
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s3, a0
-; RV32I-NEXT:    addi a1, zero, 23
+; RV32I-NEXT:    li a1, 23
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    mv s1, a0
@@ -821,14 +821,14 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
 ; RV32IM-NEXT:    addi a5, a5, -1903
 ; RV32IM-NEXT:    mulhu a4, a4, a5
 ; RV32IM-NEXT:    srli a4, a4, 8
-; RV32IM-NEXT:    addi a5, zero, 654
+; RV32IM-NEXT:    li a5, 654
 ; RV32IM-NEXT:    mul a4, a4, a5
 ; RV32IM-NEXT:    sub a2, a2, a4
 ; RV32IM-NEXT:    lui a4, 729444
 ; RV32IM-NEXT:    addi a4, a4, 713
 ; RV32IM-NEXT:    mulhu a4, a1, a4
 ; RV32IM-NEXT:    srli a4, a4, 4
-; RV32IM-NEXT:    addi a5, zero, 23
+; RV32IM-NEXT:    li a5, 23
 ; RV32IM-NEXT:    mul a4, a4, a5
 ; RV32IM-NEXT:    sub a1, a1, a4
 ; RV32IM-NEXT:    lui a4, 395996
@@ -857,11 +857,11 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    lhu s1, 16(a1)
 ; RV64I-NEXT:    lhu a2, 8(a1)
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    addi a1, zero, 654
+; RV64I-NEXT:    li a1, 654
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s3, a0
-; RV64I-NEXT:    addi a1, zero, 23
+; RV64I-NEXT:    li a1, 23
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s1, a0
@@ -899,7 +899,7 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a5, a5, 1
 ; RV64IM-NEXT:    add a4, a5, a4
 ; RV64IM-NEXT:    srli a4, a4, 4
-; RV64IM-NEXT:    addi a5, zero, 23
+; RV64IM-NEXT:    li a5, 23
 ; RV64IM-NEXT:    mulw a4, a4, a5
 ; RV64IM-NEXT:    subw a1, a1, a4
 ; RV64IM-NEXT:    srli a4, a3, 1
@@ -913,7 +913,7 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    addi a5, a5, 965
 ; RV64IM-NEXT:    mulhu a4, a4, a5
 ; RV64IM-NEXT:    srli a4, a4, 7
-; RV64IM-NEXT:    addi a5, zero, 654
+; RV64IM-NEXT:    li a5, 654
 ; RV64IM-NEXT:    mulw a4, a4, a5
 ; RV64IM-NEXT:    subw a3, a3, a4
 ; RV64IM-NEXT:    lui a4, 1044567
@@ -973,23 +973,23 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV32I-NEXT:    lw a3, 0(a1)
 ; RV32I-NEXT:    lw a1, 4(a1)
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    addi a2, zero, 1
+; RV32I-NEXT:    li a2, 1
 ; RV32I-NEXT:    mv a0, a3
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __umoddi3@plt
 ; RV32I-NEXT:    mv s7, a0
 ; RV32I-NEXT:    mv s8, a1
-; RV32I-NEXT:    addi a2, zero, 654
+; RV32I-NEXT:    li a2, 654
 ; RV32I-NEXT:    mv a0, s6
 ; RV32I-NEXT:    mv a1, s1
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __umoddi3@plt
 ; RV32I-NEXT:    mv s6, a0
 ; RV32I-NEXT:    mv s9, a1
-; RV32I-NEXT:    addi a2, zero, 23
+; RV32I-NEXT:    li a2, 23
 ; RV32I-NEXT:    mv a0, s4
 ; RV32I-NEXT:    mv a1, s5
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __umoddi3@plt
 ; RV32I-NEXT:    mv s4, a0
 ; RV32I-NEXT:    mv s1, a1
@@ -997,7 +997,7 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV32I-NEXT:    addi a2, a0, 1327
 ; RV32I-NEXT:    mv a0, s2
 ; RV32I-NEXT:    mv a1, s3
-; RV32I-NEXT:    mv a3, zero
+; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __umoddi3@plt
 ; RV32I-NEXT:    sw a1, 28(s0)
 ; RV32I-NEXT:    sw a0, 24(s0)
@@ -1044,23 +1044,23 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV32IM-NEXT:    lw a3, 0(a1)
 ; RV32IM-NEXT:    lw a1, 4(a1)
 ; RV32IM-NEXT:    mv s0, a0
-; RV32IM-NEXT:    addi a2, zero, 1
+; RV32IM-NEXT:    li a2, 1
 ; RV32IM-NEXT:    mv a0, a3
-; RV32IM-NEXT:    mv a3, zero
+; RV32IM-NEXT:    li a3, 0
 ; RV32IM-NEXT:    call __umoddi3@plt
 ; RV32IM-NEXT:    mv s7, a0
 ; RV32IM-NEXT:    mv s8, a1
-; RV32IM-NEXT:    addi a2, zero, 654
+; RV32IM-NEXT:    li a2, 654
 ; RV32IM-NEXT:    mv a0, s6
 ; RV32IM-NEXT:    mv a1, s1
-; RV32IM-NEXT:    mv a3, zero
+; RV32IM-NEXT:    li a3, 0
 ; RV32IM-NEXT:    call __umoddi3@plt
 ; RV32IM-NEXT:    mv s6, a0
 ; RV32IM-NEXT:    mv s9, a1
-; RV32IM-NEXT:    addi a2, zero, 23
+; RV32IM-NEXT:    li a2, 23
 ; RV32IM-NEXT:    mv a0, s4
 ; RV32IM-NEXT:    mv a1, s5
-; RV32IM-NEXT:    mv a3, zero
+; RV32IM-NEXT:    li a3, 0
 ; RV32IM-NEXT:    call __umoddi3@plt
 ; RV32IM-NEXT:    mv s4, a0
 ; RV32IM-NEXT:    mv s1, a1
@@ -1068,7 +1068,7 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV32IM-NEXT:    addi a2, a0, 1327
 ; RV32IM-NEXT:    mv a0, s2
 ; RV32IM-NEXT:    mv a1, s3
-; RV32IM-NEXT:    mv a3, zero
+; RV32IM-NEXT:    li a3, 0
 ; RV32IM-NEXT:    call __umoddi3@plt
 ; RV32IM-NEXT:    sw a1, 28(s0)
 ; RV32IM-NEXT:    sw a0, 24(s0)
@@ -1104,11 +1104,11 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV64I-NEXT:    ld s1, 16(a1)
 ; RV64I-NEXT:    ld a2, 8(a1)
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    addi a1, zero, 654
+; RV64I-NEXT:    li a1, 654
 ; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s3, a0
-; RV64I-NEXT:    addi a1, zero, 23
+; RV64I-NEXT:    li a1, 23
 ; RV64I-NEXT:    mv a0, s1
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    mv s1, a0
@@ -1146,7 +1146,7 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV64IM-NEXT:    srli a5, a5, 1
 ; RV64IM-NEXT:    add a4, a5, a4
 ; RV64IM-NEXT:    srli a4, a4, 4
-; RV64IM-NEXT:    addi a5, zero, 23
+; RV64IM-NEXT:    li a5, 23
 ; RV64IM-NEXT:    mul a4, a4, a5
 ; RV64IM-NEXT:    sub a1, a1, a4
 ; RV64IM-NEXT:    srli a4, a3, 1
@@ -1160,7 +1160,7 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
 ; RV64IM-NEXT:    addi a5, a5, 965
 ; RV64IM-NEXT:    mulhu a4, a4, a5
 ; RV64IM-NEXT:    srli a4, a4, 7
-; RV64IM-NEXT:    addi a5, zero, 654
+; RV64IM-NEXT:    li a5, 654
 ; RV64IM-NEXT:    mul a4, a4, a5
 ; RV64IM-NEXT:    sub a3, a3, a4
 ; RV64IM-NEXT:    lui a4, 1044567

diff --git a/llvm/test/CodeGen/RISCV/usub_sat.ll b/llvm/test/CodeGen/RISCV/usub_sat.ll
index 353e8eaf838e..b1c2af1cc6ad 100644
--- a/llvm/test/CodeGen/RISCV/usub_sat.ll
+++ b/llvm/test/CodeGen/RISCV/usub_sat.ll
@@ -15,7 +15,7 @@ define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    sub a1, a0, a1
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    bltu a2, a1, .LBB0_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
@@ -26,7 +26,7 @@ define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a2, a0
 ; RV64I-NEXT:    subw a1, a0, a1
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    bltu a2, a1, .LBB0_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
@@ -62,8 +62,8 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; RV32I-NEXT:  .LBB1_2:
 ; RV32I-NEXT:    sltu a4, a0, a2
 ; RV32I-NEXT:  .LBB1_3:
-; RV32I-NEXT:    mv a0, zero
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a0, 0
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    bnez a4, .LBB1_5
 ; RV32I-NEXT:  # %bb.4:
 ; RV32I-NEXT:    mv a0, a2
@@ -75,7 +75,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a2, a0
 ; RV64I-NEXT:    sub a1, a0, a1
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    bltu a2, a1, .LBB1_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
@@ -95,8 +95,8 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; RV32IZbb-NEXT:  .LBB1_2:
 ; RV32IZbb-NEXT:    sltu a4, a0, a2
 ; RV32IZbb-NEXT:  .LBB1_3:
-; RV32IZbb-NEXT:    mv a0, zero
-; RV32IZbb-NEXT:    mv a1, zero
+; RV32IZbb-NEXT:    li a0, 0
+; RV32IZbb-NEXT:    li a1, 0
 ; RV32IZbb-NEXT:    bnez a4, .LBB1_5
 ; RV32IZbb-NEXT:  # %bb.4:
 ; RV32IZbb-NEXT:    mv a0, a2
@@ -118,7 +118,7 @@ define zeroext i16 @func16(i16 zeroext %x, i16 zeroext %y) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    sub a1, a0, a1
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    bltu a2, a1, .LBB2_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
@@ -129,7 +129,7 @@ define zeroext i16 @func16(i16 zeroext %x, i16 zeroext %y) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a2, a0
 ; RV64I-NEXT:    sub a1, a0, a1
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    bltu a2, a1, .LBB2_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
@@ -156,7 +156,7 @@ define zeroext i8 @func8(i8 zeroext %x, i8 zeroext %y) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    sub a1, a0, a1
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    bltu a2, a1, .LBB3_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
@@ -167,7 +167,7 @@ define zeroext i8 @func8(i8 zeroext %x, i8 zeroext %y) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a2, a0
 ; RV64I-NEXT:    sub a1, a0, a1
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    bltu a2, a1, .LBB3_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
@@ -194,7 +194,7 @@ define zeroext i4 @func3(i4 zeroext %x, i4 zeroext %y) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    sub a1, a0, a1
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    bltu a2, a1, .LBB4_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
@@ -205,7 +205,7 @@ define zeroext i4 @func3(i4 zeroext %x, i4 zeroext %y) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a2, a0
 ; RV64I-NEXT:    sub a1, a0, a1
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    bltu a2, a1, .LBB4_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1

diff --git a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
index 7e28cfbe5c37..ef466dcc4837 100644
--- a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
@@ -16,7 +16,7 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ; RV32I-NEXT:    mv a3, a0
 ; RV32I-NEXT:    mul a0, a1, a2
 ; RV32I-NEXT:    sub a1, a3, a0
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    bltu a3, a1, .LBB0_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
@@ -28,7 +28,7 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ; RV64I-NEXT:    mulw a1, a1, a2
 ; RV64I-NEXT:    subw a1, a0, a1
 ; RV64I-NEXT:    sext.w a2, a0
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    bltu a2, a1, .LBB0_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
@@ -68,8 +68,8 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; RV32I-NEXT:  .LBB1_2:
 ; RV32I-NEXT:    sltu a4, a0, a3
 ; RV32I-NEXT:  .LBB1_3:
-; RV32I-NEXT:    mv a0, zero
-; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    li a0, 0
+; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    bnez a4, .LBB1_5
 ; RV32I-NEXT:  # %bb.4:
 ; RV32I-NEXT:    mv a0, a3
@@ -81,7 +81,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:    sub a2, a0, a2
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    bltu a1, a2, .LBB1_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a2
@@ -101,8 +101,8 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; RV32IZbb-NEXT:  .LBB1_2:
 ; RV32IZbb-NEXT:    sltu a4, a0, a3
 ; RV32IZbb-NEXT:  .LBB1_3:
-; RV32IZbb-NEXT:    mv a0, zero
-; RV32IZbb-NEXT:    mv a1, zero
+; RV32IZbb-NEXT:    li a0, 0
+; RV32IZbb-NEXT:    li a1, 0
 ; RV32IZbb-NEXT:    bnez a4, .LBB1_5
 ; RV32IZbb-NEXT:  # %bb.4:
 ; RV32IZbb-NEXT:    mv a0, a3
@@ -129,7 +129,7 @@ define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
 ; RV32I-NEXT:    mul a0, a1, a2
 ; RV32I-NEXT:    and a0, a0, a3
 ; RV32I-NEXT:    sub a1, a4, a0
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    bltu a4, a1, .LBB2_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
@@ -144,7 +144,7 @@ define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
 ; RV64I-NEXT:    mul a0, a1, a2
 ; RV64I-NEXT:    and a0, a0, a3
 ; RV64I-NEXT:    sub a1, a4, a0
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    bltu a4, a1, .LBB2_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
@@ -180,7 +180,7 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV32I-NEXT:    mul a0, a1, a2
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    sub a1, a3, a0
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    bltu a3, a1, .LBB3_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
@@ -193,7 +193,7 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64I-NEXT:    mul a0, a1, a2
 ; RV64I-NEXT:    andi a0, a0, 255
 ; RV64I-NEXT:    sub a1, a3, a0
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    bltu a3, a1, .LBB3_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
@@ -229,7 +229,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV32I-NEXT:    mul a0, a1, a2
 ; RV32I-NEXT:    andi a0, a0, 15
 ; RV32I-NEXT:    sub a1, a3, a0
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    bltu a3, a1, .LBB4_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
@@ -242,7 +242,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64I-NEXT:    mul a0, a1, a2
 ; RV64I-NEXT:    andi a0, a0, 15
 ; RV64I-NEXT:    sub a1, a3, a0
-; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    li a0, 0
 ; RV64I-NEXT:    bltu a3, a1, .LBB4_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1

diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll
index 219494b0ea6d..8141c7f84194 100644
--- a/llvm/test/CodeGen/RISCV/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/vararg.ll
@@ -421,8 +421,8 @@ define void @va1_caller() nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, -16
 ; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-FPELIM-NEXT:    lui a3, 261888
-; ILP32-ILP32F-FPELIM-NEXT:    addi a4, zero, 2
-; ILP32-ILP32F-FPELIM-NEXT:    mv a2, zero
+; ILP32-ILP32F-FPELIM-NEXT:    li a4, 2
+; ILP32-ILP32F-FPELIM-NEXT:    li a2, 0
 ; ILP32-ILP32F-FPELIM-NEXT:    call va1@plt
 ; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, 16
@@ -435,8 +435,8 @@ define void @va1_caller() nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    lui a3, 261888
-; ILP32-ILP32F-WITHFP-NEXT:    addi a4, zero, 2
-; ILP32-ILP32F-WITHFP-NEXT:    mv a2, zero
+; ILP32-ILP32F-WITHFP-NEXT:    li a4, 2
+; ILP32-ILP32F-WITHFP-NEXT:    li a2, 0
 ; ILP32-ILP32F-WITHFP-NEXT:    call va1@plt
 ; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -448,8 +448,8 @@ define void @va1_caller() nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, -16
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lui a3, 261888
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a4, zero, 2
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    mv a2, zero
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a4, 2
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a2, 0
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va1@plt
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, 16
@@ -459,9 +459,9 @@ define void @va1_caller() nounwind {
 ; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -16
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, zero, 1023
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 1023
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a1, a0, 52
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a2, zero, 2
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a2, 2
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    call va1@plt
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 16
@@ -473,9 +473,9 @@ define void @va1_caller() nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, zero, 1023
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 1023
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a1, a0, 52
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a2, zero, 2
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a2, 2
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    call va1@plt
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -572,7 +572,7 @@ define i64 @va2(i8 *%fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    srli a1, a1, 32
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a1, a1, 8
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a1, zero, 1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a1, 1
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a1, a1, 32
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a1, a1, -8
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    and a0, a0, a1
@@ -601,7 +601,7 @@ define i64 @va2(i8 *%fmt, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    srli a1, a1, 32
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, a1, 8
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a1, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, zero, 1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a1, 1
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a1, a1, 32
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, a1, -8
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    and a0, a0, a1
@@ -747,7 +747,7 @@ define void @va2_caller() nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, -16
 ; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-FPELIM-NEXT:    lui a3, 261888
-; ILP32-ILP32F-FPELIM-NEXT:    mv a2, zero
+; ILP32-ILP32F-FPELIM-NEXT:    li a2, 0
 ; ILP32-ILP32F-FPELIM-NEXT:    call va2@plt
 ; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, 16
@@ -760,7 +760,7 @@ define void @va2_caller() nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    lui a3, 261888
-; ILP32-ILP32F-WITHFP-NEXT:    mv a2, zero
+; ILP32-ILP32F-WITHFP-NEXT:    li a2, 0
 ; ILP32-ILP32F-WITHFP-NEXT:    call va2@plt
 ; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -772,7 +772,7 @@ define void @va2_caller() nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, -16
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lui a3, 261888
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    mv a2, zero
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a2, 0
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va2@plt
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, 16
@@ -782,7 +782,7 @@ define void @va2_caller() nounwind {
 ; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -16
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, zero, 1023
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 1023
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a1, a0, 52
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    call va2@plt
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -795,7 +795,7 @@ define void @va2_caller() nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, zero, 1023
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 1023
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a1, a0, 52
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    call va2@plt
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
@@ -898,7 +898,7 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    srli a2, a2, 32
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a2, a2, 8
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a2, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a2, zero, 1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a2, 1
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a2, a2, 32
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a2, a2, -8
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    and a0, a0, a2
@@ -927,7 +927,7 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    srli a2, a2, 32
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a2, a2, 8
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a2, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a2, zero, 1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a2, 1
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a2, a2, 32
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a2, a2, -8
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    and a0, a0, a2
@@ -1079,11 +1079,11 @@ define void @va3_caller() nounwind {
 ; ILP32-ILP32F-FPELIM:       # %bb.0:
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, -16
 ; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; ILP32-ILP32F-FPELIM-NEXT:    addi a0, zero, 2
-; ILP32-ILP32F-FPELIM-NEXT:    addi a1, zero, 1111
+; ILP32-ILP32F-FPELIM-NEXT:    li a0, 2
+; ILP32-ILP32F-FPELIM-NEXT:    li a1, 1111
 ; ILP32-ILP32F-FPELIM-NEXT:    lui a5, 262144
-; ILP32-ILP32F-FPELIM-NEXT:    mv a2, zero
-; ILP32-ILP32F-FPELIM-NEXT:    mv a4, zero
+; ILP32-ILP32F-FPELIM-NEXT:    li a2, 0
+; ILP32-ILP32F-FPELIM-NEXT:    li a4, 0
 ; ILP32-ILP32F-FPELIM-NEXT:    call va3@plt
 ; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, 16
@@ -1095,11 +1095,11 @@ define void @va3_caller() nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 16
-; ILP32-ILP32F-WITHFP-NEXT:    addi a0, zero, 2
-; ILP32-ILP32F-WITHFP-NEXT:    addi a1, zero, 1111
+; ILP32-ILP32F-WITHFP-NEXT:    li a0, 2
+; ILP32-ILP32F-WITHFP-NEXT:    li a1, 1111
 ; ILP32-ILP32F-WITHFP-NEXT:    lui a5, 262144
-; ILP32-ILP32F-WITHFP-NEXT:    mv a2, zero
-; ILP32-ILP32F-WITHFP-NEXT:    mv a4, zero
+; ILP32-ILP32F-WITHFP-NEXT:    li a2, 0
+; ILP32-ILP32F-WITHFP-NEXT:    li a4, 0
 ; ILP32-ILP32F-WITHFP-NEXT:    call va3@plt
 ; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; ILP32-ILP32F-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1110,11 +1110,11 @@ define void @va3_caller() nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM:       # %bb.0:
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, -16
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, zero, 2
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a1, zero, 1111
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a0, 2
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a1, 1111
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lui a5, 262144
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    mv a2, zero
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    mv a4, zero
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a2, 0
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a4, 0
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va3@plt
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi sp, sp, 16
@@ -1124,10 +1124,10 @@ define void @va3_caller() nounwind {
 ; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -16
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, zero, 1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 1
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a2, a0, 62
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, zero, 2
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a1, zero, 1111
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 2
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a1, 1111
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    call va3@plt
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, 16
@@ -1139,10 +1139,10 @@ define void @va3_caller() nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 16
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, zero, 1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 1
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a2, a0, 62
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, zero, 2
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, zero, 1111
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 2
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a1, 1111
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    call va3@plt
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1396,11 +1396,11 @@ define void @va5_aligned_stack_caller() nounwind {
 ; ILP32-ILP32F-FPELIM:       # %bb.0:
 ; ILP32-ILP32F-FPELIM-NEXT:    addi sp, sp, -64
 ; ILP32-ILP32F-FPELIM-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
-; ILP32-ILP32F-FPELIM-NEXT:    addi a0, zero, 17
+; ILP32-ILP32F-FPELIM-NEXT:    li a0, 17
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a0, 24(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    addi a0, zero, 16
+; ILP32-ILP32F-FPELIM-NEXT:    li a0, 16
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a0, 20(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    addi a0, zero, 15
+; ILP32-ILP32F-FPELIM-NEXT:    li a0, 15
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a0, 16(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    lui a0, 262236
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a0, a0, 655
@@ -1408,7 +1408,7 @@ define void @va5_aligned_stack_caller() nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT:    lui a0, 377487
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a0, a0, 1475
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a0, 8(sp)
-; ILP32-ILP32F-FPELIM-NEXT:    addi a0, zero, 14
+; ILP32-ILP32F-FPELIM-NEXT:    li a0, 14
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a0, 0(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    lui a0, 262153
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a0, a0, 491
@@ -1423,12 +1423,12 @@ define void @va5_aligned_stack_caller() nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a5, a0, 1311
 ; ILP32-ILP32F-FPELIM-NEXT:    lui a0, 688509
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a6, a0, -2048
-; ILP32-ILP32F-FPELIM-NEXT:    addi a0, zero, 1
-; ILP32-ILP32F-FPELIM-NEXT:    addi a1, zero, 11
+; ILP32-ILP32F-FPELIM-NEXT:    li a0, 1
+; ILP32-ILP32F-FPELIM-NEXT:    li a1, 11
 ; ILP32-ILP32F-FPELIM-NEXT:    addi a2, sp, 32
-; ILP32-ILP32F-FPELIM-NEXT:    addi a3, zero, 12
-; ILP32-ILP32F-FPELIM-NEXT:    addi a4, zero, 13
-; ILP32-ILP32F-FPELIM-NEXT:    addi a7, zero, 4
+; ILP32-ILP32F-FPELIM-NEXT:    li a3, 12
+; ILP32-ILP32F-FPELIM-NEXT:    li a4, 13
+; ILP32-ILP32F-FPELIM-NEXT:    li a7, 4
 ; ILP32-ILP32F-FPELIM-NEXT:    sw a5, 32(sp)
 ; ILP32-ILP32F-FPELIM-NEXT:    call va5_aligned_stack_callee@plt
 ; ILP32-ILP32F-FPELIM-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
@@ -1441,11 +1441,11 @@ define void @va5_aligned_stack_caller() nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
 ; ILP32-ILP32F-WITHFP-NEXT:    addi s0, sp, 64
-; ILP32-ILP32F-WITHFP-NEXT:    addi a0, zero, 17
+; ILP32-ILP32F-WITHFP-NEXT:    li a0, 17
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a0, 24(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    addi a0, zero, 16
+; ILP32-ILP32F-WITHFP-NEXT:    li a0, 16
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a0, 20(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    addi a0, zero, 15
+; ILP32-ILP32F-WITHFP-NEXT:    li a0, 15
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a0, 16(sp)
 ; ILP32-ILP32F-WITHFP-NEXT:    lui a0, 262236
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a0, a0, 655
@@ -1453,7 +1453,7 @@ define void @va5_aligned_stack_caller() nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    lui a0, 377487
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a0, a0, 1475
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a0, 8(sp)
-; ILP32-ILP32F-WITHFP-NEXT:    addi a0, zero, 14
+; ILP32-ILP32F-WITHFP-NEXT:    li a0, 14
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a0, 0(sp)
 ; ILP32-ILP32F-WITHFP-NEXT:    lui a0, 262153
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a0, a0, 491
@@ -1468,12 +1468,12 @@ define void @va5_aligned_stack_caller() nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a5, a0, 1311
 ; ILP32-ILP32F-WITHFP-NEXT:    lui a0, 688509
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a6, a0, -2048
-; ILP32-ILP32F-WITHFP-NEXT:    addi a0, zero, 1
-; ILP32-ILP32F-WITHFP-NEXT:    addi a1, zero, 11
+; ILP32-ILP32F-WITHFP-NEXT:    li a0, 1
+; ILP32-ILP32F-WITHFP-NEXT:    li a1, 11
 ; ILP32-ILP32F-WITHFP-NEXT:    addi a2, s0, -32
-; ILP32-ILP32F-WITHFP-NEXT:    addi a3, zero, 12
-; ILP32-ILP32F-WITHFP-NEXT:    addi a4, zero, 13
-; ILP32-ILP32F-WITHFP-NEXT:    addi a7, zero, 4
+; ILP32-ILP32F-WITHFP-NEXT:    li a3, 12
+; ILP32-ILP32F-WITHFP-NEXT:    li a4, 13
+; ILP32-ILP32F-WITHFP-NEXT:    li a7, 4
 ; ILP32-ILP32F-WITHFP-NEXT:    sw a5, -32(s0)
 ; ILP32-ILP32F-WITHFP-NEXT:    call va5_aligned_stack_callee@plt
 ; ILP32-ILP32F-WITHFP-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
@@ -1491,13 +1491,13 @@ define void @va5_aligned_stack_caller() nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lui a0, 377487
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, a0, 1475
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a0, 8(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, zero, 17
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a0, 17
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a0, 24(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, zero, 16
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a0, 16
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a0, 20(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, zero, 15
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a0, 15
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a0, 16(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, zero, 14
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a0, 14
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a0, 0(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lui a0, 262153
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, a0, 491
@@ -1512,12 +1512,12 @@ define void @va5_aligned_stack_caller() nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a5, a0, 1311
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lui a0, 688509
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a6, a0, -2048
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a0, zero, 1
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a1, zero, 11
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a0, 1
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a1, 11
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a2, sp, 32
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a3, zero, 12
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a4, zero, 13
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    addi a7, zero, 4
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a3, 12
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a4, 13
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    li a7, 4
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    sw a5, 32(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    call va5_aligned_stack_callee@plt
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
@@ -1528,11 +1528,11 @@ define void @va5_aligned_stack_caller() nounwind {
 ; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -48
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, zero, 17
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 17
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 24(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, zero, 16
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 16
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 16(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, zero, 15
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 15
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a0, 8(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    lui a0, 2049
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addiw a0, a0, -1147
@@ -1562,11 +1562,11 @@ define void @va5_aligned_stack_caller() nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, a0, 1147
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    slli a0, a0, 14
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a3, a0, -1967
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a0, zero, 1
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a1, zero, 11
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a4, zero, 12
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a5, zero, 13
-; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a7, zero, 14
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a0, 1
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a1, 11
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a4, 12
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a5, 13
+; LP64-LP64F-LP64D-FPELIM-NEXT:    li a7, 14
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd t0, 0(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    call va5_aligned_stack_callee@plt
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
@@ -1579,11 +1579,11 @@ define void @va5_aligned_stack_caller() nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi s0, sp, 48
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, zero, 17
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 17
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, 24(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, zero, 16
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 16
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, 16(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, zero, 15
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 15
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a0, 8(sp)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    lui a0, 2049
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addiw a0, a0, -1147
@@ -1613,11 +1613,11 @@ define void @va5_aligned_stack_caller() nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, a0, 1147
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    slli a0, a0, 14
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a3, a0, -1967
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a0, zero, 1
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a1, zero, 11
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a4, zero, 12
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a5, zero, 13
-; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a7, zero, 14
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a0, 1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a1, 11
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a4, 12
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a5, 13
+; LP64-LP64F-LP64D-WITHFP-NEXT:    li a7, 14
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd t0, 0(sp)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    call va5_aligned_stack_callee@plt
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll
index e12f7cd6edd5..929c154f39e8 100644
--- a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll
+++ b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll
@@ -23,13 +23,13 @@ define void @vec3_setcc_crash(<3 x i8>* %in, <3 x i8>* %out) {
 ; RV32-NEXT:    mv a5, a0
 ; RV32-NEXT:    bgtz a3, .LBB0_2
 ; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    mv a5, zero
+; RV32-NEXT:    li a5, 0
 ; RV32-NEXT:  .LBB0_2:
 ; RV32-NEXT:    srai a4, a4, 24
 ; RV32-NEXT:    andi a5, a5, 255
 ; RV32-NEXT:    bgtz a6, .LBB0_4
 ; RV32-NEXT:  # %bb.3:
-; RV32-NEXT:    mv a2, zero
+; RV32-NEXT:    li a2, 0
 ; RV32-NEXT:    j .LBB0_5
 ; RV32-NEXT:  .LBB0_4:
 ; RV32-NEXT:    srli a2, a2, 8
@@ -38,7 +38,7 @@ define void @vec3_setcc_crash(<3 x i8>* %in, <3 x i8>* %out) {
 ; RV32-NEXT:    or a2, a5, a2
 ; RV32-NEXT:    bgtz a4, .LBB0_7
 ; RV32-NEXT:  # %bb.6:
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    j .LBB0_8
 ; RV32-NEXT:  .LBB0_7:
 ; RV32-NEXT:    srli a0, a0, 16
@@ -61,13 +61,13 @@ define void @vec3_setcc_crash(<3 x i8>* %in, <3 x i8>* %out) {
 ; RV64-NEXT:    mv a5, a0
 ; RV64-NEXT:    bgtz a3, .LBB0_2
 ; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    mv a5, zero
+; RV64-NEXT:    li a5, 0
 ; RV64-NEXT:  .LBB0_2:
 ; RV64-NEXT:    srai a4, a4, 56
 ; RV64-NEXT:    andi a5, a5, 255
 ; RV64-NEXT:    bgtz a6, .LBB0_4
 ; RV64-NEXT:  # %bb.3:
-; RV64-NEXT:    mv a2, zero
+; RV64-NEXT:    li a2, 0
 ; RV64-NEXT:    j .LBB0_5
 ; RV64-NEXT:  .LBB0_4:
 ; RV64-NEXT:    srli a2, a2, 8
@@ -76,7 +76,7 @@ define void @vec3_setcc_crash(<3 x i8>* %in, <3 x i8>* %out) {
 ; RV64-NEXT:    or a2, a5, a2
 ; RV64-NEXT:    bgtz a4, .LBB0_7
 ; RV64-NEXT:  # %bb.6:
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    j .LBB0_8
 ; RV64-NEXT:  .LBB0_7:
 ; RV64-NEXT:    srli a0, a0, 16

diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
index d691fe05a98d..c3ff237bc473 100644
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -699,7 +699,7 @@ entry:
 define zeroext i1 @usubo.i32.constant.lhs(i32 %v1, i32* %res) {
 ; RV32-LABEL: usubo.i32.constant.lhs:
 ; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    addi a2, zero, -2
+; RV32-NEXT:    li a2, -2
 ; RV32-NEXT:    sub a2, a2, a0
 ; RV32-NEXT:    addi a0, a2, 1
 ; RV32-NEXT:    seqz a0, a0
@@ -708,7 +708,7 @@ define zeroext i1 @usubo.i32.constant.lhs(i32 %v1, i32* %res) {
 ;
 ; RV64-LABEL: usubo.i32.constant.lhs:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addi a2, zero, -2
+; RV64-NEXT:    li a2, -2
 ; RV64-NEXT:    subw a2, a2, a0
 ; RV64-NEXT:    addi a0, a2, 1
 ; RV64-NEXT:    seqz a0, a0
@@ -717,7 +717,7 @@ define zeroext i1 @usubo.i32.constant.lhs(i32 %v1, i32* %res) {
 ;
 ; RV32ZBA-LABEL: usubo.i32.constant.lhs:
 ; RV32ZBA:       # %bb.0: # %entry
-; RV32ZBA-NEXT:    addi a2, zero, -2
+; RV32ZBA-NEXT:    li a2, -2
 ; RV32ZBA-NEXT:    sub a2, a2, a0
 ; RV32ZBA-NEXT:    addi a0, a2, 1
 ; RV32ZBA-NEXT:    seqz a0, a0
@@ -726,7 +726,7 @@ define zeroext i1 @usubo.i32.constant.lhs(i32 %v1, i32* %res) {
 ;
 ; RV64ZBA-LABEL: usubo.i32.constant.lhs:
 ; RV64ZBA:       # %bb.0: # %entry
-; RV64ZBA-NEXT:    addi a2, zero, -2
+; RV64ZBA-NEXT:    li a2, -2
 ; RV64ZBA-NEXT:    subw a2, a2, a0
 ; RV64ZBA-NEXT:    addi a0, a2, 1
 ; RV64ZBA-NEXT:    seqz a0, a0
@@ -849,7 +849,7 @@ entry:
 define zeroext i1 @smulo2.i32(i32 %v1, i32* %res) {
 ; RV32-LABEL: smulo2.i32:
 ; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    addi a2, zero, 13
+; RV32-NEXT:    li a2, 13
 ; RV32-NEXT:    mulh a3, a0, a2
 ; RV32-NEXT:    mul a2, a0, a2
 ; RV32-NEXT:    srai a0, a2, 31
@@ -861,7 +861,7 @@ define zeroext i1 @smulo2.i32(i32 %v1, i32* %res) {
 ; RV64-LABEL: smulo2.i32:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    sext.w a0, a0
-; RV64-NEXT:    addi a2, zero, 13
+; RV64-NEXT:    li a2, 13
 ; RV64-NEXT:    mul a3, a0, a2
 ; RV64-NEXT:    mulw a0, a0, a2
 ; RV64-NEXT:    xor a0, a0, a3
@@ -871,7 +871,7 @@ define zeroext i1 @smulo2.i32(i32 %v1, i32* %res) {
 ;
 ; RV32ZBA-LABEL: smulo2.i32:
 ; RV32ZBA:       # %bb.0: # %entry
-; RV32ZBA-NEXT:    addi a2, zero, 13
+; RV32ZBA-NEXT:    li a2, 13
 ; RV32ZBA-NEXT:    mulh a3, a0, a2
 ; RV32ZBA-NEXT:    mul a2, a0, a2
 ; RV32ZBA-NEXT:    srai a0, a2, 31
@@ -1063,7 +1063,7 @@ entry:
 define zeroext i1 @smulo2.i64(i64 %v1, i64* %res) {
 ; RV32-LABEL: smulo2.i64:
 ; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    addi a7, zero, 13
+; RV32-NEXT:    li a7, 13
 ; RV32-NEXT:    mulhu a4, a0, a7
 ; RV32-NEXT:    mul a5, a1, a7
 ; RV32-NEXT:    add t0, a5, a4
@@ -1089,7 +1089,7 @@ define zeroext i1 @smulo2.i64(i64 %v1, i64* %res) {
 ;
 ; RV64-LABEL: smulo2.i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addi a2, zero, 13
+; RV64-NEXT:    li a2, 13
 ; RV64-NEXT:    mulh a3, a0, a2
 ; RV64-NEXT:    mul a2, a0, a2
 ; RV64-NEXT:    srai a0, a2, 63
@@ -1100,7 +1100,7 @@ define zeroext i1 @smulo2.i64(i64 %v1, i64* %res) {
 ;
 ; RV32ZBA-LABEL: smulo2.i64:
 ; RV32ZBA:       # %bb.0: # %entry
-; RV32ZBA-NEXT:    addi a7, zero, 13
+; RV32ZBA-NEXT:    li a7, 13
 ; RV32ZBA-NEXT:    mulhu a4, a0, a7
 ; RV32ZBA-NEXT:    mul a5, a1, a7
 ; RV32ZBA-NEXT:    add t0, a5, a4
@@ -1126,7 +1126,7 @@ define zeroext i1 @smulo2.i64(i64 %v1, i64* %res) {
 ;
 ; RV64ZBA-LABEL: smulo2.i64:
 ; RV64ZBA:       # %bb.0: # %entry
-; RV64ZBA-NEXT:    addi a2, zero, 13
+; RV64ZBA-NEXT:    li a2, 13
 ; RV64ZBA-NEXT:    mulh a3, a0, a2
 ; RV64ZBA-NEXT:    mul a2, a0, a2
 ; RV64ZBA-NEXT:    srai a0, a2, 63
@@ -1191,7 +1191,7 @@ entry:
 define zeroext i1 @umulo2.i32(i32 %v1, i32* %res) {
 ; RV32-LABEL: umulo2.i32:
 ; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    addi a3, zero, 13
+; RV32-NEXT:    li a3, 13
 ; RV32-NEXT:    mulhu a2, a0, a3
 ; RV32-NEXT:    snez a2, a2
 ; RV32-NEXT:    mul a0, a0, a3
@@ -1203,7 +1203,7 @@ define zeroext i1 @umulo2.i32(i32 %v1, i32* %res) {
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    srli a0, a0, 32
-; RV64-NEXT:    addi a2, zero, 13
+; RV64-NEXT:    li a2, 13
 ; RV64-NEXT:    mul a2, a0, a2
 ; RV64-NEXT:    srli a0, a2, 32
 ; RV64-NEXT:    snez a0, a0
@@ -1212,7 +1212,7 @@ define zeroext i1 @umulo2.i32(i32 %v1, i32* %res) {
 ;
 ; RV32ZBA-LABEL: umulo2.i32:
 ; RV32ZBA:       # %bb.0: # %entry
-; RV32ZBA-NEXT:    addi a3, zero, 13
+; RV32ZBA-NEXT:    li a3, 13
 ; RV32ZBA-NEXT:    mulhu a2, a0, a3
 ; RV32ZBA-NEXT:    snez a2, a2
 ; RV32ZBA-NEXT:    mul a0, a0, a3
@@ -1363,7 +1363,7 @@ entry:
 define zeroext i1 @umulo2.i64(i64 %v1, i64* %res) {
 ; RV32-LABEL: umulo2.i64:
 ; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    addi a3, zero, 13
+; RV32-NEXT:    li a3, 13
 ; RV32-NEXT:    mul a4, a1, a3
 ; RV32-NEXT:    mulhu a5, a0, a3
 ; RV32-NEXT:    add a4, a5, a4
@@ -1379,7 +1379,7 @@ define zeroext i1 @umulo2.i64(i64 %v1, i64* %res) {
 ;
 ; RV64-LABEL: umulo2.i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addi a3, zero, 13
+; RV64-NEXT:    li a3, 13
 ; RV64-NEXT:    mulhu a2, a0, a3
 ; RV64-NEXT:    snez a2, a2
 ; RV64-NEXT:    mul a0, a0, a3
@@ -1389,7 +1389,7 @@ define zeroext i1 @umulo2.i64(i64 %v1, i64* %res) {
 ;
 ; RV32ZBA-LABEL: umulo2.i64:
 ; RV32ZBA:       # %bb.0: # %entry
-; RV32ZBA-NEXT:    addi a3, zero, 13
+; RV32ZBA-NEXT:    li a3, 13
 ; RV32ZBA-NEXT:    mul a4, a1, a3
 ; RV32ZBA-NEXT:    mulhu a5, a0, a3
 ; RV32ZBA-NEXT:    add a4, a5, a4
@@ -1405,7 +1405,7 @@ define zeroext i1 @umulo2.i64(i64 %v1, i64* %res) {
 ;
 ; RV64ZBA-LABEL: umulo2.i64:
 ; RV64ZBA:       # %bb.0: # %entry
-; RV64ZBA-NEXT:    addi a3, zero, 13
+; RV64ZBA-NEXT:    li a3, 13
 ; RV64ZBA-NEXT:    mulhu a2, a0, a3
 ; RV64ZBA-NEXT:    snez a2, a2
 ; RV64ZBA-NEXT:    mul a0, a0, a3
@@ -1596,7 +1596,7 @@ define i1 @saddo.not.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    xor a1, a1, a3
 ; RV32-NEXT:    not a1, a1
 ; RV32-NEXT:    and a0, a1, a0
-; RV32-NEXT:    addi a1, zero, -1
+; RV32-NEXT:    li a1, -1
 ; RV32-NEXT:    slt a0, a1, a0
 ; RV32-NEXT:    ret
 ;
@@ -1619,7 +1619,7 @@ define i1 @saddo.not.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    xor a1, a1, a3
 ; RV32ZBA-NEXT:    not a1, a1
 ; RV32ZBA-NEXT:    and a0, a1, a0
-; RV32ZBA-NEXT:    addi a1, zero, -1
+; RV32ZBA-NEXT:    li a1, -1
 ; RV32ZBA-NEXT:    slt a0, a1, a0
 ; RV32ZBA-NEXT:    ret
 ;
@@ -1996,7 +1996,7 @@ define i1 @ssub.not.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    xor a0, a1, a0
 ; RV32-NEXT:    xor a1, a1, a3
 ; RV32-NEXT:    and a0, a1, a0
-; RV32-NEXT:    addi a1, zero, -1
+; RV32-NEXT:    li a1, -1
 ; RV32-NEXT:    slt a0, a1, a0
 ; RV32-NEXT:    ret
 ;
@@ -2017,7 +2017,7 @@ define i1 @ssub.not.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    xor a0, a1, a0
 ; RV32ZBA-NEXT:    xor a1, a1, a3
 ; RV32ZBA-NEXT:    and a0, a1, a0
-; RV32ZBA-NEXT:    addi a1, zero, -1
+; RV32ZBA-NEXT:    li a1, -1
 ; RV32ZBA-NEXT:    slt a0, a1, a0
 ; RV32ZBA-NEXT:    ret
 ;
@@ -2870,10 +2870,10 @@ define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) {
 ; RV32-NEXT:    slti a1, a1, 0
 ; RV32-NEXT:    beq a1, a0, .LBB50_2
 ; RV32-NEXT:  # %bb.1: # %overflow
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB50_2: # %continue
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: saddo.br.i32:
@@ -2884,10 +2884,10 @@ define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) {
 ; RV64-NEXT:    addw a0, a0, a1
 ; RV64-NEXT:    beq a0, a2, .LBB50_2
 ; RV64-NEXT:  # %bb.1: # %overflow
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB50_2: # %continue
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: saddo.br.i32:
@@ -2897,10 +2897,10 @@ define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) {
 ; RV32ZBA-NEXT:    slti a1, a1, 0
 ; RV32ZBA-NEXT:    beq a1, a0, .LBB50_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
-; RV32ZBA-NEXT:    mv a0, zero
+; RV32ZBA-NEXT:    li a0, 0
 ; RV32ZBA-NEXT:    ret
 ; RV32ZBA-NEXT:  .LBB50_2: # %continue
-; RV32ZBA-NEXT:    addi a0, zero, 1
+; RV32ZBA-NEXT:    li a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: saddo.br.i32:
@@ -2911,10 +2911,10 @@ define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) {
 ; RV64ZBA-NEXT:    addw a0, a0, a1
 ; RV64ZBA-NEXT:    beq a0, a2, .LBB50_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
-; RV64ZBA-NEXT:    mv a0, zero
+; RV64ZBA-NEXT:    li a0, 0
 ; RV64ZBA-NEXT:    ret
 ; RV64ZBA-NEXT:  .LBB50_2: # %continue
-; RV64ZBA-NEXT:    addi a0, zero, 1
+; RV64ZBA-NEXT:    li a0, 1
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
@@ -2942,10 +2942,10 @@ define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    and a0, a1, a0
 ; RV32-NEXT:    bgez a0, .LBB51_2
 ; RV32-NEXT:  # %bb.1: # %overflow
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB51_2: # %continue
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: saddo.br.i64:
@@ -2955,10 +2955,10 @@ define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
 ; RV64-NEXT:    slti a1, a1, 0
 ; RV64-NEXT:    beq a1, a0, .LBB51_2
 ; RV64-NEXT:  # %bb.1: # %overflow
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB51_2: # %continue
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: saddo.br.i64:
@@ -2973,10 +2973,10 @@ define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    and a0, a1, a0
 ; RV32ZBA-NEXT:    bgez a0, .LBB51_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
-; RV32ZBA-NEXT:    mv a0, zero
+; RV32ZBA-NEXT:    li a0, 0
 ; RV32ZBA-NEXT:    ret
 ; RV32ZBA-NEXT:  .LBB51_2: # %continue
-; RV32ZBA-NEXT:    addi a0, zero, 1
+; RV32ZBA-NEXT:    li a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: saddo.br.i64:
@@ -2986,10 +2986,10 @@ define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
 ; RV64ZBA-NEXT:    slti a1, a1, 0
 ; RV64ZBA-NEXT:    beq a1, a0, .LBB51_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
-; RV64ZBA-NEXT:    mv a0, zero
+; RV64ZBA-NEXT:    li a0, 0
 ; RV64ZBA-NEXT:    ret
 ; RV64ZBA-NEXT:  .LBB51_2: # %continue
-; RV64ZBA-NEXT:    addi a0, zero, 1
+; RV64ZBA-NEXT:    li a0, 1
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
@@ -3010,10 +3010,10 @@ define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
 ; RV32-NEXT:    add a1, a0, a1
 ; RV32-NEXT:    bgeu a1, a0, .LBB52_2
 ; RV32-NEXT:  # %bb.1: # %overflow
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB52_2: # %continue
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uaddo.br.i32:
@@ -3022,10 +3022,10 @@ define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
 ; RV64-NEXT:    sext.w a0, a0
 ; RV64-NEXT:    bgeu a1, a0, .LBB52_2
 ; RV64-NEXT:  # %bb.1: # %overflow
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB52_2: # %continue
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: uaddo.br.i32:
@@ -3033,10 +3033,10 @@ define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
 ; RV32ZBA-NEXT:    add a1, a0, a1
 ; RV32ZBA-NEXT:    bgeu a1, a0, .LBB52_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
-; RV32ZBA-NEXT:    mv a0, zero
+; RV32ZBA-NEXT:    li a0, 0
 ; RV32ZBA-NEXT:    ret
 ; RV32ZBA-NEXT:  .LBB52_2: # %continue
-; RV32ZBA-NEXT:    addi a0, zero, 1
+; RV32ZBA-NEXT:    li a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: uaddo.br.i32:
@@ -3045,10 +3045,10 @@ define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
 ; RV64ZBA-NEXT:    sext.w a0, a0
 ; RV64ZBA-NEXT:    bgeu a1, a0, .LBB52_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
-; RV64ZBA-NEXT:    mv a0, zero
+; RV64ZBA-NEXT:    li a0, 0
 ; RV64ZBA-NEXT:    ret
 ; RV64ZBA-NEXT:  .LBB52_2: # %continue
-; RV64ZBA-NEXT:    addi a0, zero, 1
+; RV64ZBA-NEXT:    li a0, 1
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
@@ -3076,10 +3076,10 @@ define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:  .LBB53_2: # %entry
 ; RV32-NEXT:    beqz a0, .LBB53_4
 ; RV32-NEXT:  # %bb.3: # %overflow
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB53_4: # %continue
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uaddo.br.i64:
@@ -3087,10 +3087,10 @@ define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
 ; RV64-NEXT:    add a1, a0, a1
 ; RV64-NEXT:    bgeu a1, a0, .LBB53_2
 ; RV64-NEXT:  # %bb.1: # %overflow
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB53_2: # %continue
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: uaddo.br.i64:
@@ -3105,10 +3105,10 @@ define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:  .LBB53_2: # %entry
 ; RV32ZBA-NEXT:    beqz a0, .LBB53_4
 ; RV32ZBA-NEXT:  # %bb.3: # %overflow
-; RV32ZBA-NEXT:    mv a0, zero
+; RV32ZBA-NEXT:    li a0, 0
 ; RV32ZBA-NEXT:    ret
 ; RV32ZBA-NEXT:  .LBB53_4: # %continue
-; RV32ZBA-NEXT:    addi a0, zero, 1
+; RV32ZBA-NEXT:    li a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: uaddo.br.i64:
@@ -3116,10 +3116,10 @@ define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
 ; RV64ZBA-NEXT:    add a1, a0, a1
 ; RV64ZBA-NEXT:    bgeu a1, a0, .LBB53_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
-; RV64ZBA-NEXT:    mv a0, zero
+; RV64ZBA-NEXT:    li a0, 0
 ; RV64ZBA-NEXT:    ret
 ; RV64ZBA-NEXT:  .LBB53_2: # %continue
-; RV64ZBA-NEXT:    addi a0, zero, 1
+; RV64ZBA-NEXT:    li a0, 1
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
@@ -3142,10 +3142,10 @@ define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
 ; RV32-NEXT:    slt a0, a1, a0
 ; RV32-NEXT:    beq a2, a0, .LBB54_2
 ; RV32-NEXT:  # %bb.1: # %overflow
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB54_2: # %continue
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ssubo.br.i32:
@@ -3156,10 +3156,10 @@ define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
 ; RV64-NEXT:    subw a0, a0, a1
 ; RV64-NEXT:    beq a0, a2, .LBB54_2
 ; RV64-NEXT:  # %bb.1: # %overflow
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB54_2: # %continue
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: ssubo.br.i32:
@@ -3169,10 +3169,10 @@ define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
 ; RV32ZBA-NEXT:    slt a0, a1, a0
 ; RV32ZBA-NEXT:    beq a2, a0, .LBB54_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
-; RV32ZBA-NEXT:    mv a0, zero
+; RV32ZBA-NEXT:    li a0, 0
 ; RV32ZBA-NEXT:    ret
 ; RV32ZBA-NEXT:  .LBB54_2: # %continue
-; RV32ZBA-NEXT:    addi a0, zero, 1
+; RV32ZBA-NEXT:    li a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: ssubo.br.i32:
@@ -3183,10 +3183,10 @@ define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
 ; RV64ZBA-NEXT:    subw a0, a0, a1
 ; RV64ZBA-NEXT:    beq a0, a2, .LBB54_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
-; RV64ZBA-NEXT:    mv a0, zero
+; RV64ZBA-NEXT:    li a0, 0
 ; RV64ZBA-NEXT:    ret
 ; RV64ZBA-NEXT:  .LBB54_2: # %continue
-; RV64ZBA-NEXT:    addi a0, zero, 1
+; RV64ZBA-NEXT:    li a0, 1
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
@@ -3212,10 +3212,10 @@ define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    and a0, a1, a0
 ; RV32-NEXT:    bgez a0, .LBB55_2
 ; RV32-NEXT:  # %bb.1: # %overflow
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB55_2: # %continue
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ssubo.br.i64:
@@ -3225,10 +3225,10 @@ define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
 ; RV64-NEXT:    slt a0, a1, a0
 ; RV64-NEXT:    beq a2, a0, .LBB55_2
 ; RV64-NEXT:  # %bb.1: # %overflow
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB55_2: # %continue
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: ssubo.br.i64:
@@ -3241,10 +3241,10 @@ define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    and a0, a1, a0
 ; RV32ZBA-NEXT:    bgez a0, .LBB55_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
-; RV32ZBA-NEXT:    mv a0, zero
+; RV32ZBA-NEXT:    li a0, 0
 ; RV32ZBA-NEXT:    ret
 ; RV32ZBA-NEXT:  .LBB55_2: # %continue
-; RV32ZBA-NEXT:    addi a0, zero, 1
+; RV32ZBA-NEXT:    li a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: ssubo.br.i64:
@@ -3254,10 +3254,10 @@ define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
 ; RV64ZBA-NEXT:    slt a0, a1, a0
 ; RV64ZBA-NEXT:    beq a2, a0, .LBB55_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
-; RV64ZBA-NEXT:    mv a0, zero
+; RV64ZBA-NEXT:    li a0, 0
 ; RV64ZBA-NEXT:    ret
 ; RV64ZBA-NEXT:  .LBB55_2: # %continue
-; RV64ZBA-NEXT:    addi a0, zero, 1
+; RV64ZBA-NEXT:    li a0, 1
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
@@ -3278,10 +3278,10 @@ define zeroext i1 @usubo.br.i32(i32 %v1, i32 %v2) {
 ; RV32-NEXT:    sub a1, a0, a1
 ; RV32-NEXT:    bgeu a0, a1, .LBB56_2
 ; RV32-NEXT:  # %bb.1: # %overflow
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB56_2: # %continue
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: usubo.br.i32:
@@ -3290,10 +3290,10 @@ define zeroext i1 @usubo.br.i32(i32 %v1, i32 %v2) {
 ; RV64-NEXT:    sext.w a0, a0
 ; RV64-NEXT:    bgeu a0, a1, .LBB56_2
 ; RV64-NEXT:  # %bb.1: # %overflow
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB56_2: # %continue
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: usubo.br.i32:
@@ -3301,10 +3301,10 @@ define zeroext i1 @usubo.br.i32(i32 %v1, i32 %v2) {
 ; RV32ZBA-NEXT:    sub a1, a0, a1
 ; RV32ZBA-NEXT:    bgeu a0, a1, .LBB56_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
-; RV32ZBA-NEXT:    mv a0, zero
+; RV32ZBA-NEXT:    li a0, 0
 ; RV32ZBA-NEXT:    ret
 ; RV32ZBA-NEXT:  .LBB56_2: # %continue
-; RV32ZBA-NEXT:    addi a0, zero, 1
+; RV32ZBA-NEXT:    li a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: usubo.br.i32:
@@ -3313,10 +3313,10 @@ define zeroext i1 @usubo.br.i32(i32 %v1, i32 %v2) {
 ; RV64ZBA-NEXT:    sext.w a0, a0
 ; RV64ZBA-NEXT:    bgeu a0, a1, .LBB56_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
-; RV64ZBA-NEXT:    mv a0, zero
+; RV64ZBA-NEXT:    li a0, 0
 ; RV64ZBA-NEXT:    ret
 ; RV64ZBA-NEXT:  .LBB56_2: # %continue
-; RV64ZBA-NEXT:    addi a0, zero, 1
+; RV64ZBA-NEXT:    li a0, 1
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
@@ -3342,14 +3342,14 @@ define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    sltu a0, a1, a3
 ; RV32-NEXT:    bnez a0, .LBB57_4
 ; RV32-NEXT:  .LBB57_2: # %continue
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB57_3:
 ; RV32-NEXT:    sub a1, a0, a2
 ; RV32-NEXT:    sltu a0, a0, a1
 ; RV32-NEXT:    beqz a0, .LBB57_2
 ; RV32-NEXT:  .LBB57_4: # %overflow
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: usubo.br.i64:
@@ -3357,10 +3357,10 @@ define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) {
 ; RV64-NEXT:    sub a1, a0, a1
 ; RV64-NEXT:    bgeu a0, a1, .LBB57_2
 ; RV64-NEXT:  # %bb.1: # %overflow
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB57_2: # %continue
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: usubo.br.i64:
@@ -3373,14 +3373,14 @@ define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    sltu a0, a1, a3
 ; RV32ZBA-NEXT:    bnez a0, .LBB57_4
 ; RV32ZBA-NEXT:  .LBB57_2: # %continue
-; RV32ZBA-NEXT:    addi a0, zero, 1
+; RV32ZBA-NEXT:    li a0, 1
 ; RV32ZBA-NEXT:    ret
 ; RV32ZBA-NEXT:  .LBB57_3:
 ; RV32ZBA-NEXT:    sub a1, a0, a2
 ; RV32ZBA-NEXT:    sltu a0, a0, a1
 ; RV32ZBA-NEXT:    beqz a0, .LBB57_2
 ; RV32ZBA-NEXT:  .LBB57_4: # %overflow
-; RV32ZBA-NEXT:    mv a0, zero
+; RV32ZBA-NEXT:    li a0, 0
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: usubo.br.i64:
@@ -3388,10 +3388,10 @@ define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) {
 ; RV64ZBA-NEXT:    sub a1, a0, a1
 ; RV64ZBA-NEXT:    bgeu a0, a1, .LBB57_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
-; RV64ZBA-NEXT:    mv a0, zero
+; RV64ZBA-NEXT:    li a0, 0
 ; RV64ZBA-NEXT:    ret
 ; RV64ZBA-NEXT:  .LBB57_2: # %continue
-; RV64ZBA-NEXT:    addi a0, zero, 1
+; RV64ZBA-NEXT:    li a0, 1
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
@@ -3414,10 +3414,10 @@ define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) {
 ; RV32-NEXT:    srai a0, a0, 31
 ; RV32-NEXT:    beq a2, a0, .LBB58_2
 ; RV32-NEXT:  # %bb.1: # %overflow
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB58_2: # %continue
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smulo.br.i32:
@@ -3428,10 +3428,10 @@ define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) {
 ; RV64-NEXT:    mulw a0, a0, a1
 ; RV64-NEXT:    beq a0, a2, .LBB58_2
 ; RV64-NEXT:  # %bb.1: # %overflow
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB58_2: # %continue
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: smulo.br.i32:
@@ -3441,10 +3441,10 @@ define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) {
 ; RV32ZBA-NEXT:    srai a0, a0, 31
 ; RV32ZBA-NEXT:    beq a2, a0, .LBB58_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
-; RV32ZBA-NEXT:    mv a0, zero
+; RV32ZBA-NEXT:    li a0, 0
 ; RV32ZBA-NEXT:    ret
 ; RV32ZBA-NEXT:  .LBB58_2: # %continue
-; RV32ZBA-NEXT:    addi a0, zero, 1
+; RV32ZBA-NEXT:    li a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: smulo.br.i32:
@@ -3455,10 +3455,10 @@ define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) {
 ; RV64ZBA-NEXT:    mulw a0, a0, a1
 ; RV64ZBA-NEXT:    beq a0, a2, .LBB58_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
-; RV64ZBA-NEXT:    mv a0, zero
+; RV64ZBA-NEXT:    li a0, 0
 ; RV64ZBA-NEXT:    ret
 ; RV64ZBA-NEXT:  .LBB58_2: # %continue
-; RV64ZBA-NEXT:    addi a0, zero, 1
+; RV64ZBA-NEXT:    li a0, 1
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
@@ -3529,10 +3529,10 @@ define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    or a0, a1, a0
 ; RV32-NEXT:    beqz a0, .LBB59_2
 ; RV32-NEXT:  # %bb.1: # %overflow
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    j .LBB59_3
 ; RV32-NEXT:  .LBB59_2: # %continue
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:  .LBB59_3: # %overflow
 ; RV32-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
@@ -3547,10 +3547,10 @@ define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
 ; RV64-NEXT:    srai a0, a0, 63
 ; RV64-NEXT:    beq a2, a0, .LBB59_2
 ; RV64-NEXT:  # %bb.1: # %overflow
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB59_2: # %continue
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: smulo.br.i64:
@@ -3608,10 +3608,10 @@ define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    or a0, a1, a0
 ; RV32ZBA-NEXT:    beqz a0, .LBB59_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
-; RV32ZBA-NEXT:    mv a0, zero
+; RV32ZBA-NEXT:    li a0, 0
 ; RV32ZBA-NEXT:    j .LBB59_3
 ; RV32ZBA-NEXT:  .LBB59_2: # %continue
-; RV32ZBA-NEXT:    addi a0, zero, 1
+; RV32ZBA-NEXT:    li a0, 1
 ; RV32ZBA-NEXT:  .LBB59_3: # %overflow
 ; RV32ZBA-NEXT:    lw s2, 4(sp) # 4-byte Folded Reload
 ; RV32ZBA-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
@@ -3626,10 +3626,10 @@ define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
 ; RV64ZBA-NEXT:    srai a0, a0, 63
 ; RV64ZBA-NEXT:    beq a2, a0, .LBB59_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
-; RV64ZBA-NEXT:    mv a0, zero
+; RV64ZBA-NEXT:    li a0, 0
 ; RV64ZBA-NEXT:    ret
 ; RV64ZBA-NEXT:  .LBB59_2: # %continue
-; RV64ZBA-NEXT:    addi a0, zero, 1
+; RV64ZBA-NEXT:    li a0, 1
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
@@ -3647,7 +3647,7 @@ continue:
 define zeroext i1 @smulo2.br.i64(i64 %v1) {
 ; RV32-LABEL: smulo2.br.i64:
 ; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    addi a6, zero, -13
+; RV32-NEXT:    li a6, -13
 ; RV32-NEXT:    mulhu a3, a0, a6
 ; RV32-NEXT:    mul a4, a1, a6
 ; RV32-NEXT:    add a3, a4, a3
@@ -3657,7 +3657,7 @@ define zeroext i1 @smulo2.br.i64(i64 %v1) {
 ; RV32-NEXT:    sub t0, a3, a0
 ; RV32-NEXT:    neg t1, a0
 ; RV32-NEXT:    sltu a2, t0, t1
-; RV32-NEXT:    addi a7, zero, -1
+; RV32-NEXT:    li a7, -1
 ; RV32-NEXT:    mulhu t2, a0, a7
 ; RV32-NEXT:    add a2, t2, a2
 ; RV32-NEXT:    add a2, t3, a2
@@ -3687,29 +3687,29 @@ define zeroext i1 @smulo2.br.i64(i64 %v1) {
 ; RV32-NEXT:    or a0, a1, a0
 ; RV32-NEXT:    beqz a0, .LBB60_2
 ; RV32-NEXT:  # %bb.1: # %overflow
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB60_2: # %continue
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: smulo2.br.i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addi a1, zero, -13
+; RV64-NEXT:    li a1, -13
 ; RV64-NEXT:    mulh a2, a0, a1
 ; RV64-NEXT:    mul a0, a0, a1
 ; RV64-NEXT:    srai a0, a0, 63
 ; RV64-NEXT:    beq a2, a0, .LBB60_2
 ; RV64-NEXT:  # %bb.1: # %overflow
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB60_2: # %continue
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: smulo2.br.i64:
 ; RV32ZBA:       # %bb.0: # %entry
-; RV32ZBA-NEXT:    addi a6, zero, -13
+; RV32ZBA-NEXT:    li a6, -13
 ; RV32ZBA-NEXT:    mulhu a3, a0, a6
 ; RV32ZBA-NEXT:    mul a4, a1, a6
 ; RV32ZBA-NEXT:    add a3, a4, a3
@@ -3719,7 +3719,7 @@ define zeroext i1 @smulo2.br.i64(i64 %v1) {
 ; RV32ZBA-NEXT:    sub t0, a3, a0
 ; RV32ZBA-NEXT:    neg t1, a0
 ; RV32ZBA-NEXT:    sltu a2, t0, t1
-; RV32ZBA-NEXT:    addi a7, zero, -1
+; RV32ZBA-NEXT:    li a7, -1
 ; RV32ZBA-NEXT:    mulhu t2, a0, a7
 ; RV32ZBA-NEXT:    add a2, t2, a2
 ; RV32ZBA-NEXT:    add a2, t3, a2
@@ -3749,24 +3749,24 @@ define zeroext i1 @smulo2.br.i64(i64 %v1) {
 ; RV32ZBA-NEXT:    or a0, a1, a0
 ; RV32ZBA-NEXT:    beqz a0, .LBB60_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
-; RV32ZBA-NEXT:    mv a0, zero
+; RV32ZBA-NEXT:    li a0, 0
 ; RV32ZBA-NEXT:    ret
 ; RV32ZBA-NEXT:  .LBB60_2: # %continue
-; RV32ZBA-NEXT:    addi a0, zero, 1
+; RV32ZBA-NEXT:    li a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: smulo2.br.i64:
 ; RV64ZBA:       # %bb.0: # %entry
-; RV64ZBA-NEXT:    addi a1, zero, -13
+; RV64ZBA-NEXT:    li a1, -13
 ; RV64ZBA-NEXT:    mulh a2, a0, a1
 ; RV64ZBA-NEXT:    mul a0, a0, a1
 ; RV64ZBA-NEXT:    srai a0, a0, 63
 ; RV64ZBA-NEXT:    beq a2, a0, .LBB60_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
-; RV64ZBA-NEXT:    mv a0, zero
+; RV64ZBA-NEXT:    li a0, 0
 ; RV64ZBA-NEXT:    ret
 ; RV64ZBA-NEXT:  .LBB60_2: # %continue
-; RV64ZBA-NEXT:    addi a0, zero, 1
+; RV64ZBA-NEXT:    li a0, 1
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 -13)
@@ -3787,10 +3787,10 @@ define zeroext i1 @umulo.br.i32(i32 %v1, i32 %v2) {
 ; RV32-NEXT:    mulhu a0, a0, a1
 ; RV32-NEXT:    beqz a0, .LBB61_2
 ; RV32-NEXT:  # %bb.1: # %overflow
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB61_2: # %continue
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umulo.br.i32:
@@ -3801,10 +3801,10 @@ define zeroext i1 @umulo.br.i32(i32 %v1, i32 %v2) {
 ; RV64-NEXT:    srli a0, a0, 32
 ; RV64-NEXT:    beqz a0, .LBB61_2
 ; RV64-NEXT:  # %bb.1: # %overflow
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB61_2: # %continue
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: umulo.br.i32:
@@ -3812,10 +3812,10 @@ define zeroext i1 @umulo.br.i32(i32 %v1, i32 %v2) {
 ; RV32ZBA-NEXT:    mulhu a0, a0, a1
 ; RV32ZBA-NEXT:    beqz a0, .LBB61_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
-; RV32ZBA-NEXT:    mv a0, zero
+; RV32ZBA-NEXT:    li a0, 0
 ; RV32ZBA-NEXT:    ret
 ; RV32ZBA-NEXT:  .LBB61_2: # %continue
-; RV32ZBA-NEXT:    addi a0, zero, 1
+; RV32ZBA-NEXT:    li a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: umulo.br.i32:
@@ -3826,10 +3826,10 @@ define zeroext i1 @umulo.br.i32(i32 %v1, i32 %v2) {
 ; RV64ZBA-NEXT:    srli a0, a0, 32
 ; RV64ZBA-NEXT:    beqz a0, .LBB61_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
-; RV64ZBA-NEXT:    mv a0, zero
+; RV64ZBA-NEXT:    li a0, 0
 ; RV64ZBA-NEXT:    ret
 ; RV64ZBA-NEXT:  .LBB61_2: # %continue
-; RV64ZBA-NEXT:    addi a0, zero, 1
+; RV64ZBA-NEXT:    li a0, 1
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
@@ -3865,10 +3865,10 @@ define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    or a0, a0, a6
 ; RV32-NEXT:    beqz a0, .LBB62_2
 ; RV32-NEXT:  # %bb.1: # %overflow
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB62_2: # %continue
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umulo.br.i64:
@@ -3876,10 +3876,10 @@ define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) {
 ; RV64-NEXT:    mulhu a0, a0, a1
 ; RV64-NEXT:    beqz a0, .LBB62_2
 ; RV64-NEXT:  # %bb.1: # %overflow
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB62_2: # %continue
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: umulo.br.i64:
@@ -3902,10 +3902,10 @@ define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    or a0, a0, a6
 ; RV32ZBA-NEXT:    beqz a0, .LBB62_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
-; RV32ZBA-NEXT:    mv a0, zero
+; RV32ZBA-NEXT:    li a0, 0
 ; RV32ZBA-NEXT:    ret
 ; RV32ZBA-NEXT:  .LBB62_2: # %continue
-; RV32ZBA-NEXT:    addi a0, zero, 1
+; RV32ZBA-NEXT:    li a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: umulo.br.i64:
@@ -3913,10 +3913,10 @@ define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) {
 ; RV64ZBA-NEXT:    mulhu a0, a0, a1
 ; RV64ZBA-NEXT:    beqz a0, .LBB62_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
-; RV64ZBA-NEXT:    mv a0, zero
+; RV64ZBA-NEXT:    li a0, 0
 ; RV64ZBA-NEXT:    ret
 ; RV64ZBA-NEXT:  .LBB62_2: # %continue
-; RV64ZBA-NEXT:    addi a0, zero, 1
+; RV64ZBA-NEXT:    li a0, 1
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
@@ -3944,10 +3944,10 @@ define zeroext i1 @umulo2.br.i64(i64 %v1) {
 ; RV32-NEXT:  .LBB63_2: # %entry
 ; RV32-NEXT:    beqz a0, .LBB63_4
 ; RV32-NEXT:  # %bb.3: # %overflow
-; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    li a0, 0
 ; RV32-NEXT:    ret
 ; RV32-NEXT:  .LBB63_4: # %continue
-; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: umulo2.br.i64:
@@ -3955,10 +3955,10 @@ define zeroext i1 @umulo2.br.i64(i64 %v1) {
 ; RV64-NEXT:    add a1, a0, a0
 ; RV64-NEXT:    bgeu a1, a0, .LBB63_2
 ; RV64-NEXT:  # %bb.1: # %overflow
-; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    li a0, 0
 ; RV64-NEXT:    ret
 ; RV64-NEXT:  .LBB63_2: # %continue
-; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    li a0, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: umulo2.br.i64:
@@ -3973,10 +3973,10 @@ define zeroext i1 @umulo2.br.i64(i64 %v1) {
 ; RV32ZBA-NEXT:  .LBB63_2: # %entry
 ; RV32ZBA-NEXT:    beqz a0, .LBB63_4
 ; RV32ZBA-NEXT:  # %bb.3: # %overflow
-; RV32ZBA-NEXT:    mv a0, zero
+; RV32ZBA-NEXT:    li a0, 0
 ; RV32ZBA-NEXT:    ret
 ; RV32ZBA-NEXT:  .LBB63_4: # %continue
-; RV32ZBA-NEXT:    addi a0, zero, 1
+; RV32ZBA-NEXT:    li a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: umulo2.br.i64:
@@ -3984,10 +3984,10 @@ define zeroext i1 @umulo2.br.i64(i64 %v1) {
 ; RV64ZBA-NEXT:    add a1, a0, a0
 ; RV64ZBA-NEXT:    bgeu a1, a0, .LBB63_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
-; RV64ZBA-NEXT:    mv a0, zero
+; RV64ZBA-NEXT:    li a0, 0
 ; RV64ZBA-NEXT:    ret
 ; RV64ZBA-NEXT:  .LBB63_2: # %continue
-; RV64ZBA-NEXT:    addi a0, zero, 1
+; RV64ZBA-NEXT:    li a0, 1
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 2)

diff  --git a/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll b/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll
index fd7bd17fb0ed..28e9d1c7bc40 100644
--- a/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll
+++ b/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll
@@ -18,10 +18,10 @@ define dso_local i32 @test_zext_i8() nounwind {
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB0_2
 ; RV32I-NEXT:  # %bb.1: # %if.end
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB0_2: # %if.then
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    ret
 entry:
   %0 = load i8, i8* getelementptr inbounds ([5 x i8], [5 x i8]* @bytes, i32 0, i32 0), align 1
@@ -54,10 +54,10 @@ define dso_local i32 @test_zext_i16() nounwind {
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    bnez a0, .LBB1_2
 ; RV32I-NEXT:  # %bb.1: # %if.end
-; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    li a0, 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB1_2: # %if.then
-; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    li a0, 1
 ; RV32I-NEXT:    ret
 entry:
   %0 = load i16, i16* getelementptr inbounds ([5 x i16], [5 x i16]* @shorts, i32 0, i32 0), align 2

diff  --git a/llvm/test/MC/RISCV/compress-rv32i.s b/llvm/test/MC/RISCV/compress-rv32i.s
index d64c1c9e0d34..7869481bb78d 100644
--- a/llvm/test/MC/RISCV/compress-rv32i.s
+++ b/llvm/test/MC/RISCV/compress-rv32i.s
@@ -57,10 +57,10 @@ nop
 addi ra, ra, -32
 
 # CHECK-BYTES: 85 50
-# CHECK-ALIAS: addi ra, zero, -31
+# CHECK-ALIAS: li ra, -31
 # CHECK-INST: c.li ra, -31
 # CHECK: # encoding: [0x85,0x50]
-addi ra, zero, -31
+li ra, -31
 
 # CHECK-BYTES: 39 71
 # CHECK-ALIAS: addi sp, sp, -64

diff  --git a/llvm/test/MC/RISCV/numeric-reg-names.s b/llvm/test/MC/RISCV/numeric-reg-names.s
index 3ca2bf600606..ba1827ff40d8 100644
--- a/llvm/test/MC/RISCV/numeric-reg-names.s
+++ b/llvm/test/MC/RISCV/numeric-reg-names.s
@@ -4,8 +4,8 @@
 # RUN:     | llvm-objdump -d -M numeric - \
 # RUN:     | FileCheck -check-prefix=CHECK-NUMERIC %s
 
-# CHECK-NUMERIC: addi x10, x0, 1
-# CHECK-NUMERIC-NEXT: addi x10, x0, 1
+# CHECK-NUMERIC: li x10, 1
+# CHECK-NUMERIC-NEXT: li x10, 1
 addi a0, x0, 1
 addi a0, zero, 1
 

diff  --git a/llvm/test/MC/RISCV/rv32i-aliases-valid.s b/llvm/test/MC/RISCV/rv32i-aliases-valid.s
index 566680d754fc..37d63135c138 100644
--- a/llvm/test/MC/RISCV/rv32i-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv32i-aliases-valid.s
@@ -18,20 +18,25 @@
 .Lpcrel_hi0: auipc a0, %pcrel_hi(foo)
 
 # CHECK-INST: addi a0, zero, 0
-# CHECK-ALIAS: mv a0, zero
+# CHECK-ALIAS: li a0, 0
 li x10, 0
-# CHECK-EXPAND: addi a0, zero, 1
+# CHECK-INST: addi a0, zero, 1
+# CHECK-ALIAS: li a0, 1
 li x10, 1
-# CHECK-EXPAND: addi a0, zero, -1
+# CHECK-INST: addi a0, zero, -1
+# CHECK-ALIAS: li a0, -1
 li x10, -1
-# CHECK-EXPAND: addi a0, zero, 2047
+# CHECK-INST: addi a0, zero, 2047
+# CHECK-ALIAS: li a0, 2047
 li x10, 2047
-# CHECK-EXPAND: addi a0, zero, -2047
+# CHECK-INST: addi a0, zero, -2047
+# CHECK-ALIAS: li a0, -2047
 li x10, -2047
 # CHECK-EXPAND: lui a1, 1
 # CHECK-EXPAND: addi a1, a1, -2048
 li x11, 2048
-# CHECK-EXPAND: addi a1, zero, -2048
+# CHECK-INST: addi a1, zero, -2048
+# CHECK-ALIAS: li a1, -2048
 li x11, -2048
 # CHECK-EXPAND: lui a1, 1
 # CHECK-EXPAND: addi a1, a1, -2047
@@ -68,10 +73,12 @@ li x12, -0x80000000
 
 # CHECK-EXPAND: lui a2, 524288
 li x12, 0x80000000
-# CHECK-EXPAND: addi a2, zero, -1
+# CHECK-INST: addi a2, zero, -1
+# CHECK-ALIAS: li a2, -1
 li x12, 0xFFFFFFFF
 
-# CHECK-EXPAND: addi a0, zero, 1110
+# CHECK-INST: addi a0, zero, 1110
+# CHECK-ALIAS: li a0, 1110
 li a0, %lo(0x123456)
 
 # CHECK-OBJ-NOALIAS: addi a0, zero, 0

diff  --git a/llvm/test/MC/RISCV/rv64i-aliases-valid.s b/llvm/test/MC/RISCV/rv64i-aliases-valid.s
index ad7097c13771..76accc09417f 100644
--- a/llvm/test/MC/RISCV/rv64i-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv64i-aliases-valid.s
@@ -21,20 +21,25 @@
 .Lpcrel_hi0: auipc a0, %pcrel_hi(foo)
 
 # CHECK-INST: addi a0, zero, 0
-# CHECK-ALIAS: mv a0, zero
+# CHECK-ALIAS: li a0, 0
 li x10, 0
-# CHECK-EXPAND: addi a0, zero, 1
+# CHECK-INST: addi a0, zero, 1
+# CHECK-ALIAS: li a0, 1
 li x10, 1
-# CHECK-EXPAND: addi a0, zero, -1
+# CHECK-INST: addi a0, zero, -1
+# CHECK-ALIAS: li a0, -1
 li x10, -1
-# CHECK-EXPAND: addi a0, zero, 2047
+# CHECK-INST: addi a0, zero, 2047
+# CHECK-ALIAS: li a0, 2047
 li x10, 2047
-# CHECK-EXPAND: addi a0, zero, -2047
+# CHECK-INST: addi a0, zero, -2047
+# CHECK-ALIAS: li a0, -2047
 li x10, -2047
 # CHECK-EXPAND: lui a1, 1
 # CHECK-EXPAND: addiw a1, a1, -2048
 li x11, 2048
-# CHECK-EXPAND: addi a1, zero, -2048
+# CHECK-INST: addi a1, zero, -2048
+# CHECK-ALIAS: li a1, -2048
 li x11, -2048
 # CHECK-EXPAND: lui a1, 1
 # CHECK-EXPAND: addiw a1, a1, -2047
@@ -69,31 +74,46 @@ li x12, -2147483648
 # CHECK-EXPAND: lui a2, 524288
 li x12, -0x80000000
 
-# CHECK-EXPAND: addi a2, zero, 1
-# CHECK-EXPAND-NEXT: slli a2, a2, 31
+# CHECK-INST: addi a2, zero, 1
+# CHECK-INST-NEXT: slli a2, a2, 31
+# CHECK-ALIAS: li a2, 1
+# CHECK-ALIAS-NEXT: slli a2, a2, 31
 li x12, 0x80000000
-# CHECK-EXPAND: addi a2, zero, -1
-# CHECK-EXPAND-NEXT: srli a2, a2, 32
+# CHECK-INST: addi a2, zero, -1
+# CHECK-INST-NEXT: srli a2, a2, 32
+# CHECK-ALIAS: li a2, -1
+# CHECK-ALIAS-NEXT: srli a2, a2, 32
 li x12, 0xFFFFFFFF
 
-# CHECK-EXPAND: addi t0, zero, 1
-# CHECK-EXPAND-NEXT: slli t0, t0, 32
+# CHECK-INST: addi t0, zero, 1
+# CHECK-INST-NEXT: slli t0, t0, 32
+# CHECK-ALIAS: li t0, 1
+# CHECK-ALIAS-NEXT: slli t0, t0, 32
 li t0, 0x100000000
-# CHECK-EXPAND: addi t1, zero, -1
-# CHECK-EXPAND-NEXT: slli t1, t1, 63
+# CHECK-INST: addi t1, zero, -1
+# CHECK-INST-NEXT: slli t1, t1, 63
+# CHECK-ALIAS: li t1, -1
+# CHECK-ALIAS-NEXT: slli t1, t1, 63
 li t1, 0x8000000000000000
-# CHECK-EXPAND: addi t1, zero, -1
-# CHECK-EXPAND-NEXT: slli t1, t1, 63
+# CHECK-INST: addi t1, zero, -1
+# CHECK-INST-NEXT: slli t1, t1, 63
+# CHECK-ALIAS: li t1, -1
+# CHECK-ALIAS-NEXT: slli t1, t1, 63
 li t1, -0x8000000000000000
 # CHECK-EXPAND: lui t2, 9321
 # CHECK-EXPAND-NEXT: addiw t2, t2, -1329
 # CHECK-EXPAND-NEXT: slli t2, t2, 35
 li t2, 0x1234567800000000
-# CHECK-EXPAND: addi t3, zero, 7
-# CHECK-EXPAND-NEXT: slli t3, t3, 36
-# CHECK-EXPAND-NEXT: addi t3, t3, 11
-# CHECK-EXPAND-NEXT: slli t3, t3, 24
-# CHECK-EXPAND-NEXT: addi t3, t3, 15
+# CHECK-INST: addi t3, zero, 7
+# CHECK-INST-NEXT: slli t3, t3, 36
+# CHECK-INST-NEXT: addi t3, t3, 11
+# CHECK-INST-NEXT: slli t3, t3, 24
+# CHECK-INST-NEXT: addi t3, t3, 15
+# CHECK-ALIAS: li t3, 7
+# CHECK-ALIAS-NEXT: slli t3, t3, 36
+# CHECK-ALIAS-NEXT: addi t3, t3, 11
+# CHECK-ALIAS-NEXT: slli t3, t3, 24
+# CHECK-ALIAS-NEXT: addi t3, t3, 15
 li t3, 0x700000000B00000F
 # CHECK-EXPAND: lui t4, 583
 # CHECK-EXPAND-NEXT: addiw t4, t4, -1875
@@ -104,7 +124,8 @@ li t3, 0x700000000B00000F
 # CHECK-EXPAND-NEXT: slli t4, t4, 13
 # CHECK-EXPAND-NEXT: addi t4, t4, -272
 li t4, 0x123456789abcdef0
-# CHECK-EXPAND: addi t5, zero, -1
+# CHECK-INST: addi t5, zero, -1
+# CHECK-ALIAS: li t5, -1
 li t5, 0xFFFFFFFFFFFFFFFF
 # CHECK-EXPAND: lui t6, 262145
 # CHECK-EXPAND-NEXT: slli t6, t6, 1
@@ -126,20 +147,31 @@ li x8, 0xFFFFFFF001
 # CHECK-EXPAND-NEXT: slli s1, s1, 20
 # CHECK-EXPAND-NEXT: addi s1, s1, -3
 li x9, 0x1000FFFFFFFD
-# CHECK-EXPAND: addi a0, zero, -1
-# CHECK-EXPAND-NEXT: slli a0, a0, 36
-# CHECK-EXPAND-NEXT: addi a0, a0, 1
-# CHECK-EXPAND-NEXT: slli a0, a0, 25
-# CHECK-EXPAND-NEXT: addi a0, a0, -1
+# CHECK-INST: addi a0, zero, -1
+# CHECK-INST-NEXT: slli a0, a0, 36
+# CHECK-INST-NEXT: addi a0, a0, 1
+# CHECK-INST-NEXT: slli a0, a0, 25
+# CHECK-INST-NEXT: addi a0, a0, -1
+# CHECK-ALIAS: li a0, -1
+# CHECK-ALIAS-NEXT: slli a0, a0, 36
+# CHECK-ALIAS-NEXT: addi a0, a0, 1
+# CHECK-ALIAS-NEXT: slli a0, a0, 25
+# CHECK-ALIAS-NEXT: addi a0, a0, -1
 li x10, 0xE000000001FFFFFF
-# CHECK-EXPAND: addi a1, zero, -2047
-# CHECK-EXPAND-NEXT: slli a1, a1, 27
-# CHECK-EXPAND-NEXT: addi a1, a1, -1
-# CHECK-EXPAND-NEXT: slli a1, a1, 12
-# CHECK-EXPAND-NEXT: addi a1, a1, 2047
+# CHECK-INST: addi a1, zero, -2047
+# CHECK-INST-NEXT: slli a1, a1, 27
+# CHECK-INST-NEXT: addi a1, a1, -1
+# CHECK-INST-NEXT: slli a1, a1, 12
+# CHECK-INST-NEXT: addi a1, a1, 2047
+# CHECK-ALIAS: li a1, -2047
+# CHECK-ALIAS-NEXT: slli a1, a1, 27
+# CHECK-ALIAS-NEXT: addi a1, a1, -1
+# CHECK-ALIAS-NEXT: slli a1, a1, 12
+# CHECK-ALIAS-NEXT: addi a1, a1, 2047
 li x11, 0xFFFC007FFFFFF7FF
 
-# CHECK-EXPAND: addi a0, zero, 1110
+# CHECK-INST: addi a0, zero, 1110
+# CHECK-ALIAS: li a0, 1110
 li a0, %lo(0x123456)
 
 # CHECK-OBJ-NOALIAS: addi a0, zero, 0

diff  --git a/llvm/test/MC/RISCV/rv64zba-aliases-valid.s b/llvm/test/MC/RISCV/rv64zba-aliases-valid.s
index 865436bfd4b7..2b073e0bf0b0 100644
--- a/llvm/test/MC/RISCV/rv64zba-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv64zba-aliases-valid.s
@@ -21,7 +21,7 @@ zext.w x5, x6
 
 # CHECK-S-OBJ-NOALIAS: addi t1, zero, -2
 # CHECK-S-OBJ-NOALIAS-NEXT: add.uw t1, t1, zero
-# CHECK-S-OBJ: addi t1, zero, -2
+# CHECK-S-OBJ: li t1, -2
 # CHECK-S-OBJ-NEXT: zext.w t1, t1
 li x6, 0xfffffffe
 

diff  --git a/llvm/test/MC/RISCV/rv64zbs-aliases-valid.s b/llvm/test/MC/RISCV/rv64zbs-aliases-valid.s
index c170416e882e..d74f43d43e84 100644
--- a/llvm/test/MC/RISCV/rv64zbs-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rv64zbs-aliases-valid.s
@@ -49,13 +49,13 @@ li x5, -2863311530 # 0xffffffff55555556
 
 # CHECK-S-OBJ-NOALIAS: addi t0, zero, 1365
 # CHECK-S-OBJ-NOALIAS-NEXT: bseti t0, t0, 31
-# CHECK-S-OBJ: addi t0, zero, 1365
+# CHECK-S-OBJ: li t0, 1365
 # CHECK-S-OBJ-NEXT: bseti t0, t0, 31
 li x5, 2147485013
 
 # CHECK-S-OBJ-NOALIAS: addi t0, zero, -1365
 # CHECK-S-OBJ-NOALIAS-NEXT: bclri t0, t0, 31
-# CHECK-S-OBJ: addi t0, zero, -1365
+# CHECK-S-OBJ: li t0, -1365
 # CHECK-S-OBJ-NEXT: bclri t0, t0, 31
 li x5, -2147485013
 

diff  --git a/llvm/test/MC/RISCV/rvi-aliases-valid.s b/llvm/test/MC/RISCV/rvi-aliases-valid.s
index e6bb6fb5a2d4..8b166d48992e 100644
--- a/llvm/test/MC/RISCV/rvi-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rvi-aliases-valid.s
@@ -38,7 +38,7 @@
 nop
 
 # CHECK-S-OBJ-NOALIAS: addi t6, zero, 0
-# CHECK-S-OBJ: mv t6, zero
+# CHECK-S-OBJ: li t6, 0
 mv x31, zero
 # CHECK-S-OBJ-NOALIAS: addi a2, a3, 0
 # CHECK-S-OBJ: mv a2, a3

diff  --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/riscv_function_name.ll.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/riscv_function_name.ll.expected
index d2ec3e0f9fcc..053a75473636 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/riscv_function_name.ll.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/riscv_function_name.ll.expected
@@ -6,7 +6,7 @@
 define hidden i32 @"_Z54bar$ompvariant$bar"() {
 ; CHECK-LABEL: _Z54bar$ompvariant$bar:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi a0, zero, 2
+; CHECK-NEXT:    li a0, 2
 ; CHECK-NEXT:    ret
 entry:
   ret i32 2


        

