[llvm] d9ba1a9 - [RISCV] Teach isel to select ADDW/SUBW/MULW/SLLIW when only the lower 32-bits are used.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 18 10:23:21 PDT 2021


Author: Craig Topper
Date: 2021-08-18T10:22:00-07:00
New Revision: d9ba1a9c5cac638a5cf3d23404509f7eeb434b7a

URL: https://github.com/llvm/llvm-project/commit/d9ba1a9c5cac638a5cf3d23404509f7eeb434b7a
DIFF: https://github.com/llvm/llvm-project/commit/d9ba1a9c5cac638a5cf3d23404509f7eeb434b7a.diff

LOG: [RISCV] Teach isel to select ADDW/SUBW/MULW/SLLIW when only the lower 32-bits are used.

We normally select these when the root node is a sext_inreg, but
SimplifyDemandedBits can sometimes bypass the sext_inreg for some
users. This can create a situation where sext_inreg+add/sub/mul/shl
is selected to a W instruction, and then the add/sub/mul/shl is
separately selected to a non-W instruction with the same inputs.

This patch tries to detect when it would still be ok to use a W
instruction without the sext_inreg by checking the direct users.
This can allow the W instruction to CSE with one created for a
sext_inreg+add/sub/mul/shl. To minimize complexity and cost of
checking, we make no attempt to determine if the CSE will happen
and just always use a W instruction when we can.
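
For example, add32_sext_reject_on_rv64 in
llvm/test/CodeGen/RISCV/add-imm.ll (updated below) both stores and returns
the same i32 add. A sketch of the test, reconstructed from the IR visible in
the diff (the gv0 declaration is assumed, not shown there):

  @gv0 = global i32 0   ; assumed declaration; the test file's may differ

  define signext i32 @add32_sext_reject_on_rv64(i32 signext %a) nounwind {
    %b = add nsw i32 %a, 3000
    store i32 %b, i32* @gv0, align 4
    ret i32 %b
  }

The sign-extended return requires a sext_inreg of the add and selects to
ADDW, while the store only needs the low 32 bits. Previously the store's
copy of the add was selected to a plain ADD, so the same inputs produced
both "add a2, a0, a1" and "addw a0, a0, a1". With this patch the add feeding
the store also selects to ADDW and CSEs with the existing one, leaving a
single addw, as the updated check lines show.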

Differential Revision: https://reviews.llvm.org/D107658

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
    llvm/lib/Target/RISCV/RISCVInstrInfo.td
    llvm/lib/Target/RISCV/RISCVInstrInfoM.td
    llvm/test/CodeGen/RISCV/add-before-shl.ll
    llvm/test/CodeGen/RISCV/add-imm.ll
    llvm/test/CodeGen/RISCV/addimm-mulimm.ll
    llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
    llvm/test/CodeGen/RISCV/atomic-rmw.ll
    llvm/test/CodeGen/RISCV/atomic-signext.ll
    llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
    llvm/test/CodeGen/RISCV/div.ll
    llvm/test/CodeGen/RISCV/mul.ll
    llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll
    llvm/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll
    llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll
    llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll
    llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
    llvm/test/CodeGen/RISCV/rv64zbb.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
    llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
    llvm/test/CodeGen/RISCV/shadowcallstack.ll
    llvm/test/CodeGen/RISCV/shift-masked-shamt.ll
    llvm/test/CodeGen/RISCV/srem-lkk.ll
    llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
    llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
    llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
    llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
    llvm/test/CodeGen/RISCV/urem-lkk.ll
    llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
    llvm/test/CodeGen/RISCV/usub_sat_plus.ll
    llvm/test/CodeGen/RISCV/vararg.ll
    llvm/test/CodeGen/RISCV/xaluo.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 9866567ac1ee..9343fcddef56 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1496,6 +1496,88 @@ bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
   return false;
 }
 
+// Return true if all users of this SDNode* only consume the lower \p Bits.
+// This can be used to form W instructions for add/sub/mul/shl even when the
+// root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
+// SimplifyDemandedBits has made it so some users see a sext_inreg and some
+// don't. The sext_inreg+add/sub/mul/shl will get selected to a W instruction,
+// but the bare add/sub/mul/shl would otherwise still be selected separately
+// as a non-W instruction. By checking the users we may be able to use a W
+// instruction here as well and CSE with the one already created. We could
+// try to detect that the CSE opportunity exists before doing this, but that
+// would be more complicated.
+// TODO: Does this need to look through AND/OR/XOR to their users to find more
+// opportunities?
+bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
+  assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
+          Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL) &&
+         "Unexpected opcode");
+
+  for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
+    SDNode *User = *UI;
+    // Users of this node should have already been instruction selected.
+    if (!User->isMachineOpcode())
+      return false;
+
+    // TODO: Add more opcodes?
+    switch (User->getMachineOpcode()) {
+    default:
+      return false;
+    case RISCV::ADDW:
+    case RISCV::ADDIW:
+    case RISCV::SUBW:
+    case RISCV::MULW:
+    case RISCV::SLLW:
+    case RISCV::SLLIW:
+    case RISCV::SRAW:
+    case RISCV::SRAIW:
+    case RISCV::SRLW:
+    case RISCV::SRLIW:
+    case RISCV::DIVW:
+    case RISCV::DIVUW:
+    case RISCV::REMW:
+    case RISCV::REMUW:
+    case RISCV::ROLW:
+    case RISCV::RORW:
+    case RISCV::RORIW:
+    case RISCV::CLZW:
+    case RISCV::CTZW:
+    case RISCV::CPOPW:
+    case RISCV::SLLIUW:
+      if (Bits < 32)
+        return false;
+      break;
+    case RISCV::SLLI:
+      // SLLI only uses the lower (XLen - ShAmt) bits.
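+      // E.g. with XLen=64 and a shift amount of 40, only the low 24 bits of
+      // the input can reach the result, so Bits >= 24 suffices.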
+      if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
+        return false;
+      break;
+    case RISCV::ADDUW:
+    case RISCV::SH1ADDUW:
+    case RISCV::SH2ADDUW:
+    case RISCV::SH3ADDUW:
+      // The first operand to add.uw/shXadd.uw is implicitly zero extended from
+      // 32 bits.
+      if (UI.getOperandNo() != 0 || Bits < 32)
+        return false;
+      break;
+    case RISCV::SB:
+      if (UI.getOperandNo() != 0 || Bits < 8)
+        return false;
+      break;
+    case RISCV::SH:
+      if (UI.getOperandNo() != 0 || Bits < 16)
+        return false;
+      break;
+    case RISCV::SW:
+      if (UI.getOperandNo() != 0 || Bits < 32)
+        return false;
+      break;
+    }
+  }
+
+  return true;
+}
+
 // Select VL as a 5-bit immediate or a value that will become a register. This
 // allows us to choose between VSETIVLI and VSETVLI later.
 bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {

diff  --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 296762e77b65..a4113a79512f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -58,6 +58,9 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
   bool selectSExti32(SDValue N, SDValue &Val);
   bool selectZExti32(SDValue N, SDValue &Val);
 
+  bool hasAllNBitUsers(SDNode *Node, unsigned Bits) const;
+  bool hasAllWUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 32); }
+
   bool selectVLOp(SDValue N, SDValue &VL);
 
   bool selectVSplat(SDValue N, SDValue &SplatVal);

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 331415f9b8d7..b7c05cbe19c2 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1254,6 +1254,14 @@ def : Pat<(i64 (shl (and GPR:$rs1, 0xffffffff), uimm5:$shamt)),
           (SRLI (SLLI GPR:$rs1, 32), (ImmSubFrom32 uimm5:$shamt))>;
 }
 
+// PatFrag to allow ADDW/SUBW/MULW/SLLIW to be selected from i64 add/sub/mul/shl
+// if only the lower 32 bits of their result are used.
+class overflowingbinopw<SDPatternOperator operator>
+    : PatFrag<(ops node:$lhs, node:$rhs),
+              (operator node:$lhs, node:$rhs), [{
+  return hasAllWUsers(Node);
+}]>;
+
 let Predicates = [IsRV64] in {
 
 /// sext and zext
@@ -1283,6 +1291,13 @@ def : PatGprGpr<shiftopw<riscv_sllw>, SLLW>;
 def : PatGprGpr<shiftopw<riscv_srlw>, SRLW>;
 def : PatGprGpr<shiftopw<riscv_sraw>, SRAW>;
 
+// Select W instructions without sext_inreg if only the lower 32 bits of the
+// result are used.
+def : PatGprGpr<overflowingbinopw<add>, ADDW>;
+def : PatGprSimm12<overflowingbinopw<add>, ADDIW>;
+def : PatGprGpr<overflowingbinopw<sub>, SUBW>;
+def : PatGprImm<overflowingbinopw<shl>, SLLIW, uimm5>;
+
 /// Loads
 
 defm : LdPat<sextloadi32, LW, i64>;

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoM.td b/llvm/lib/Target/RISCV/RISCVInstrInfoM.td
index f654ed1949a4..b38998f127e0 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoM.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoM.td
@@ -75,6 +75,10 @@ let Predicates = [HasStdExtM, IsRV64] in {
 def : Pat<(sext_inreg (mul GPR:$rs1, GPR:$rs2), i32),
           (MULW GPR:$rs1, GPR:$rs2)>;
 
+// Select W instructions without sext_inreg if only the lower 32 bits of the
+// result are used.
+def : PatGprGpr<overflowingbinopw<mul>, MULW>;
+
 def : PatGprGpr<riscv_divw, DIVW>;
 def : PatGprGpr<riscv_divuw, DIVUW>;
 def : PatGprGpr<riscv_remuw, REMUW>;

diff  --git a/llvm/test/CodeGen/RISCV/add-before-shl.ll b/llvm/test/CodeGen/RISCV/add-before-shl.ll
index a80b634c468b..212c74b79a34 100644
--- a/llvm/test/CodeGen/RISCV/add-before-shl.ll
+++ b/llvm/test/CodeGen/RISCV/add-before-shl.ll
@@ -21,7 +21,7 @@ define signext i32 @add_small_const(i32 signext %a) nounwind {
 ;
 ; RV64I-LABEL: add_small_const:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 1
+; RV64I-NEXT:    addiw a0, a0, 1
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
 ; RV64I-NEXT:    jalr zero, 0(ra)
@@ -35,7 +35,7 @@ define signext i32 @add_small_const(i32 signext %a) nounwind {
 ;
 ; RV64C-LABEL: add_small_const:
 ; RV64C:       # %bb.0:
-; RV64C-NEXT:    c.addi a0, 1
+; RV64C-NEXT:    c.addiw a0, 1
 ; RV64C-NEXT:    c.slli a0, 56
 ; RV64C-NEXT:    c.srai a0, 56
 ; RV64C-NEXT:    c.jr ra
@@ -75,7 +75,7 @@ define signext i32 @add_large_const(i32 signext %a) nounwind {
 ; RV64C:       # %bb.0:
 ; RV64C-NEXT:    c.lui a1, 1
 ; RV64C-NEXT:    c.addiw a1, -1
-; RV64C-NEXT:    c.add a0, a1
+; RV64C-NEXT:    c.addw a0, a1
 ; RV64C-NEXT:    c.slli a0, 48
 ; RV64C-NEXT:    c.srai a0, 48
 ; RV64C-NEXT:    c.jr ra
@@ -115,7 +115,7 @@ define signext i32 @add_huge_const(i32 signext %a) nounwind {
 ; RV64C:       # %bb.0:
 ; RV64C-NEXT:    c.lui a1, 8
 ; RV64C-NEXT:    c.addiw a1, -1
-; RV64C-NEXT:    c.add a0, a1
+; RV64C-NEXT:    c.addw a0, a1
 ; RV64C-NEXT:    c.slli a0, 48
 ; RV64C-NEXT:    c.srai a0, 48
 ; RV64C-NEXT:    c.jr ra
@@ -135,7 +135,7 @@ define signext i24 @add_non_machine_type(i24 signext %a) nounwind {
 ;
 ; RV64I-LABEL: add_non_machine_type:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 256
+; RV64I-NEXT:    addiw a0, a0, 256
 ; RV64I-NEXT:    slli a0, a0, 52
 ; RV64I-NEXT:    srai a0, a0, 40
 ; RV64I-NEXT:    jalr zero, 0(ra)
@@ -149,7 +149,7 @@ define signext i24 @add_non_machine_type(i24 signext %a) nounwind {
 ;
 ; RV64C-LABEL: add_non_machine_type:
 ; RV64C:       # %bb.0:
-; RV64C-NEXT:    addi a0, a0, 256
+; RV64C-NEXT:    addiw a0, a0, 256
 ; RV64C-NEXT:    c.slli a0, 52
 ; RV64C-NEXT:    c.srai a0, 40
 ; RV64C-NEXT:    c.jr ra

diff  --git a/llvm/test/CodeGen/RISCV/add-imm.ll b/llvm/test/CodeGen/RISCV/add-imm.ll
index 37231aae4454..c6727e79d394 100644
--- a/llvm/test/CodeGen/RISCV/add-imm.ll
+++ b/llvm/test/CodeGen/RISCV/add-imm.ll
@@ -180,10 +180,9 @@ define signext i32 @add32_sext_reject_on_rv64(i32 signext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lui a1, 1
 ; RV64I-NEXT:    addiw a1, a1, -1096
-; RV64I-NEXT:    add a2, a0, a1
-; RV64I-NEXT:    lui a3, %hi(gv0)
 ; RV64I-NEXT:    addw a0, a0, a1
-; RV64I-NEXT:    sw a2, %lo(gv0)(a3)
+; RV64I-NEXT:    lui a1, %hi(gv0)
+; RV64I-NEXT:    sw a0, %lo(gv0)(a1)
 ; RV64I-NEXT:    ret
   %b = add nsw i32 %a, 3000
   store i32 %b, i32* @gv0, align 4
@@ -234,8 +233,8 @@ define void @add32_reject() nounwind {
 ; RV64I-NEXT:    lw a3, %lo(gb)(a2)
 ; RV64I-NEXT:    lui a4, 1
 ; RV64I-NEXT:    addiw a4, a4, -1096
-; RV64I-NEXT:    add a1, a1, a4
-; RV64I-NEXT:    add a3, a3, a4
+; RV64I-NEXT:    addw a1, a1, a4
+; RV64I-NEXT:    addw a3, a3, a4
 ; RV64I-NEXT:    sw a1, %lo(ga)(a0)
 ; RV64I-NEXT:    sw a3, %lo(gb)(a2)
 ; RV64I-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
index ea08b5ad53ed..4b06e9909e29 100644
--- a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
+++ b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
@@ -18,7 +18,7 @@ define signext i32 @add_mul_trans_accept_1(i32 %x) {
 ; RV64IM-LABEL: add_mul_trans_accept_1:
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    addi a1, zero, 11
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    addiw a0, a0, 407
 ; RV64IM-NEXT:    ret
   %tmp0 = add i32 %x, 37
@@ -39,7 +39,7 @@ define signext i32 @add_mul_trans_accept_2(i32 %x) {
 ; RV64IM-LABEL: add_mul_trans_accept_2:
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    addi a1, zero, 13
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    lui a1, 28
 ; RV64IM-NEXT:    addiw a1, a1, 1701
 ; RV64IM-NEXT:    addw a0, a0, a1
@@ -62,7 +62,7 @@ define signext i32 @add_mul_trans_reject_1(i32 %x) {
 ; RV64IM-LABEL: add_mul_trans_reject_1:
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    addi a1, zero, 19
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    lui a1, 9
 ; RV64IM-NEXT:    addiw a1, a1, 585
 ; RV64IM-NEXT:    addw a0, a0, a1
@@ -87,7 +87,7 @@ define signext i32 @add_mul_trans_reject_2(i32 %x) {
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    lui a1, 792
 ; RV64IM-NEXT:    addiw a1, a1, -1709
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    lui a1, 1014660
 ; RV64IM-NEXT:    addiw a1, a1, -1891
 ; RV64IM-NEXT:    addw a0, a0, a1

diff  --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
index 888148f987e6..236c422dc60e 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
@@ -61,7 +61,7 @@ define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind
 ; RV64IA-LABEL: cmpxchg_i8_monotonic_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a4, zero, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -137,7 +137,7 @@ define void @cmpxchg_i8_acquire_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA-LABEL: cmpxchg_i8_acquire_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a4, zero, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -213,7 +213,7 @@ define void @cmpxchg_i8_acquire_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA-LABEL: cmpxchg_i8_acquire_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a4, zero, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -289,7 +289,7 @@ define void @cmpxchg_i8_release_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA-LABEL: cmpxchg_i8_release_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a4, zero, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -365,7 +365,7 @@ define void @cmpxchg_i8_release_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA-LABEL: cmpxchg_i8_release_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a4, zero, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -441,7 +441,7 @@ define void @cmpxchg_i8_acq_rel_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA-LABEL: cmpxchg_i8_acq_rel_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a4, zero, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -517,7 +517,7 @@ define void @cmpxchg_i8_acq_rel_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA-LABEL: cmpxchg_i8_acq_rel_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a4, zero, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -593,7 +593,7 @@ define void @cmpxchg_i8_seq_cst_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA-LABEL: cmpxchg_i8_seq_cst_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a4, zero, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -669,7 +669,7 @@ define void @cmpxchg_i8_seq_cst_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA-LABEL: cmpxchg_i8_seq_cst_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a4, zero, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -745,7 +745,7 @@ define void @cmpxchg_i8_seq_cst_seq_cst(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ; RV64IA-LABEL: cmpxchg_i8_seq_cst_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a4, zero, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -822,7 +822,7 @@ define void @cmpxchg_i16_monotonic_monotonic(i16* %ptr, i16 %cmp, i16 %val) noun
 ; RV64IA-LABEL: cmpxchg_i16_monotonic_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a4, 16
 ; RV64IA-NEXT:    addiw a4, a4, -1
 ; RV64IA-NEXT:    sllw a5, a4, a0
@@ -900,7 +900,7 @@ define void @cmpxchg_i16_acquire_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV64IA-LABEL: cmpxchg_i16_acquire_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a4, 16
 ; RV64IA-NEXT:    addiw a4, a4, -1
 ; RV64IA-NEXT:    sllw a5, a4, a0
@@ -978,7 +978,7 @@ define void @cmpxchg_i16_acquire_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64IA-LABEL: cmpxchg_i16_acquire_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a4, 16
 ; RV64IA-NEXT:    addiw a4, a4, -1
 ; RV64IA-NEXT:    sllw a5, a4, a0
@@ -1056,7 +1056,7 @@ define void @cmpxchg_i16_release_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV64IA-LABEL: cmpxchg_i16_release_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a4, 16
 ; RV64IA-NEXT:    addiw a4, a4, -1
 ; RV64IA-NEXT:    sllw a5, a4, a0
@@ -1134,7 +1134,7 @@ define void @cmpxchg_i16_release_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64IA-LABEL: cmpxchg_i16_release_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a4, 16
 ; RV64IA-NEXT:    addiw a4, a4, -1
 ; RV64IA-NEXT:    sllw a5, a4, a0
@@ -1212,7 +1212,7 @@ define void @cmpxchg_i16_acq_rel_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV64IA-LABEL: cmpxchg_i16_acq_rel_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a4, 16
 ; RV64IA-NEXT:    addiw a4, a4, -1
 ; RV64IA-NEXT:    sllw a5, a4, a0
@@ -1290,7 +1290,7 @@ define void @cmpxchg_i16_acq_rel_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64IA-LABEL: cmpxchg_i16_acq_rel_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a4, 16
 ; RV64IA-NEXT:    addiw a4, a4, -1
 ; RV64IA-NEXT:    sllw a5, a4, a0
@@ -1368,7 +1368,7 @@ define void @cmpxchg_i16_seq_cst_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ; RV64IA-LABEL: cmpxchg_i16_seq_cst_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a4, 16
 ; RV64IA-NEXT:    addiw a4, a4, -1
 ; RV64IA-NEXT:    sllw a5, a4, a0
@@ -1446,7 +1446,7 @@ define void @cmpxchg_i16_seq_cst_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64IA-LABEL: cmpxchg_i16_seq_cst_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a4, 16
 ; RV64IA-NEXT:    addiw a4, a4, -1
 ; RV64IA-NEXT:    sllw a5, a4, a0
@@ -1524,7 +1524,7 @@ define void @cmpxchg_i16_seq_cst_seq_cst(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ; RV64IA-LABEL: cmpxchg_i16_seq_cst_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a4, 16
 ; RV64IA-NEXT:    addiw a4, a4, -1
 ; RV64IA-NEXT:    sllw a5, a4, a0

diff  --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
index c623844646a0..eaf650a9739b 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -52,7 +52,7 @@ define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xchg_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -116,7 +116,7 @@ define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xchg_i8_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -180,7 +180,7 @@ define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xchg_i8_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -244,7 +244,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xchg_i8_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -308,7 +308,7 @@ define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xchg_i8_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -372,7 +372,7 @@ define i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_add_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -436,7 +436,7 @@ define i8 @atomicrmw_add_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_add_i8_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -500,7 +500,7 @@ define i8 @atomicrmw_add_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_add_i8_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -564,7 +564,7 @@ define i8 @atomicrmw_add_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_add_i8_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -628,7 +628,7 @@ define i8 @atomicrmw_add_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_add_i8_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -692,7 +692,7 @@ define i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_sub_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -756,7 +756,7 @@ define i8 @atomicrmw_sub_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_sub_i8_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -820,7 +820,7 @@ define i8 @atomicrmw_sub_i8_release(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_sub_i8_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -884,7 +884,7 @@ define i8 @atomicrmw_sub_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_sub_i8_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -948,7 +948,7 @@ define i8 @atomicrmw_sub_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_sub_i8_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -1006,7 +1006,7 @@ define i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_and_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    not a3, a3
@@ -1058,7 +1058,7 @@ define i8 @atomicrmw_and_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_and_i8_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    not a3, a3
@@ -1110,7 +1110,7 @@ define i8 @atomicrmw_and_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_and_i8_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    not a3, a3
@@ -1162,7 +1162,7 @@ define i8 @atomicrmw_and_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_and_i8_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    not a3, a3
@@ -1214,7 +1214,7 @@ define i8 @atomicrmw_and_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_and_i8_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    not a3, a3
@@ -1273,7 +1273,7 @@ define i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_nand_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -1339,7 +1339,7 @@ define i8 @atomicrmw_nand_i8_acquire(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_nand_i8_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -1405,7 +1405,7 @@ define i8 @atomicrmw_nand_i8_release(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_nand_i8_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -1471,7 +1471,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_nand_i8_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -1537,7 +1537,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_nand_i8_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -1592,7 +1592,7 @@ define i8 @atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_or_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoor.w a1, a1, (a2)
@@ -1636,7 +1636,7 @@ define i8 @atomicrmw_or_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_or_i8_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoor.w.aq a1, a1, (a2)
@@ -1680,7 +1680,7 @@ define i8 @atomicrmw_or_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_or_i8_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoor.w.rl a1, a1, (a2)
@@ -1724,7 +1724,7 @@ define i8 @atomicrmw_or_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_or_i8_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoor.w.aqrl a1, a1, (a2)
@@ -1768,7 +1768,7 @@ define i8 @atomicrmw_or_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_or_i8_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoor.w.aqrl a1, a1, (a2)
@@ -1812,7 +1812,7 @@ define i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xor_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoxor.w a1, a1, (a2)
@@ -1856,7 +1856,7 @@ define i8 @atomicrmw_xor_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xor_i8_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoxor.w.aq a1, a1, (a2)
@@ -1900,7 +1900,7 @@ define i8 @atomicrmw_xor_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xor_i8_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoxor.w.rl a1, a1, (a2)
@@ -1944,7 +1944,7 @@ define i8 @atomicrmw_xor_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xor_i8_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoxor.w.aqrl a1, a1, (a2)
@@ -1988,7 +1988,7 @@ define i8 @atomicrmw_xor_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xor_i8_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoxor.w.aqrl a1, a1, (a2)
@@ -3586,7 +3586,7 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umax_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -3718,7 +3718,7 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umax_i8_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -3850,7 +3850,7 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umax_i8_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -3982,7 +3982,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umax_i8_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -4114,7 +4114,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umax_i8_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -4246,7 +4246,7 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umin_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -4378,7 +4378,7 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umin_i8_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -4510,7 +4510,7 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umin_i8_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -4642,7 +4642,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umin_i8_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -4774,7 +4774,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umin_i8_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -4843,7 +4843,7 @@ define i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xchg_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -4909,7 +4909,7 @@ define i16 @atomicrmw_xchg_i16_acquire(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xchg_i16_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -4975,7 +4975,7 @@ define i16 @atomicrmw_xchg_i16_release(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xchg_i16_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -5041,7 +5041,7 @@ define i16 @atomicrmw_xchg_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xchg_i16_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -5107,7 +5107,7 @@ define i16 @atomicrmw_xchg_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xchg_i16_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -5173,7 +5173,7 @@ define i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_add_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -5239,7 +5239,7 @@ define i16 @atomicrmw_add_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_add_i16_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -5305,7 +5305,7 @@ define i16 @atomicrmw_add_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_add_i16_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -5371,7 +5371,7 @@ define i16 @atomicrmw_add_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_add_i16_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -5437,7 +5437,7 @@ define i16 @atomicrmw_add_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_add_i16_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -5503,7 +5503,7 @@ define i16 @atomicrmw_sub_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_sub_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -5569,7 +5569,7 @@ define i16 @atomicrmw_sub_i16_acquire(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_sub_i16_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -5635,7 +5635,7 @@ define i16 @atomicrmw_sub_i16_release(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_sub_i16_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -5701,7 +5701,7 @@ define i16 @atomicrmw_sub_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_sub_i16_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -5767,7 +5767,7 @@ define i16 @atomicrmw_sub_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_sub_i16_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -5827,7 +5827,7 @@ define i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_and_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -5881,7 +5881,7 @@ define i16 @atomicrmw_and_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_and_i16_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -5935,7 +5935,7 @@ define i16 @atomicrmw_and_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_and_i16_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -5989,7 +5989,7 @@ define i16 @atomicrmw_and_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_and_i16_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -6043,7 +6043,7 @@ define i16 @atomicrmw_and_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_and_i16_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -6104,7 +6104,7 @@ define i16 @atomicrmw_nand_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_nand_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -6172,7 +6172,7 @@ define i16 @atomicrmw_nand_i16_acquire(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_nand_i16_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -6240,7 +6240,7 @@ define i16 @atomicrmw_nand_i16_release(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_nand_i16_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -6308,7 +6308,7 @@ define i16 @atomicrmw_nand_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_nand_i16_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -6376,7 +6376,7 @@ define i16 @atomicrmw_nand_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_nand_i16_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -6434,7 +6434,7 @@ define i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_or_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    and a1, a1, a3
@@ -6482,7 +6482,7 @@ define i16 @atomicrmw_or_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_or_i16_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    and a1, a1, a3
@@ -6530,7 +6530,7 @@ define i16 @atomicrmw_or_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_or_i16_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    and a1, a1, a3
@@ -6578,7 +6578,7 @@ define i16 @atomicrmw_or_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_or_i16_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    and a1, a1, a3
@@ -6626,7 +6626,7 @@ define i16 @atomicrmw_or_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_or_i16_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    and a1, a1, a3
@@ -6674,7 +6674,7 @@ define i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xor_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    and a1, a1, a3
@@ -6722,7 +6722,7 @@ define i16 @atomicrmw_xor_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xor_i16_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    and a1, a1, a3
@@ -6770,7 +6770,7 @@ define i16 @atomicrmw_xor_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xor_i16_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    and a1, a1, a3
@@ -6818,7 +6818,7 @@ define i16 @atomicrmw_xor_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xor_i16_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    and a1, a1, a3
@@ -6866,7 +6866,7 @@ define i16 @atomicrmw_xor_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xor_i16_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    and a1, a1, a3
@@ -8495,7 +8495,7 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umax_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -8637,7 +8637,7 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umax_i16_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -8779,7 +8779,7 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umax_i16_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -8921,7 +8921,7 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umax_i16_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -9063,7 +9063,7 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umax_i16_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -9205,7 +9205,7 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umin_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -9347,7 +9347,7 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umin_i16_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -9489,7 +9489,7 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umin_i16_release:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -9631,7 +9631,7 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umin_i16_acq_rel:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -9773,7 +9773,7 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umin_i16_seq_cst:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0

diff  --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll
index dee847c7cdae..e7ec2bf88866 100644
--- a/llvm/test/CodeGen/RISCV/atomic-signext.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll
@@ -170,7 +170,7 @@ define signext i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xchg_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -242,7 +242,7 @@ define signext i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_add_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -314,7 +314,7 @@ define signext i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_sub_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -380,7 +380,7 @@ define signext i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_and_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    not a3, a3
@@ -447,7 +447,7 @@ define signext i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_nand_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -510,7 +510,7 @@ define signext i8 @atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_or_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoor.w a1, a1, (a2)
@@ -562,7 +562,7 @@ define signext i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xor_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    andi a1, a1, 255
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoxor.w a1, a1, (a2)
@@ -994,7 +994,7 @@ define signext i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umax_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -1132,7 +1132,7 @@ define signext i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umin_i8_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a3, zero, 255
 ; RV64IA-NEXT:    sllw a3, a3, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -1209,7 +1209,7 @@ define signext i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xchg_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -1283,7 +1283,7 @@ define signext i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_add_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -1357,7 +1357,7 @@ define signext i16 @atomicrmw_sub_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_sub_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -1425,7 +1425,7 @@ define signext i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_and_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -1494,7 +1494,7 @@ define signext i16 @atomicrmw_nand_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_nand_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -1560,7 +1560,7 @@ define signext i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_or_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    and a1, a1, a3
@@ -1616,7 +1616,7 @@ define signext i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_xor_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    and a1, a1, a3
@@ -2063,7 +2063,7 @@ define signext i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umax_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0
@@ -2211,7 +2211,7 @@ define signext i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; RV64IA-LABEL: atomicrmw_umin_i16_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a6, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    lui a3, 16
 ; RV64IA-NEXT:    addiw a3, a3, -1
 ; RV64IA-NEXT:    sllw a4, a3, a0

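A note on the pattern in the atomic-rmw.ll and atomic-signext.ll hunks above: the changed instruction computes the in-word bit offset from the pointer, and every user of that value (the sllw/srlw shift amounts and the masked 32-bit update) depends only on its low bits, which is what makes slliw interchangeable with slli here. A minimal sketch of the shape, with hypothetical names rather than the full expanded atomicrmw sequence:

  ; Only the low 5 bits of %sh reach the 32-bit shift, so the inner shl
  ; may be selected as slliw.
  define i32 @shamt_sketch(i64 %p, i32 %v) {
    %lo = trunc i64 %p to i32
    %sh = shl i32 %lo, 3
    %r = shl i32 %v, %sh
    ret i32 %r
  }
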
diff  --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
index 65a152b44dc7..84dfed0d57e0 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -54,15 +54,15 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i128 %d, i32 %e, i32 %f,
 ; RV64I-NEXT:    lui a0, 16
 ; RV64I-NEXT:    addiw a0, a0, -1
 ; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:    add a0, t2, a0
-; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    addw a0, t2, a0
+; RV64I-NEXT:    addw a0, a0, a2
 ; RV64I-NEXT:    xor a1, a4, t1
 ; RV64I-NEXT:    xor a2, a3, a7
 ; RV64I-NEXT:    or a1, a2, a1
 ; RV64I-NEXT:    seqz a1, a1
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    add a0, a0, a5
-; RV64I-NEXT:    add a0, a0, a6
+; RV64I-NEXT:    addw a0, a1, a0
+; RV64I-NEXT:    addw a0, a0, a5
+; RV64I-NEXT:    addw a0, a0, a6
 ; RV64I-NEXT:    addw a0, a0, t0
 ; RV64I-NEXT:    ret
   %a_ext = zext i8 %a to i32

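The callee_many_scalars hunk shows a whole chain of i32 adds flipping to addw: each intermediate sum is consumed only by the next 32-bit add and, at the end, by the sign-extended i32 return, and an addw result depends only on the low 32 bits of its inputs. A reduced sketch with a hypothetical function of the same shape:

  ; Each add feeds only another i32 add, so every link in the chain
  ; qualifies for addw.
  define signext i32 @sum_chain(i32 %a, i32 %b, i32 %c, i32 %d) {
    %t1 = add i32 %a, %b
    %t2 = add i32 %t1, %c
    %r = add i32 %t2, %d
    ret i32 %r
  }
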
diff  --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll
index 991c91cc70ea..e03273abf781 100644
--- a/llvm/test/CodeGen/RISCV/div.ll
+++ b/llvm/test/CodeGen/RISCV/div.ll
@@ -713,7 +713,7 @@ define i32 @sdiv_pow2(i32 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sraiw a1, a0, 31
 ; RV64I-NEXT:    srliw a1, a1, 29
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    sraiw a0, a0, 3
 ; RV64I-NEXT:    ret
 ;
@@ -721,7 +721,7 @@ define i32 @sdiv_pow2(i32 %a) nounwind {
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    sraiw a1, a0, 31
 ; RV64IM-NEXT:    srliw a1, a1, 29
-; RV64IM-NEXT:    add a0, a0, a1
+; RV64IM-NEXT:    addw a0, a0, a1
 ; RV64IM-NEXT:    sraiw a0, a0, 3
 ; RV64IM-NEXT:    ret
   %1 = sdiv i32 %a, 8
@@ -749,7 +749,7 @@ define i32 @sdiv_pow2_2(i32 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sraiw a1, a0, 31
 ; RV64I-NEXT:    srliw a1, a1, 16
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    sraiw a0, a0, 16
 ; RV64I-NEXT:    ret
 ;
@@ -757,7 +757,7 @@ define i32 @sdiv_pow2_2(i32 %a) nounwind {
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    sraiw a1, a0, 31
 ; RV64IM-NEXT:    srliw a1, a1, 16
-; RV64IM-NEXT:    add a0, a0, a1
+; RV64IM-NEXT:    addw a0, a0, a1
 ; RV64IM-NEXT:    sraiw a0, a0, 16
 ; RV64IM-NEXT:    ret
   %1 = sdiv i32 %a, 65536
@@ -1117,7 +1117,7 @@ define i8 @sdiv8_pow2(i8 %a) nounwind {
 ; RV64I-NEXT:    srai a1, a1, 56
 ; RV64I-NEXT:    srli a1, a1, 12
 ; RV64I-NEXT:    andi a1, a1, 7
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 59
 ; RV64I-NEXT:    ret
@@ -1128,7 +1128,7 @@ define i8 @sdiv8_pow2(i8 %a) nounwind {
 ; RV64IM-NEXT:    srai a1, a1, 56
 ; RV64IM-NEXT:    srli a1, a1, 12
 ; RV64IM-NEXT:    andi a1, a1, 7
-; RV64IM-NEXT:    add a0, a0, a1
+; RV64IM-NEXT:    addw a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 56
 ; RV64IM-NEXT:    srai a0, a0, 59
 ; RV64IM-NEXT:    ret
@@ -1309,7 +1309,7 @@ define i16 @sdiv16_pow2(i16 %a) nounwind {
 ; RV64I-NEXT:    srai a1, a1, 48
 ; RV64I-NEXT:    srli a1, a1, 28
 ; RV64I-NEXT:    andi a1, a1, 7
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 51
 ; RV64I-NEXT:    ret
@@ -1320,7 +1320,7 @@ define i16 @sdiv16_pow2(i16 %a) nounwind {
 ; RV64IM-NEXT:    srai a1, a1, 48
 ; RV64IM-NEXT:    srli a1, a1, 28
 ; RV64IM-NEXT:    andi a1, a1, 7
-; RV64IM-NEXT:    add a0, a0, a1
+; RV64IM-NEXT:    addw a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 48
 ; RV64IM-NEXT:    srai a0, a0, 51
 ; RV64IM-NEXT:    ret

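In the div.ll hunks, the bias add in the signed divide-by-power-of-two expansion now selects addw because its only user is sraiw (or, for the i8/i16 cases, an slli/srai pair), both of which read just the low 32 bits of the sum. The first case is simply:

  ; sdiv by 8 expands to the sraiw/srliw/add/sraiw sequence shown above;
  ; the add result is read only by sraiw, so it can be addw.
  define i32 @sdiv_by_8(i32 %a) {
    %r = sdiv i32 %a, 8
    ret i32 %r
  }
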
diff  --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index 2260233a4559..d26eb1065332 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -91,13 +91,13 @@ define signext i32 @mul_constant(i32 %a) nounwind {
 ;
 ; RV64I-LABEL: mul_constant:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    slliw a1, a0, 2
 ; RV64I-NEXT:    addw a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: mul_constant:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    slli a1, a0, 2
+; RV64IM-NEXT:    slliw a1, a0, 2
 ; RV64IM-NEXT:    addw a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = mul i32 %a, 5
@@ -485,13 +485,13 @@ define i32 @muli32_p65(i32 %a) nounwind {
 ;
 ; RV64I-LABEL: muli32_p65:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    slliw a1, a0, 6
 ; RV64I-NEXT:    addw a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: muli32_p65:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    slli a1, a0, 6
+; RV64IM-NEXT:    slliw a1, a0, 6
 ; RV64IM-NEXT:    addw a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = mul i32 %a, 65
@@ -513,13 +513,13 @@ define i32 @muli32_p63(i32 %a) nounwind {
 ;
 ; RV64I-LABEL: muli32_p63:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    slliw a1, a0, 6
 ; RV64I-NEXT:    subw a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: muli32_p63:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    slli a1, a0, 6
+; RV64IM-NEXT:    slliw a1, a0, 6
 ; RV64IM-NEXT:    subw a0, a1, a0
 ; RV64IM-NEXT:    ret
   %1 = mul i32 %a, 63
@@ -620,13 +620,13 @@ define i32 @muli32_m63(i32 %a) nounwind {
 ;
 ; RV64I-LABEL: muli32_m63:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    slliw a1, a0, 6
 ; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: muli32_m63:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    slli a1, a0, 6
+; RV64IM-NEXT:    slliw a1, a0, 6
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = mul i32 %a, -63
@@ -650,15 +650,15 @@ define i32 @muli32_m65(i32 %a) nounwind {
 ;
 ; RV64I-LABEL: muli32_m65:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 6
-; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    slliw a1, a0, 6
+; RV64I-NEXT:    addw a0, a1, a0
 ; RV64I-NEXT:    negw a0, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: muli32_m65:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    slli a1, a0, 6
-; RV64IM-NEXT:    add a0, a1, a0
+; RV64IM-NEXT:    slliw a1, a0, 6
+; RV64IM-NEXT:    addw a0, a1, a0
 ; RV64IM-NEXT:    negw a0, a0
 ; RV64IM-NEXT:    ret
   %1 = mul i32 %a, -65
@@ -841,15 +841,15 @@ define i32 @muli32_p4352(i32 %a) nounwind {
 ;
 ; RV64I-LABEL: muli32_p4352:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 8
-; RV64I-NEXT:    slli a0, a0, 12
+; RV64I-NEXT:    slliw a1, a0, 8
+; RV64I-NEXT:    slliw a0, a0, 12
 ; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: muli32_p4352:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    slli a1, a0, 8
-; RV64IM-NEXT:    slli a0, a0, 12
+; RV64IM-NEXT:    slliw a1, a0, 8
+; RV64IM-NEXT:    slliw a0, a0, 12
 ; RV64IM-NEXT:    addw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = mul i32 %a, 4352
@@ -873,15 +873,15 @@ define i32 @muli32_p3840(i32 %a) nounwind {
 ;
 ; RV64I-LABEL: muli32_p3840:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 8
-; RV64I-NEXT:    slli a0, a0, 12
+; RV64I-NEXT:    slliw a1, a0, 8
+; RV64I-NEXT:    slliw a0, a0, 12
 ; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: muli32_p3840:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    slli a1, a0, 8
-; RV64IM-NEXT:    slli a0, a0, 12
+; RV64IM-NEXT:    slliw a1, a0, 8
+; RV64IM-NEXT:    slliw a0, a0, 12
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = mul i32 %a, 3840
@@ -905,15 +905,15 @@ define i32 @muli32_m3840(i32 %a) nounwind {
 ;
 ; RV64I-LABEL: muli32_m3840:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a0, 12
-; RV64I-NEXT:    slli a0, a0, 8
+; RV64I-NEXT:    slliw a1, a0, 12
+; RV64I-NEXT:    slliw a0, a0, 8
 ; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: muli32_m3840:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    slli a1, a0, 12
-; RV64IM-NEXT:    slli a0, a0, 8
+; RV64IM-NEXT:    slliw a1, a0, 12
+; RV64IM-NEXT:    slliw a0, a0, 8
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = mul i32 %a, -3840

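The mul.ll changes all follow one pattern: a constant multiply is decomposed into a shift plus an add or sub, and since addw/subw results depend only on the low 32 bits of their inputs, the feeding shift may itself be slliw. For example:

  ; mul by 5 is selected as slliw a1, a0, 2 followed by addw a0, a1, a0.
  define signext i32 @mul_by_5(i32 %a) {
    %r = mul i32 %a, 5
    ret i32 %r
  }
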
diff  --git a/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll b/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll
index 671f90b5791a..373325fcdfdc 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll
@@ -10,10 +10,10 @@ define i32 @foo(i32 %x, i32 %y, i32 %z) {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    mulw a0, a0, a0
-; CHECK-NEXT:    addi a0, a0, 1
-; CHECK-NEXT:    mul a0, a0, a0
-; CHECK-NEXT:    add a0, a0, a2
-; CHECK-NEXT:    addi a0, a0, 1
+; CHECK-NEXT:    addiw a0, a0, 1
+; CHECK-NEXT:    mulw a0, a0, a0
+; CHECK-NEXT:    addw a0, a0, a2
+; CHECK-NEXT:    addiw a0, a0, 1
 ; CHECK-NEXT:    sllw a0, a0, a1
 ; CHECK-NEXT:    ret
   %b = mul i32 %x, %x

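In rv64i-demanded-bits.ll the whole chain flips to W forms at once: the final sllw result depends only on the low 32 bits of its first operand, so the addiw feeding it qualifies, and the same reasoning then propagates backwards through the mulw and addw. A hypothetical function of analogous shape (the full test body is truncated in the context above):

  ; Every intermediate value is consumed only by 32-bit operations, so
  ; the entire chain can use W instructions.
  define i32 @chain(i32 %x, i32 %y) {
    %m = mul i32 %x, %x
    %a = add i32 %m, 1
    %s = shl i32 %a, %y
    ret i32 %s
  }
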
diff  --git a/llvm/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll b/llvm/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll
index df0520aee11e..54d9aeabfc73 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll
@@ -178,7 +178,7 @@ define signext i32 @sext_addw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind
 define zeroext i32 @zext_addw_aext_aext(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_addw_aext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -189,7 +189,7 @@ define zeroext i32 @zext_addw_aext_aext(i32 %a, i32 %b) nounwind {
 define zeroext i32 @zext_addw_aext_sext(i32 %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_addw_aext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -200,7 +200,7 @@ define zeroext i32 @zext_addw_aext_sext(i32 %a, i32 signext %b) nounwind {
 define zeroext i32 @zext_addw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_addw_aext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -211,7 +211,7 @@ define zeroext i32 @zext_addw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 define zeroext i32 @zext_addw_sext_aext(i32 signext %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_addw_sext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -222,7 +222,7 @@ define zeroext i32 @zext_addw_sext_aext(i32 signext %a, i32 %b) nounwind {
 define zeroext i32 @zext_addw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_addw_sext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -233,7 +233,7 @@ define zeroext i32 @zext_addw_sext_sext(i32 signext %a, i32 signext %b) nounwind
 define zeroext i32 @zext_addw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_addw_sext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -244,7 +244,7 @@ define zeroext i32 @zext_addw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind
 define zeroext i32 @zext_addw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_addw_zext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -255,7 +255,7 @@ define zeroext i32 @zext_addw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 define zeroext i32 @zext_addw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_addw_zext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -266,7 +266,7 @@ define zeroext i32 @zext_addw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind
 define zeroext i32 @zext_addw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_addw_zext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -445,7 +445,7 @@ define signext i32 @sext_subw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind
 define zeroext i32 @zext_subw_aext_aext(i32 %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_subw_aext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -456,7 +456,7 @@ define zeroext i32 @zext_subw_aext_aext(i32 %a, i32 %b) nounwind {
 define zeroext i32 @zext_subw_aext_sext(i32 %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_subw_aext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -467,7 +467,7 @@ define zeroext i32 @zext_subw_aext_sext(i32 %a, i32 signext %b) nounwind {
 define zeroext i32 @zext_subw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_subw_aext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -478,7 +478,7 @@ define zeroext i32 @zext_subw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 define zeroext i32 @zext_subw_sext_aext(i32 signext %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_subw_sext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -489,7 +489,7 @@ define zeroext i32 @zext_subw_sext_aext(i32 signext %a, i32 %b) nounwind {
 define zeroext i32 @zext_subw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_subw_sext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -500,7 +500,7 @@ define zeroext i32 @zext_subw_sext_sext(i32 signext %a, i32 signext %b) nounwind
 define zeroext i32 @zext_subw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_subw_sext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -511,7 +511,7 @@ define zeroext i32 @zext_subw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind
 define zeroext i32 @zext_subw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 ; RV64I-LABEL: zext_subw_zext_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -522,7 +522,7 @@ define zeroext i32 @zext_subw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 define zeroext i32 @zext_subw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: zext_subw_zext_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -533,7 +533,7 @@ define zeroext i32 @zext_subw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind
 define zeroext i32 @zext_subw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV64I-LABEL: zext_subw_zext_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1387,7 +1387,7 @@ define signext i32 @sext_addiw_zext(i32 zeroext %a) nounwind {
 define zeroext i32 @zext_addiw_aext(i32 %a) nounwind {
 ; RV64I-LABEL: zext_addiw_aext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 7
+; RV64I-NEXT:    addiw a0, a0, 7
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1398,7 +1398,7 @@ define zeroext i32 @zext_addiw_aext(i32 %a) nounwind {
 define zeroext i32 @zext_addiw_sext(i32 signext %a) nounwind {
 ; RV64I-LABEL: zext_addiw_sext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 8
+; RV64I-NEXT:    addiw a0, a0, 8
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret
@@ -1409,7 +1409,7 @@ define zeroext i32 @zext_addiw_sext(i32 signext %a) nounwind {
 define zeroext i32 @zext_addiw_zext(i32 zeroext %a) nounwind {
 ; RV64I-LABEL: zext_addiw_zext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 9
+; RV64I-NEXT:    addiw a0, a0, 9
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    ret

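Every hunk in rv64i-exhaustive-w-insts.ll above has the same local justification: the zeroext i32 return is materialized as the slli 32 / srli 32 pair, which reads only the low 32 bits of the add or sub feeding it, so the W form is equally correct. The whole family reduces to variants of:

  ; The zero-extension shift pair reads only the low 32 bits of %r.
  define zeroext i32 @zadd(i32 %a, i32 %b) {
    %r = add i32 %a, %b
    ret i32 %r
  }
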
diff  --git a/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll b/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll
index 682f351478ed..40db46ad1cae 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll
@@ -7,16 +7,16 @@ define signext i32 @addw(i32 signext %s, i32 signext %n, i32 signext %k) nounwin
 ; CHECK-NEXT:    bge a0, a1, .LBB0_2
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
 ; CHECK-NEXT:    not a2, a0
-; CHECK-NEXT:    add a2, a2, a1
-; CHECK-NEXT:    addi a3, a0, 1
-; CHECK-NEXT:    mul a3, a2, a3
-; CHECK-NEXT:    sub a1, a1, a0
-; CHECK-NEXT:    addi a1, a1, -2
+; CHECK-NEXT:    addw a2, a2, a1
+; CHECK-NEXT:    addiw a3, a0, 1
+; CHECK-NEXT:    mulw a3, a2, a3
+; CHECK-NEXT:    subw a1, a1, a0
+; CHECK-NEXT:    addiw a1, a1, -2
 ; CHECK-NEXT:    slli a1, a1, 32
 ; CHECK-NEXT:    slli a2, a2, 32
 ; CHECK-NEXT:    mulhu a1, a2, a1
 ; CHECK-NEXT:    srli a1, a1, 1
-; CHECK-NEXT:    add a0, a3, a0
+; CHECK-NEXT:    addw a0, a3, a0
 ; CHECK-NEXT:    addw a0, a0, a1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB0_2:
@@ -53,15 +53,15 @@ define signext i32 @subw(i32 signext %s, i32 signext %n, i32 signext %k) nounwin
 ; CHECK-NEXT:    bge a0, a1, .LBB1_2
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
 ; CHECK-NEXT:    not a2, a0
-; CHECK-NEXT:    add a3, a2, a1
-; CHECK-NEXT:    mul a2, a3, a2
-; CHECK-NEXT:    sub a1, a1, a0
-; CHECK-NEXT:    addi a1, a1, -2
+; CHECK-NEXT:    addw a3, a2, a1
+; CHECK-NEXT:    mulw a2, a3, a2
+; CHECK-NEXT:    subw a1, a1, a0
+; CHECK-NEXT:    addiw a1, a1, -2
 ; CHECK-NEXT:    slli a1, a1, 32
 ; CHECK-NEXT:    slli a3, a3, 32
 ; CHECK-NEXT:    mulhu a1, a3, a1
 ; CHECK-NEXT:    srli a1, a1, 1
-; CHECK-NEXT:    sub a0, a2, a0
+; CHECK-NEXT:    subw a0, a2, a0
 ; CHECK-NEXT:    subw a0, a0, a1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB1_2:

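The rv64i-w-insts-legalization.ll hunks show that the check also holds up with multiple users: in the addw test, the not/addw result is read both by the mulw and by an slli-by-32 feeding mulhu, and both of those depend only on its low 32 bits. A small sketch of a two-user shape, under the assumption that both uses are 32-bit:

  ; Two users, each of which reads only the low 32 bits of %s.
  define i64 @two_users(i32 %x, i32 %y) {
    %s = add i32 %x, %y
    %m = mul i32 %s, %y
    %z1 = zext i32 %s to i64
    %z2 = zext i32 %m to i64
    %r = add i64 %z1, %z2
    ret i64 %r
  }
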
diff  --git a/llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll b/llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll
index 5732ac9aa410..7da8cc82d32a 100644
--- a/llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll
+++ b/llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll
@@ -171,7 +171,7 @@ define signext i32 @sext_mulw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind
 define zeroext i32 @zext_mulw_aext_aext(i32 %a, i32 %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_aext_aext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret
@@ -182,7 +182,7 @@ define zeroext i32 @zext_mulw_aext_aext(i32 %a, i32 %b) nounwind {
 define zeroext i32 @zext_mulw_aext_sext(i32 %a, i32 signext %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_aext_sext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret
@@ -193,7 +193,7 @@ define zeroext i32 @zext_mulw_aext_sext(i32 %a, i32 signext %b) nounwind {
 define zeroext i32 @zext_mulw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_aext_zext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret
@@ -204,7 +204,7 @@ define zeroext i32 @zext_mulw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 define zeroext i32 @zext_mulw_sext_aext(i32 signext %a, i32 %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_sext_aext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret
@@ -215,7 +215,7 @@ define zeroext i32 @zext_mulw_sext_aext(i32 signext %a, i32 %b) nounwind {
 define zeroext i32 @zext_mulw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_sext_sext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret
@@ -226,7 +226,7 @@ define zeroext i32 @zext_mulw_sext_sext(i32 signext %a, i32 signext %b) nounwind
 define zeroext i32 @zext_mulw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_sext_zext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret
@@ -237,7 +237,7 @@ define zeroext i32 @zext_mulw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind
 define zeroext i32 @zext_mulw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_zext_aext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret
@@ -248,7 +248,7 @@ define zeroext i32 @zext_mulw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 define zeroext i32 @zext_mulw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_zext_sext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret
@@ -259,7 +259,7 @@ define zeroext i32 @zext_mulw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind
 define zeroext i32 @zext_mulw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_zext_zext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret

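The rv64m-exhaustive-w-insts.ll hunks are the M-extension counterpart of the add/sub cases above: the zero-extension shift pair reads only the low 32 bits of the product, so the mul can be mulw:

  ; Same justification as zadd above, applied to a multiply.
  define zeroext i32 @zmul(i32 %a, i32 %b) {
    %r = mul i32 %a, %b
    ret i32 %r
  }
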
diff  --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
index 41cf5478f0b0..798bec0113b8 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll
@@ -170,7 +170,7 @@ define signext i32 @rol_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: rol_i32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sllw a2, a0, a1
-; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    negw a1, a1
 ; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
@@ -198,7 +198,7 @@ define void @rol_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
 ; RV64I-LABEL: rol_i32_nosext:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sllw a3, a0, a1
-; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    negw a1, a1
 ; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    or a0, a3, a0
 ; RV64I-NEXT:    sw a0, 0(a2)
@@ -231,7 +231,7 @@ define signext i32 @rol_i32_neg_constant_rhs(i32 signext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi a1, zero, -2
 ; RV64I-NEXT:    sllw a2, a1, a0
-; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    negw a0, a0
 ; RV64I-NEXT:    srlw a0, a1, a0
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
@@ -292,7 +292,7 @@ define signext i32 @ror_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: ror_i32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srlw a2, a0, a1
-; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    negw a1, a1
 ; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret
@@ -320,7 +320,7 @@ define void @ror_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
 ; RV64I-LABEL: ror_i32_nosext:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srlw a3, a0, a1
-; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    negw a1, a1
 ; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    or a0, a3, a0
 ; RV64I-NEXT:    sw a0, 0(a2)
@@ -353,7 +353,7 @@ define signext i32 @ror_i32_neg_constant_rhs(i32 signext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi a1, zero, -2
 ; RV64I-NEXT:    srlw a2, a1, a0
-; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    negw a0, a0
 ; RV64I-NEXT:    sllw a0, a1, a0
 ; RV64I-NEXT:    or a0, a2, a0
 ; RV64I-NEXT:    ret

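In the rv64zbb-zbp.ll rotate hunks, neg becomes negw: the negated amount is used only as the shift amount of srlw or sllw, which read just their low 5 bits. These tests are rotates built from funnel shifts; a sketch of the rotate-left shape:

  ; The negated shift amount feeds only 32-bit shifts.
  define signext i32 @rotl32(i32 signext %a, i32 signext %b) {
    %r = call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
    ret i32 %r
  }
  declare i32 @llvm.fshl.i32(i32, i32, i32)
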
diff  --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index 361db386583b..4d819955f12a 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -265,7 +265,7 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
 ;
 ; RV64B-LABEL: log2_ceil_i32:
 ; RV64B:       # %bb.0:
-; RV64B-NEXT:    addi a0, a0, -1
+; RV64B-NEXT:    addiw a0, a0, -1
 ; RV64B-NEXT:    clzw a0, a0
 ; RV64B-NEXT:    addi a1, zero, 32
 ; RV64B-NEXT:    sub a0, a1, a0
@@ -273,7 +273,7 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
 ;
 ; RV64ZBB-LABEL: log2_ceil_i32:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a0, a0, -1
+; RV64ZBB-NEXT:    addiw a0, a0, -1
 ; RV64ZBB-NEXT:    clzw a0, a0
 ; RV64ZBB-NEXT:    addi a1, zero, 32
 ; RV64ZBB-NEXT:    sub a0, a1, a0

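The rv64zbb.ll change works because clzw depends only on the low 32 bits of its operand, so the decrement in front of it can be addiw. A sketch of the log2_ceil shape, matching the assembly above:

  ; clzw reads only the low 32 bits of %d, so the add can be addiw.
  define signext i32 @log2_ceil_sketch(i32 signext %a) {
    %d = add i32 %a, -1
    %c = call i32 @llvm.ctlz.i32(i32 %d, i1 false)
    %r = sub i32 32, %c
    ret i32 %r
  }
  declare i32 @llvm.ctlz.i32(i32, i1)
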
diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
index b5dbfb821289..82cd16db58fa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
@@ -537,7 +537,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    addi a4, a4, 257
 ; LMULMAX2-RV64-NEXT:    mul a5, a5, a4
 ; LMULMAX2-RV64-NEXT:    srli a5, a5, 56
-; LMULMAX2-RV64-NEXT:    addi a5, a5, -56
+; LMULMAX2-RV64-NEXT:    addiw a5, a5, -56
 ; LMULMAX2-RV64-NEXT:    sb a5, 16(sp)
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 15
@@ -568,7 +568,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 31(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 14
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -598,7 +598,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 30(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 13
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -628,7 +628,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 29(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 12
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -658,7 +658,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 28(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 11
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -688,7 +688,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 27(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 10
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -718,7 +718,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 26(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 9
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -748,7 +748,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 25(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 8
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -778,7 +778,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 24(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 7
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -808,7 +808,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 23(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 6
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -838,7 +838,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 22(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 5
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -868,7 +868,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 21(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 4
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -898,7 +898,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 20(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 3
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -928,7 +928,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 19(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 2
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -958,7 +958,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 18(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v25, v25, 1
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v25
@@ -988,7 +988,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 17(sp)
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX2-RV64-NEXT:    addi a1, sp, 16
@@ -1529,7 +1529,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    addi a4, a4, 257
 ; LMULMAX1-RV64-NEXT:    mul a5, a5, a4
 ; LMULMAX1-RV64-NEXT:    srli a5, a5, 56
-; LMULMAX1-RV64-NEXT:    addi a5, a5, -56
+; LMULMAX1-RV64-NEXT:    addiw a5, a5, -56
 ; LMULMAX1-RV64-NEXT:    sb a5, 16(sp)
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 15
@@ -1560,7 +1560,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 31(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 14
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -1590,7 +1590,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 30(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 13
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -1620,7 +1620,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 29(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 12
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -1650,7 +1650,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 28(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 11
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -1680,7 +1680,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 27(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 10
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -1710,7 +1710,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 26(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 9
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -1740,7 +1740,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 25(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 8
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -1770,7 +1770,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 24(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 7
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -1800,7 +1800,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 23(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 6
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -1830,7 +1830,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 22(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 5
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -1860,7 +1860,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 21(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 4
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -1890,7 +1890,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 20(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 3
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -1920,7 +1920,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 19(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 2
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -1950,7 +1950,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 18(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v25, v25, 1
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v25
@@ -1980,7 +1980,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 17(sp)
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    addi a1, sp, 16
@@ -2309,7 +2309,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    addi a5, a5, 257
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 16(sp)
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 7
@@ -2340,7 +2340,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 30(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 6
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -2370,7 +2370,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 28(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 5
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -2400,7 +2400,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 26(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 4
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -2430,7 +2430,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 24(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 3
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -2460,7 +2460,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 22(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 2
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -2490,7 +2490,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 20(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v25, v25, 1
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v25
@@ -2520,7 +2520,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 18(sp)
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX2-RV64-NEXT:    addi a1, sp, 16
@@ -2841,7 +2841,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    addi a5, a5, 257
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX1-RV64-NEXT:    sh a1, 16(sp)
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 7
@@ -2872,7 +2872,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX1-RV64-NEXT:    sh a1, 30(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 6
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -2902,7 +2902,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX1-RV64-NEXT:    sh a1, 28(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 5
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -2932,7 +2932,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX1-RV64-NEXT:    sh a1, 26(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 4
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -2962,7 +2962,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX1-RV64-NEXT:    sh a1, 24(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 3
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -2992,7 +2992,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX1-RV64-NEXT:    sh a1, 22(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 2
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -3022,7 +3022,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX1-RV64-NEXT:    sh a1, 20(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v25, v25, 1
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v25
@@ -3052,7 +3052,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX1-RV64-NEXT:    sh a1, 18(sp)
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    addi a1, sp, 16
@@ -3258,7 +3258,7 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; LMULMAX2-RV64-NEXT:    addi a4, a4, 257
 ; LMULMAX2-RV64-NEXT:    mul a5, a5, a4
 ; LMULMAX2-RV64-NEXT:    srli a5, a5, 56
-; LMULMAX2-RV64-NEXT:    addi a5, a5, -32
+; LMULMAX2-RV64-NEXT:    addiw a5, a5, -32
 ; LMULMAX2-RV64-NEXT:    sw a5, 16(sp)
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 3
@@ -3290,7 +3290,7 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX2-RV64-NEXT:    sw a1, 28(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v25, 2
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -3321,7 +3321,7 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX2-RV64-NEXT:    sw a1, 24(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v25, v25, 1
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v25
@@ -3352,7 +3352,7 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX2-RV64-NEXT:    sw a1, 20(sp)
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX2-RV64-NEXT:    addi a1, sp, 16
@@ -3550,7 +3550,7 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; LMULMAX1-RV64-NEXT:    addi a4, a4, 257
 ; LMULMAX1-RV64-NEXT:    mul a5, a5, a4
 ; LMULMAX1-RV64-NEXT:    srli a5, a5, 56
-; LMULMAX1-RV64-NEXT:    addi a5, a5, -32
+; LMULMAX1-RV64-NEXT:    addiw a5, a5, -32
 ; LMULMAX1-RV64-NEXT:    sw a5, 16(sp)
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 3
@@ -3582,7 +3582,7 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX1-RV64-NEXT:    sw a1, 28(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 2
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -3613,7 +3613,7 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX1-RV64-NEXT:    sw a1, 24(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v25, v25, 1
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v25
@@ -3644,7 +3644,7 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX1-RV64-NEXT:    sw a1, 20(sp)
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    addi a1, sp, 16
@@ -5138,7 +5138,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    addi a5, a5, 257
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 32(sp)
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 31
@@ -5169,7 +5169,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 63(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 30
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5199,7 +5199,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 62(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 29
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5229,7 +5229,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 61(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 28
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5259,7 +5259,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 60(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 27
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5289,7 +5289,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 59(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 26
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5319,7 +5319,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 58(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 25
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5349,7 +5349,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 57(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 24
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5379,7 +5379,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 56(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 23
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5409,7 +5409,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 55(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 22
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5439,7 +5439,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 54(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 21
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5469,7 +5469,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 53(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 20
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5499,7 +5499,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 52(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 19
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5529,7 +5529,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 51(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 18
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5559,7 +5559,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 50(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 17
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5589,7 +5589,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 49(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 16
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5619,7 +5619,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 48(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 15
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5649,7 +5649,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 47(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 14
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5679,7 +5679,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 46(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 13
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5709,7 +5709,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 45(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 12
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5739,7 +5739,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 44(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 11
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5769,7 +5769,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 43(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 10
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5799,7 +5799,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 42(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 9
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5829,7 +5829,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 41(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 8
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5859,7 +5859,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 40(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 7
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5889,7 +5889,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 39(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 6
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5919,7 +5919,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 38(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 5
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5949,7 +5949,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 37(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 4
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -5979,7 +5979,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 36(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 3
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -6009,7 +6009,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 35(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 2
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -6039,7 +6039,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 34(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v26, 1
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -6069,7 +6069,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX2-RV64-NEXT:    sb a1, 33(sp)
 ; LMULMAX2-RV64-NEXT:    vsetvli zero, a6, e8, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    addi a1, sp, 32
@@ -7067,7 +7067,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    addi a5, a5, 257
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 32(sp)
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 15
@@ -7098,7 +7098,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 47(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 14
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v27
@@ -7128,7 +7128,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 46(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 13
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v27
@@ -7158,7 +7158,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 45(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 12
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v27
@@ -7188,7 +7188,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 44(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 11
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v27
@@ -7218,7 +7218,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 43(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 10
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v27
@@ -7248,7 +7248,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 42(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 9
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v27
@@ -7278,7 +7278,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 41(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 8
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v27
@@ -7308,7 +7308,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 40(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 7
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v27
@@ -7338,7 +7338,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 39(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 6
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v27
@@ -7368,7 +7368,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 38(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 5
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v27
@@ -7398,7 +7398,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 37(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 4
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v27
@@ -7428,7 +7428,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 36(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 3
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v27
@@ -7458,7 +7458,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 35(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 2
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v27
@@ -7488,7 +7488,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 34(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v26, 1
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -7518,7 +7518,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 33(sp)
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v25
 ; LMULMAX1-RV64-NEXT:    andi a1, a1, 255
@@ -7547,7 +7547,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 16(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 15
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -7577,7 +7577,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 31(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 14
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -7607,7 +7607,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 30(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 13
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -7637,7 +7637,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 29(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 12
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -7667,7 +7667,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 28(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 11
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -7697,7 +7697,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 27(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 10
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -7727,7 +7727,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 26(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 9
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -7757,7 +7757,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 25(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 8
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -7787,7 +7787,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 24(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 7
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -7817,7 +7817,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 23(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 6
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -7847,7 +7847,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 22(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 5
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -7877,7 +7877,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 21(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 4
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -7907,7 +7907,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 20(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 3
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -7937,7 +7937,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 19(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 2
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -7967,7 +7967,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 18(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v25, v25, 1
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v25
@@ -7997,7 +7997,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -56
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -56
 ; LMULMAX1-RV64-NEXT:    sb a1, 17(sp)
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    addi a1, sp, 16
@@ -8570,7 +8570,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    addi a5, a5, 257
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 32(sp)
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 1, e16, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 15
@@ -8601,7 +8601,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 62(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 14
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -8631,7 +8631,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 60(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 13
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -8661,7 +8661,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 58(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 12
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -8691,7 +8691,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 56(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 11
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -8721,7 +8721,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 54(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 10
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -8751,7 +8751,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 52(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 9
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -8781,7 +8781,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 50(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 8
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -8811,7 +8811,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 48(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 7
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -8841,7 +8841,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 46(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 6
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -8871,7 +8871,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 44(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 5
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -8901,7 +8901,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 42(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 4
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -8931,7 +8931,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 40(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 3
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -8961,7 +8961,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 38(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 2
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -8991,7 +8991,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 36(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v26, 1
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -9021,7 +9021,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX2-RV64-NEXT:    sh a1, 34(sp)
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    addi a1, sp, 32
@@ -9575,7 +9575,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    addi a1, a1, 257
 ; LMULMAX1-RV64-NEXT:    mul a2, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a2, a2, 56
-; LMULMAX1-RV64-NEXT:    addi a2, a2, -48
+; LMULMAX1-RV64-NEXT:    addiw a2, a2, -48
 ; LMULMAX1-RV64-NEXT:    sh a2, 32(sp)
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 7
@@ -9606,7 +9606,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a2, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a2, a2, 56
-; LMULMAX1-RV64-NEXT:    addi a2, a2, -48
+; LMULMAX1-RV64-NEXT:    addiw a2, a2, -48
 ; LMULMAX1-RV64-NEXT:    sh a2, 46(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 6
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a2, v27
@@ -9636,7 +9636,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a2, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a2, a2, 56
-; LMULMAX1-RV64-NEXT:    addi a2, a2, -48
+; LMULMAX1-RV64-NEXT:    addiw a2, a2, -48
 ; LMULMAX1-RV64-NEXT:    sh a2, 44(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 5
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a2, v27
@@ -9666,7 +9666,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a2, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a2, a2, 56
-; LMULMAX1-RV64-NEXT:    addi a2, a2, -48
+; LMULMAX1-RV64-NEXT:    addiw a2, a2, -48
 ; LMULMAX1-RV64-NEXT:    sh a2, 42(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 4
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a2, v27
@@ -9696,7 +9696,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a2, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a2, a2, 56
-; LMULMAX1-RV64-NEXT:    addi a2, a2, -48
+; LMULMAX1-RV64-NEXT:    addiw a2, a2, -48
 ; LMULMAX1-RV64-NEXT:    sh a2, 40(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 3
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a2, v27
@@ -9726,7 +9726,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a2, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a2, a2, 56
-; LMULMAX1-RV64-NEXT:    addi a2, a2, -48
+; LMULMAX1-RV64-NEXT:    addiw a2, a2, -48
 ; LMULMAX1-RV64-NEXT:    sh a2, 38(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 2
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a2, v27
@@ -9756,7 +9756,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a2, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a2, a2, 56
-; LMULMAX1-RV64-NEXT:    addi a2, a2, -48
+; LMULMAX1-RV64-NEXT:    addiw a2, a2, -48
 ; LMULMAX1-RV64-NEXT:    sh a2, 36(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v26, 1
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a2, v26
@@ -9786,7 +9786,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a2, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a2, a2, 56
-; LMULMAX1-RV64-NEXT:    addi a2, a2, -48
+; LMULMAX1-RV64-NEXT:    addiw a2, a2, -48
 ; LMULMAX1-RV64-NEXT:    sh a2, 34(sp)
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a2, v25
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a7
@@ -9815,7 +9815,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a2, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a2, a2, 56
-; LMULMAX1-RV64-NEXT:    addi a2, a2, -48
+; LMULMAX1-RV64-NEXT:    addiw a2, a2, -48
 ; LMULMAX1-RV64-NEXT:    sh a2, 16(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 7
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a2, v26
@@ -9845,7 +9845,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a2, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a2, a2, 56
-; LMULMAX1-RV64-NEXT:    addi a2, a2, -48
+; LMULMAX1-RV64-NEXT:    addiw a2, a2, -48
 ; LMULMAX1-RV64-NEXT:    sh a2, 30(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 6
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a2, v26
@@ -9875,7 +9875,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a2, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a2, a2, 56
-; LMULMAX1-RV64-NEXT:    addi a2, a2, -48
+; LMULMAX1-RV64-NEXT:    addiw a2, a2, -48
 ; LMULMAX1-RV64-NEXT:    sh a2, 28(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 5
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a2, v26
@@ -9905,7 +9905,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a2, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a2, a2, 56
-; LMULMAX1-RV64-NEXT:    addi a2, a2, -48
+; LMULMAX1-RV64-NEXT:    addiw a2, a2, -48
 ; LMULMAX1-RV64-NEXT:    sh a2, 26(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 4
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a2, v26
@@ -9935,7 +9935,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a2, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a2, a2, 56
-; LMULMAX1-RV64-NEXT:    addi a2, a2, -48
+; LMULMAX1-RV64-NEXT:    addiw a2, a2, -48
 ; LMULMAX1-RV64-NEXT:    sh a2, 24(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 3
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a2, v26
@@ -9965,7 +9965,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a2, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a2, a2, 56
-; LMULMAX1-RV64-NEXT:    addi a2, a2, -48
+; LMULMAX1-RV64-NEXT:    addiw a2, a2, -48
 ; LMULMAX1-RV64-NEXT:    sh a2, 22(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 2
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a2, v26
@@ -9995,7 +9995,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a2, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a2, a2, 56
-; LMULMAX1-RV64-NEXT:    addi a2, a2, -48
+; LMULMAX1-RV64-NEXT:    addiw a2, a2, -48
 ; LMULMAX1-RV64-NEXT:    sh a2, 20(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v25, v25, 1
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a2, v25
@@ -10025,7 +10025,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a2, a2, a5
 ; LMULMAX1-RV64-NEXT:    mul a1, a2, a1
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -48
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -48
 ; LMULMAX1-RV64-NEXT:    sh a1, 18(sp)
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    addi a1, sp, 16
@@ -10355,7 +10355,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-RV64-NEXT:    addi a4, a4, 257
 ; LMULMAX2-RV64-NEXT:    mul a5, a5, a4
 ; LMULMAX2-RV64-NEXT:    srli a5, a5, 56
-; LMULMAX2-RV64-NEXT:    addi a5, a5, -32
+; LMULMAX2-RV64-NEXT:    addiw a5, a5, -32
 ; LMULMAX2-RV64-NEXT:    sw a5, 32(sp)
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 7
@@ -10387,7 +10387,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX2-RV64-NEXT:    sw a1, 60(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 6
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -10418,7 +10418,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX2-RV64-NEXT:    sw a1, 56(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 5
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -10449,7 +10449,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX2-RV64-NEXT:    sw a1, 52(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 4
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -10480,7 +10480,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX2-RV64-NEXT:    sw a1, 48(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 3
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -10511,7 +10511,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX2-RV64-NEXT:    sw a1, 44(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v28, v26, 2
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v28
@@ -10542,7 +10542,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX2-RV64-NEXT:    sw a1, 40(sp)
 ; LMULMAX2-RV64-NEXT:    vslidedown.vi v26, v26, 1
 ; LMULMAX2-RV64-NEXT:    vmv.x.s a1, v26
@@ -10573,7 +10573,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX2-RV64-NEXT:    and a1, a1, a3
 ; LMULMAX2-RV64-NEXT:    mul a1, a1, a4
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX2-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX2-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX2-RV64-NEXT:    sw a1, 36(sp)
 ; LMULMAX2-RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; LMULMAX2-RV64-NEXT:    addi a1, sp, 32
@@ -10884,7 +10884,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX1-RV64-NEXT:    addi a5, a5, 257
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX1-RV64-NEXT:    sw a1, 32(sp)
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 3
@@ -10916,7 +10916,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX1-RV64-NEXT:    sw a1, 44(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v27, v26, 2
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v27
@@ -10947,7 +10947,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX1-RV64-NEXT:    sw a1, 40(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v26, 1
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -10978,7 +10978,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX1-RV64-NEXT:    sw a1, 36(sp)
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v25
 ; LMULMAX1-RV64-NEXT:    srliw a2, a1, 1
@@ -11008,7 +11008,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX1-RV64-NEXT:    sw a1, 16(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 3
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -11039,7 +11039,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX1-RV64-NEXT:    sw a1, 28(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v26, v25, 2
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v26
@@ -11070,7 +11070,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX1-RV64-NEXT:    sw a1, 24(sp)
 ; LMULMAX1-RV64-NEXT:    vslidedown.vi v25, v25, 1
 ; LMULMAX1-RV64-NEXT:    vmv.x.s a1, v25
@@ -11101,7 +11101,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
 ; LMULMAX1-RV64-NEXT:    and a1, a1, a4
 ; LMULMAX1-RV64-NEXT:    mul a1, a1, a5
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
-; LMULMAX1-RV64-NEXT:    addi a1, a1, -32
+; LMULMAX1-RV64-NEXT:    addiw a1, a1, -32
 ; LMULMAX1-RV64-NEXT:    sw a1, 20(sp)
 ; LMULMAX1-RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    addi a1, sp, 16
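
All of the ctlz hunks above share one shape: the popcount is extracted with srli ..., 56, the bias (-32, -48, or -56 for i32/i16/i8 elements) is added, and the result is only ever consumed by a truncating sb/sh/sw store. Because those stores read at most the low 32 bits of the register, selecting ADDIW instead of ADDI is equally correct, and the ADDIW can CSE with one already created for a sext_inreg user. A minimal scalar sketch of the same demanded-bits situation (hypothetical IR, not one of the tests in this file):

    define void @store_biased_ctlz(i64 %v, i32* %p) {
      %lz64 = call i64 @llvm.ctlz.i64(i64 %v, i1 false)
      %lz = trunc i64 %lz64 to i32
      ; Only the 32-bit store uses %biased, so no user of the add sees
      ; past bit 31 and isel is free to pick ADDIW for it.
      %biased = add i32 %lz, -32
      store i32 %biased, i32* %p
      ret void
    }

    declare i64 @llvm.ctlz.i64(i64, i1)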

diff --git a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
index e0d7e0deb8a2..eb79a95e63a2 100644
--- a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
@@ -231,7 +231,7 @@ define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
-; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    mulw a1, a1, a2
 ; RV64I-NEXT:    slli a1, a1, 48
 ; RV64I-NEXT:    srai a1, a1, 48
 ; RV64I-NEXT:    add a0, a0, a1
@@ -309,7 +309,7 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
-; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    mulw a1, a1, a2
 ; RV64I-NEXT:    slli a1, a1, 56
 ; RV64I-NEXT:    srai a1, a1, 56
 ; RV64I-NEXT:    add a0, a0, a1
@@ -384,7 +384,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 60
 ; RV64I-NEXT:    srai a0, a0, 60
-; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    mulw a1, a1, a2
 ; RV64I-NEXT:    slli a1, a1, 60
 ; RV64I-NEXT:    srai a1, a1, 60
 ; RV64I-NEXT:    add a0, a0, a1
@@ -421,7 +421,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    slli a0, a0, 60
 ; RV64IZbb-NEXT:    srai a0, a0, 60
-; RV64IZbb-NEXT:    mul a1, a1, a2
+; RV64IZbb-NEXT:    mulw a1, a1, a2
 ; RV64IZbb-NEXT:    slli a1, a1, 60
 ; RV64IZbb-NEXT:    srai a1, a1, 60
 ; RV64IZbb-NEXT:    add a0, a0, a1

diff --git a/llvm/test/CodeGen/RISCV/shadowcallstack.ll b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
index 3c02a6094152..93ea48e3bc1d 100644
--- a/llvm/test/CodeGen/RISCV/shadowcallstack.ll
+++ b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
@@ -121,8 +121,8 @@ define i32 @f4() shadowcallstack {
 ; RV64-NEXT:    call bar@plt
 ; RV64-NEXT:    mv s0, a0
 ; RV64-NEXT:    call bar@plt
-; RV64-NEXT:    add a1, s3, s1
-; RV64-NEXT:    add a0, s0, a0
+; RV64-NEXT:    addw a1, s3, s1
+; RV64-NEXT:    addw a0, s0, a0
 ; RV64-NEXT:    addw a0, a1, a0
 ; RV64-NEXT:    ld s3, 0(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
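
For reference, f4 sums four i32 results returned by calls; its body is presumably of the following shape (a reconstruction for illustration, not copied from the test file). Each inner add is used only by the outer add, so every user wants at most the low 32 bits and ADDW becomes a legal selection at every level:

    define i32 @sum4(i32 %a, i32 %b, i32 %c, i32 %d) {
      ; Both %t0 and %t1 feed only the final i32 add, whose own result
      ; is sign-extended for the LP64 return, so ADDW works throughout.
      %t0 = add i32 %a, %b
      %t1 = add i32 %c, %d
      %r = add i32 %t0, %t1
      ret i32 %r
    }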

diff --git a/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll b/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll
index 6163d9de39d3..d7d901e6cf80 100644
--- a/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll
+++ b/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll
@@ -112,7 +112,7 @@ define i32 @sll_redundant_mask_zeros(i32 %a, i32 %b) nounwind {
 ;
 ; RV64I-LABEL: sll_redundant_mask_zeros:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a1, 1
+; RV64I-NEXT:    slliw a1, a1, 1
 ; RV64I-NEXT:    sllw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %b, 1
@@ -130,7 +130,7 @@ define i32 @srl_redundant_mask_zeros(i32 %a, i32 %b) nounwind {
 ;
 ; RV64I-LABEL: srl_redundant_mask_zeros:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a1, 2
+; RV64I-NEXT:    slliw a1, a1, 2
 ; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %b, 2
@@ -148,7 +148,7 @@ define i32 @sra_redundant_mask_zeros(i32 %a, i32 %b) nounwind {
 ;
 ; RV64I-LABEL: sra_redundant_mask_zeros:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a1, a1, 3
+; RV64I-NEXT:    slliw a1, a1, 3
 ; RV64I-NEXT:    sraw a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = shl i32 %b, 3
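
The shift-amount cases rely on the same reasoning from the other direction: SLLW/SRLW/SRAW read only bits [4:0] of the amount register, so the SLLI that builds the amount has no user that looks above bit 4, and SLLIW is just as good. A hypothetical reduction of the pattern (the in-tree tests additionally mask the amount, and the mask folds away during selection, as the output above shows):

    define i32 @sra_shifted_amount(i32 %a, i32 %b) {
      ; ashr i32 lowers to SRAW, which uses only the low five bits of
      ; %masked, so no user of the shl demands bits above bit 4.
      %amt = shl i32 %b, 3
      %masked = and i32 %amt, 31
      %r = ashr i32 %a, %masked
      ret i32 %r
    }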

diff --git a/llvm/test/CodeGen/RISCV/srem-lkk.ll b/llvm/test/CodeGen/RISCV/srem-lkk.ll
index 8efffbabc74c..a890e04008b9 100644
--- a/llvm/test/CodeGen/RISCV/srem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-lkk.ll
@@ -54,9 +54,9 @@ define i32 @fold_srem_positive_odd(i32 %x) nounwind {
 ; RV64IM-NEXT:    addw a1, a1, a0
 ; RV64IM-NEXT:    srliw a2, a1, 31
 ; RV64IM-NEXT:    srli a1, a1, 6
-; RV64IM-NEXT:    add a1, a1, a2
+; RV64IM-NEXT:    addw a1, a1, a2
 ; RV64IM-NEXT:    addi a2, zero, 95
-; RV64IM-NEXT:    mul a1, a1, a2
+; RV64IM-NEXT:    mulw a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = srem i32 %x, 95
@@ -107,9 +107,9 @@ define i32 @fold_srem_positive_even(i32 %x) nounwind {
 ; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    srli a2, a1, 63
 ; RV64IM-NEXT:    srai a1, a1, 40
-; RV64IM-NEXT:    add a1, a1, a2
+; RV64IM-NEXT:    addw a1, a1, a2
 ; RV64IM-NEXT:    addi a2, zero, 1060
-; RV64IM-NEXT:    mul a1, a1, a2
+; RV64IM-NEXT:    mulw a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = srem i32 %x, 1060
@@ -160,9 +160,9 @@ define i32 @fold_srem_negative_odd(i32 %x) nounwind {
 ; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    srli a2, a1, 63
 ; RV64IM-NEXT:    srai a1, a1, 40
-; RV64IM-NEXT:    add a1, a1, a2
+; RV64IM-NEXT:    addw a1, a1, a2
 ; RV64IM-NEXT:    addi a2, zero, -723
-; RV64IM-NEXT:    mul a1, a1, a2
+; RV64IM-NEXT:    mulw a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = srem i32 %x, -723
@@ -216,10 +216,10 @@ define i32 @fold_srem_negative_even(i32 %x) nounwind {
 ; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    srli a2, a1, 63
 ; RV64IM-NEXT:    srai a1, a1, 40
-; RV64IM-NEXT:    add a1, a1, a2
+; RV64IM-NEXT:    addw a1, a1, a2
 ; RV64IM-NEXT:    lui a2, 1048570
 ; RV64IM-NEXT:    addiw a2, a2, 1595
-; RV64IM-NEXT:    mul a1, a1, a2
+; RV64IM-NEXT:    mulw a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = srem i32 %x, -22981
@@ -292,14 +292,13 @@ define i32 @combine_srem_sdiv(i32 %x) nounwind {
 ; RV64IM-NEXT:    addiw a2, a2, 389
 ; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    srli a1, a1, 32
-; RV64IM-NEXT:    addw a2, a1, a0
-; RV64IM-NEXT:    srliw a2, a2, 31
-; RV64IM-NEXT:    add a1, a1, a0
+; RV64IM-NEXT:    addw a1, a1, a0
+; RV64IM-NEXT:    srliw a2, a1, 31
 ; RV64IM-NEXT:    sraiw a1, a1, 6
-; RV64IM-NEXT:    add a1, a1, a2
+; RV64IM-NEXT:    addw a1, a1, a2
 ; RV64IM-NEXT:    addi a2, zero, 95
-; RV64IM-NEXT:    mul a2, a1, a2
-; RV64IM-NEXT:    sub a0, a0, a2
+; RV64IM-NEXT:    mulw a2, a1, a2
+; RV64IM-NEXT:    subw a0, a0, a2
 ; RV64IM-NEXT:    addw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = srem i32 %x, 95
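
combine_srem_sdiv shows the CSE this patch is after most directly: previously isel produced both addw a2, a1, a0 (for the sign-extended user) and a duplicate add a1, a1, a0; with the change a single ADDW serves both uses and one instruction disappears. Judging from the fragment above, the test body is along these lines (a sketch reconstructed from the visible IR, so treat it as illustrative):

    define i32 @combine_srem_sdiv(i32 %x) {
      ; srem and sdiv by the same constant share the quotient math;
      ; selecting the shared add as ADDW lets it CSE with the ADDW the
      ; sext_inreg user already needs instead of duplicating the add.
      %1 = srem i32 %x, 95
      %2 = sdiv i32 %x, 95
      %3 = add i32 %1, %2
      ret i32 %3
    }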

diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 8c57e6069117..f24e0eefcc90 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -462,11 +462,11 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64-NEXT:    and a3, a3, a4
 ; RV64-NEXT:    srli a3, a3, 32
 ; RV64-NEXT:    sb a3, 12(s0)
-; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    slliw a1, a1, 2
 ; RV64-NEXT:    srli a3, s4, 31
 ; RV64-NEXT:    and a2, a2, a3
 ; RV64-NEXT:    srli a4, a2, 31
-; RV64-NEXT:    sub a1, a4, a1
+; RV64-NEXT:    subw a1, a4, a1
 ; RV64-NEXT:    sw a1, 8(s0)
 ; RV64-NEXT:    and a0, a0, a3
 ; RV64-NEXT:    slli a1, a2, 33
@@ -649,11 +649,11 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64M-NEXT:    and a4, a4, a5
 ; RV64M-NEXT:    srli a4, a4, 32
 ; RV64M-NEXT:    sb a4, 12(a0)
-; RV64M-NEXT:    slli a2, a2, 2
+; RV64M-NEXT:    slliw a2, a2, 2
 ; RV64M-NEXT:    srli a4, a6, 31
 ; RV64M-NEXT:    and a1, a1, a4
 ; RV64M-NEXT:    srli a5, a1, 31
-; RV64M-NEXT:    sub a2, a5, a2
+; RV64M-NEXT:    subw a2, a5, a2
 ; RV64M-NEXT:    sw a2, 8(a0)
 ; RV64M-NEXT:    slli a1, a1, 33
 ; RV64M-NEXT:    and a2, a3, a4

diff --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
index 0261d3695975..42a7ee5c2396 100644
--- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
@@ -165,10 +165,10 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    add a5, a5, a1
 ; RV64IM-NEXT:    srli a2, a5, 63
 ; RV64IM-NEXT:    srli a5, a5, 6
-; RV64IM-NEXT:    add a2, a5, a2
+; RV64IM-NEXT:    addw a2, a5, a2
 ; RV64IM-NEXT:    addi a5, zero, 95
-; RV64IM-NEXT:    mul a2, a2, a5
-; RV64IM-NEXT:    sub a1, a1, a2
+; RV64IM-NEXT:    mulw a2, a2, a5
+; RV64IM-NEXT:    subw a1, a1, a2
 ; RV64IM-NEXT:    lui a2, 777976
 ; RV64IM-NEXT:    addiw a2, a2, -1057
 ; RV64IM-NEXT:    slli a2, a2, 15
@@ -180,10 +180,10 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    sub a2, a2, a4
 ; RV64IM-NEXT:    srli a5, a2, 63
 ; RV64IM-NEXT:    srli a2, a2, 6
-; RV64IM-NEXT:    add a2, a2, a5
+; RV64IM-NEXT:    addw a2, a2, a5
 ; RV64IM-NEXT:    addi a5, zero, -124
-; RV64IM-NEXT:    mul a2, a2, a5
-; RV64IM-NEXT:    sub a2, a4, a2
+; RV64IM-NEXT:    mulw a2, a2, a5
+; RV64IM-NEXT:    subw a2, a4, a2
 ; RV64IM-NEXT:    lui a4, 2675
 ; RV64IM-NEXT:    addiw a4, a4, -251
 ; RV64IM-NEXT:    slli a4, a4, 13
@@ -195,10 +195,10 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    mulh a4, a3, a4
 ; RV64IM-NEXT:    srli a5, a4, 63
 ; RV64IM-NEXT:    srli a4, a4, 5
-; RV64IM-NEXT:    add a4, a4, a5
+; RV64IM-NEXT:    addw a4, a4, a5
 ; RV64IM-NEXT:    addi a5, zero, 98
-; RV64IM-NEXT:    mul a4, a4, a5
-; RV64IM-NEXT:    sub a3, a3, a4
+; RV64IM-NEXT:    mulw a4, a4, a5
+; RV64IM-NEXT:    subw a3, a3, a4
 ; RV64IM-NEXT:    lui a4, 1040212
 ; RV64IM-NEXT:    addiw a4, a4, 1977
 ; RV64IM-NEXT:    slli a4, a4, 12
@@ -210,10 +210,10 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    mulh a4, a6, a4
 ; RV64IM-NEXT:    srli a5, a4, 63
 ; RV64IM-NEXT:    srli a4, a4, 7
-; RV64IM-NEXT:    add a4, a4, a5
+; RV64IM-NEXT:    addw a4, a4, a5
 ; RV64IM-NEXT:    addi a5, zero, -1003
-; RV64IM-NEXT:    mul a4, a4, a5
-; RV64IM-NEXT:    sub a4, a6, a4
+; RV64IM-NEXT:    mulw a4, a4, a5
+; RV64IM-NEXT:    subw a4, a6, a4
 ; RV64IM-NEXT:    sh a4, 6(a0)
 ; RV64IM-NEXT:    sh a3, 4(a0)
 ; RV64IM-NEXT:    sh a2, 2(a0)
@@ -373,31 +373,31 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    add a2, a2, a1
 ; RV64IM-NEXT:    srli a3, a2, 63
 ; RV64IM-NEXT:    srli a2, a2, 6
-; RV64IM-NEXT:    add a2, a2, a3
+; RV64IM-NEXT:    addw a2, a2, a3
 ; RV64IM-NEXT:    addi a3, zero, 95
-; RV64IM-NEXT:    mul a2, a2, a3
-; RV64IM-NEXT:    sub t0, a1, a2
+; RV64IM-NEXT:    mulw a2, a2, a3
+; RV64IM-NEXT:    subw t0, a1, a2
 ; RV64IM-NEXT:    mulh a2, a4, a5
 ; RV64IM-NEXT:    add a2, a2, a4
 ; RV64IM-NEXT:    srli a1, a2, 63
 ; RV64IM-NEXT:    srli a2, a2, 6
-; RV64IM-NEXT:    add a1, a2, a1
-; RV64IM-NEXT:    mul a1, a1, a3
-; RV64IM-NEXT:    sub a1, a4, a1
+; RV64IM-NEXT:    addw a1, a2, a1
+; RV64IM-NEXT:    mulw a1, a1, a3
+; RV64IM-NEXT:    subw a1, a4, a1
 ; RV64IM-NEXT:    mulh a2, a7, a5
 ; RV64IM-NEXT:    add a2, a2, a7
 ; RV64IM-NEXT:    srli a4, a2, 63
 ; RV64IM-NEXT:    srli a2, a2, 6
-; RV64IM-NEXT:    add a2, a2, a4
-; RV64IM-NEXT:    mul a2, a2, a3
-; RV64IM-NEXT:    sub a2, a7, a2
+; RV64IM-NEXT:    addw a2, a2, a4
+; RV64IM-NEXT:    mulw a2, a2, a3
+; RV64IM-NEXT:    subw a2, a7, a2
 ; RV64IM-NEXT:    mulh a4, a6, a5
 ; RV64IM-NEXT:    add a4, a4, a6
 ; RV64IM-NEXT:    srli a5, a4, 63
 ; RV64IM-NEXT:    srli a4, a4, 6
-; RV64IM-NEXT:    add a4, a4, a5
-; RV64IM-NEXT:    mul a3, a4, a3
-; RV64IM-NEXT:    sub a3, a6, a3
+; RV64IM-NEXT:    addw a4, a4, a5
+; RV64IM-NEXT:    mulw a3, a4, a3
+; RV64IM-NEXT:    subw a3, a6, a3
 ; RV64IM-NEXT:    sh a3, 6(a0)
 ; RV64IM-NEXT:    sh a2, 4(a0)
 ; RV64IM-NEXT:    sh a1, 2(a0)
@@ -579,10 +579,10 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __divdi3@plt
-; RV64I-NEXT:    add a0, s8, a0
-; RV64I-NEXT:    add a1, s7, s1
-; RV64I-NEXT:    add a2, s6, s4
-; RV64I-NEXT:    add a3, s5, s9
+; RV64I-NEXT:    addw a0, s8, a0
+; RV64I-NEXT:    addw a1, s7, s1
+; RV64I-NEXT:    addw a2, s6, s4
+; RV64I-NEXT:    addw a3, s5, s9
 ; RV64I-NEXT:    sh a3, 6(s0)
 ; RV64I-NEXT:    sh a2, 4(s0)
 ; RV64I-NEXT:    sh a1, 2(s0)
@@ -619,35 +619,35 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    add a2, a2, a1
 ; RV64IM-NEXT:    srli a3, a2, 63
 ; RV64IM-NEXT:    srai a2, a2, 6
-; RV64IM-NEXT:    add t3, a2, a3
+; RV64IM-NEXT:    addw t3, a2, a3
 ; RV64IM-NEXT:    addi t0, zero, 95
-; RV64IM-NEXT:    mul a3, t3, t0
-; RV64IM-NEXT:    sub t1, a1, a3
+; RV64IM-NEXT:    mulw a3, t3, t0
+; RV64IM-NEXT:    subw t1, a1, a3
 ; RV64IM-NEXT:    mulh a3, a4, a5
 ; RV64IM-NEXT:    add a3, a3, a4
 ; RV64IM-NEXT:    srli a1, a3, 63
 ; RV64IM-NEXT:    srai a3, a3, 6
-; RV64IM-NEXT:    add a1, a3, a1
-; RV64IM-NEXT:    mul a3, a1, t0
-; RV64IM-NEXT:    sub t2, a4, a3
+; RV64IM-NEXT:    addw a1, a3, a1
+; RV64IM-NEXT:    mulw a3, a1, t0
+; RV64IM-NEXT:    subw t2, a4, a3
 ; RV64IM-NEXT:    mulh a4, a7, a5
 ; RV64IM-NEXT:    add a4, a4, a7
 ; RV64IM-NEXT:    srli a3, a4, 63
 ; RV64IM-NEXT:    srai a4, a4, 6
-; RV64IM-NEXT:    add a3, a4, a3
-; RV64IM-NEXT:    mul a4, a3, t0
-; RV64IM-NEXT:    sub a4, a7, a4
+; RV64IM-NEXT:    addw a3, a4, a3
+; RV64IM-NEXT:    mulw a4, a3, t0
+; RV64IM-NEXT:    subw a4, a7, a4
 ; RV64IM-NEXT:    mulh a5, a6, a5
 ; RV64IM-NEXT:    add a5, a5, a6
 ; RV64IM-NEXT:    srli a2, a5, 63
 ; RV64IM-NEXT:    srai a5, a5, 6
-; RV64IM-NEXT:    add a2, a5, a2
-; RV64IM-NEXT:    mul a5, a2, t0
-; RV64IM-NEXT:    sub a5, a6, a5
-; RV64IM-NEXT:    add a2, a5, a2
-; RV64IM-NEXT:    add a3, a4, a3
-; RV64IM-NEXT:    add a1, t2, a1
-; RV64IM-NEXT:    add a4, t1, t3
+; RV64IM-NEXT:    addw a2, a5, a2
+; RV64IM-NEXT:    mulw a5, a2, t0
+; RV64IM-NEXT:    subw a5, a6, a5
+; RV64IM-NEXT:    addw a2, a5, a2
+; RV64IM-NEXT:    addw a3, a4, a3
+; RV64IM-NEXT:    addw a1, t2, a1
+; RV64IM-NEXT:    addw a4, t1, t3
 ; RV64IM-NEXT:    sh a4, 6(a0)
 ; RV64IM-NEXT:    sh a1, 4(a0)
 ; RV64IM-NEXT:    sh a3, 2(a0)
@@ -750,15 +750,15 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    srli a4, a2, 58
 ; RV64I-NEXT:    add a4, a2, a4
 ; RV64I-NEXT:    andi a4, a4, -64
-; RV64I-NEXT:    sub s2, a2, a4
+; RV64I-NEXT:    subw s2, a2, a4
 ; RV64I-NEXT:    srli a2, a1, 59
 ; RV64I-NEXT:    add a2, a1, a2
 ; RV64I-NEXT:    andi a2, a2, -32
-; RV64I-NEXT:    sub s3, a1, a2
+; RV64I-NEXT:    subw s3, a1, a2
 ; RV64I-NEXT:    srli a1, a3, 61
 ; RV64I-NEXT:    add a1, a3, a1
 ; RV64I-NEXT:    andi a1, a1, -8
-; RV64I-NEXT:    sub s1, a3, a1
+; RV64I-NEXT:    subw s1, a3, a1
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    sh a0, 6(s0)
@@ -791,22 +791,22 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    add a5, a5, a1
 ; RV64IM-NEXT:    srli a2, a5, 63
 ; RV64IM-NEXT:    srli a5, a5, 6
-; RV64IM-NEXT:    add a2, a5, a2
+; RV64IM-NEXT:    addw a2, a5, a2
 ; RV64IM-NEXT:    addi a5, zero, 95
-; RV64IM-NEXT:    mul a2, a2, a5
-; RV64IM-NEXT:    sub a1, a1, a2
+; RV64IM-NEXT:    mulw a2, a2, a5
+; RV64IM-NEXT:    subw a1, a1, a2
 ; RV64IM-NEXT:    srli a2, a4, 58
 ; RV64IM-NEXT:    add a2, a4, a2
 ; RV64IM-NEXT:    andi a2, a2, -64
-; RV64IM-NEXT:    sub a2, a4, a2
+; RV64IM-NEXT:    subw a2, a4, a2
 ; RV64IM-NEXT:    srli a4, a3, 59
 ; RV64IM-NEXT:    add a4, a3, a4
 ; RV64IM-NEXT:    andi a4, a4, -32
-; RV64IM-NEXT:    sub a3, a3, a4
+; RV64IM-NEXT:    subw a3, a3, a4
 ; RV64IM-NEXT:    srli a4, a6, 61
 ; RV64IM-NEXT:    add a4, a6, a4
 ; RV64IM-NEXT:    andi a4, a4, -8
-; RV64IM-NEXT:    sub a4, a6, a4
+; RV64IM-NEXT:    subw a4, a6, a4
 ; RV64IM-NEXT:    sh a4, 4(a0)
 ; RV64IM-NEXT:    sh a3, 2(a0)
 ; RV64IM-NEXT:    sh a2, 0(a0)
@@ -948,10 +948,10 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    add a4, a4, a1
 ; RV64IM-NEXT:    srli a5, a4, 63
 ; RV64IM-NEXT:    srli a4, a4, 4
-; RV64IM-NEXT:    add a4, a4, a5
+; RV64IM-NEXT:    addw a4, a4, a5
 ; RV64IM-NEXT:    addi a5, zero, 23
-; RV64IM-NEXT:    mul a4, a4, a5
-; RV64IM-NEXT:    sub a1, a1, a4
+; RV64IM-NEXT:    mulw a4, a4, a5
+; RV64IM-NEXT:    subw a1, a1, a4
 ; RV64IM-NEXT:    lui a4, 6413
 ; RV64IM-NEXT:    addiw a4, a4, 1265
 ; RV64IM-NEXT:    slli a4, a4, 13
@@ -963,10 +963,10 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    mulh a4, a3, a4
 ; RV64IM-NEXT:    srli a5, a4, 63
 ; RV64IM-NEXT:    srli a4, a4, 8
-; RV64IM-NEXT:    add a4, a4, a5
+; RV64IM-NEXT:    addw a4, a4, a5
 ; RV64IM-NEXT:    addi a5, zero, 654
-; RV64IM-NEXT:    mul a4, a4, a5
-; RV64IM-NEXT:    sub a3, a3, a4
+; RV64IM-NEXT:    mulw a4, a4, a5
+; RV64IM-NEXT:    subw a3, a3, a4
 ; RV64IM-NEXT:    lui a4, 12375
 ; RV64IM-NEXT:    addiw a4, a4, -575
 ; RV64IM-NEXT:    slli a4, a4, 12
@@ -978,11 +978,11 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    mulh a4, a2, a4
 ; RV64IM-NEXT:    srli a5, a4, 63
 ; RV64IM-NEXT:    srli a4, a4, 11
-; RV64IM-NEXT:    add a4, a4, a5
+; RV64IM-NEXT:    addw a4, a4, a5
 ; RV64IM-NEXT:    lui a5, 1
 ; RV64IM-NEXT:    addiw a5, a5, 1327
-; RV64IM-NEXT:    mul a4, a4, a5
-; RV64IM-NEXT:    sub a2, a2, a4
+; RV64IM-NEXT:    mulw a4, a4, a5
+; RV64IM-NEXT:    subw a2, a2, a4
 ; RV64IM-NEXT:    sh zero, 0(a0)
 ; RV64IM-NEXT:    sh a2, 6(a0)
 ; RV64IM-NEXT:    sh a3, 2(a0)
@@ -1082,7 +1082,7 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    add a1, a2, a1
 ; RV64I-NEXT:    lui a3, 8
 ; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    sub s3, a2, a1
+; RV64I-NEXT:    subw s3, a2, a1
 ; RV64I-NEXT:    addi a1, zero, 23
 ; RV64I-NEXT:    call __moddi3@plt
 ; RV64I-NEXT:    mv s1, a0
@@ -1119,10 +1119,10 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    add a4, a4, a1
 ; RV64IM-NEXT:    srli a5, a4, 63
 ; RV64IM-NEXT:    srli a4, a4, 4
-; RV64IM-NEXT:    add a4, a4, a5
+; RV64IM-NEXT:    addw a4, a4, a5
 ; RV64IM-NEXT:    addi a5, zero, 23
-; RV64IM-NEXT:    mul a4, a4, a5
-; RV64IM-NEXT:    sub a1, a1, a4
+; RV64IM-NEXT:    mulw a4, a4, a5
+; RV64IM-NEXT:    subw a1, a1, a4
 ; RV64IM-NEXT:    lui a4, 12375
 ; RV64IM-NEXT:    addiw a4, a4, -575
 ; RV64IM-NEXT:    slli a4, a4, 12
@@ -1134,16 +1134,16 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    mulh a4, a3, a4
 ; RV64IM-NEXT:    srli a5, a4, 63
 ; RV64IM-NEXT:    srli a4, a4, 11
-; RV64IM-NEXT:    add a4, a4, a5
+; RV64IM-NEXT:    addw a4, a4, a5
 ; RV64IM-NEXT:    lui a5, 1
 ; RV64IM-NEXT:    addiw a5, a5, 1327
-; RV64IM-NEXT:    mul a4, a4, a5
-; RV64IM-NEXT:    sub a3, a3, a4
+; RV64IM-NEXT:    mulw a4, a4, a5
+; RV64IM-NEXT:    subw a3, a3, a4
 ; RV64IM-NEXT:    srli a4, a2, 49
 ; RV64IM-NEXT:    add a4, a2, a4
 ; RV64IM-NEXT:    lui a5, 8
 ; RV64IM-NEXT:    and a4, a4, a5
-; RV64IM-NEXT:    sub a2, a2, a4
+; RV64IM-NEXT:    subw a2, a2, a4
 ; RV64IM-NEXT:    sh zero, 0(a0)
 ; RV64IM-NEXT:    sh a2, 2(a0)
 ; RV64IM-NEXT:    sh a3, 6(a0)

diff --git a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
index 923cf75996a6..605d7eeef2a3 100644
--- a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
@@ -230,7 +230,7 @@ define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
-; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    mulw a1, a1, a2
 ; RV64I-NEXT:    slli a1, a1, 48
 ; RV64I-NEXT:    srai a1, a1, 48
 ; RV64I-NEXT:    sub a0, a0, a1
@@ -308,7 +308,7 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
-; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    mulw a1, a1, a2
 ; RV64I-NEXT:    slli a1, a1, 56
 ; RV64I-NEXT:    srai a1, a1, 56
 ; RV64I-NEXT:    sub a0, a0, a1
@@ -383,7 +383,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 60
 ; RV64I-NEXT:    srai a0, a0, 60
-; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    mulw a1, a1, a2
 ; RV64I-NEXT:    slli a1, a1, 60
 ; RV64I-NEXT:    srai a1, a1, 60
 ; RV64I-NEXT:    sub a0, a0, a1
@@ -420,7 +420,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    slli a0, a0, 60
 ; RV64IZbb-NEXT:    srai a0, a0, 60
-; RV64IZbb-NEXT:    mul a1, a1, a2
+; RV64IZbb-NEXT:    mulw a1, a1, a2
 ; RV64IZbb-NEXT:    slli a1, a1, 60
 ; RV64IZbb-NEXT:    srai a1, a1, 60
 ; RV64IZbb-NEXT:    sub a0, a0, a1

diff --git a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
index 589374493d71..251e53adfe8f 100644
--- a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
@@ -25,7 +25,7 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ;
 ; RV64I-LABEL: func32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    mulw a1, a1, a2
 ; RV64I-NEXT:    addw a1, a0, a1
 ; RV64I-NEXT:    sext.w a2, a0
 ; RV64I-NEXT:    addi a0, zero, -1

diff --git a/llvm/test/CodeGen/RISCV/urem-lkk.ll b/llvm/test/CodeGen/RISCV/urem-lkk.ll
index f448ec6fa906..e34845807c3f 100644
--- a/llvm/test/CodeGen/RISCV/urem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-lkk.ll
@@ -53,12 +53,12 @@ define i32 @fold_urem_positive_odd(i32 %x) nounwind {
 ; RV64IM-NEXT:    addiw a2, a2, 777
 ; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    srli a1, a1, 32
-; RV64IM-NEXT:    sub a2, a0, a1
+; RV64IM-NEXT:    subw a2, a0, a1
 ; RV64IM-NEXT:    srliw a2, a2, 1
 ; RV64IM-NEXT:    add a1, a2, a1
 ; RV64IM-NEXT:    srli a1, a1, 6
 ; RV64IM-NEXT:    addi a2, zero, 95
-; RV64IM-NEXT:    mul a1, a1, a2
+; RV64IM-NEXT:    mulw a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = urem i32 %x, 95
@@ -110,7 +110,7 @@ define i32 @fold_urem_positive_even(i32 %x) nounwind {
 ; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    srli a1, a1, 42
 ; RV64IM-NEXT:    addi a2, zero, 1060
-; RV64IM-NEXT:    mul a1, a1, a2
+; RV64IM-NEXT:    mulw a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = urem i32 %x, 1060
@@ -185,13 +185,13 @@ define i32 @combine_urem_udiv(i32 %x) nounwind {
 ; RV64IM-NEXT:    addiw a2, a2, 777
 ; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    srli a1, a1, 32
-; RV64IM-NEXT:    sub a2, a0, a1
+; RV64IM-NEXT:    subw a2, a0, a1
 ; RV64IM-NEXT:    srliw a2, a2, 1
 ; RV64IM-NEXT:    add a1, a2, a1
 ; RV64IM-NEXT:    srli a1, a1, 6
 ; RV64IM-NEXT:    addi a2, zero, 95
-; RV64IM-NEXT:    mul a2, a1, a2
-; RV64IM-NEXT:    sub a0, a0, a2
+; RV64IM-NEXT:    mulw a2, a1, a2
+; RV64IM-NEXT:    subw a0, a0, a2
 ; RV64IM-NEXT:    addw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = urem i32 %x, 95

diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
index e91b0834e7d2..17b68b99fc0b 100644
--- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
@@ -162,8 +162,8 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    add a2, a2, a5
 ; RV64IM-NEXT:    srli a2, a2, 6
 ; RV64IM-NEXT:    addi a5, zero, 95
-; RV64IM-NEXT:    mul a2, a2, a5
-; RV64IM-NEXT:    sub a1, a1, a2
+; RV64IM-NEXT:    mulw a2, a2, a5
+; RV64IM-NEXT:    subw a1, a1, a2
 ; RV64IM-NEXT:    srli a2, a4, 2
 ; RV64IM-NEXT:    lui a5, 264
 ; RV64IM-NEXT:    addiw a5, a5, 1057
@@ -176,8 +176,8 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    mulhu a2, a2, a5
 ; RV64IM-NEXT:    srli a2, a2, 3
 ; RV64IM-NEXT:    addi a5, zero, 124
-; RV64IM-NEXT:    mul a2, a2, a5
-; RV64IM-NEXT:    sub a2, a4, a2
+; RV64IM-NEXT:    mulw a2, a2, a5
+; RV64IM-NEXT:    subw a2, a4, a2
 ; RV64IM-NEXT:    srli a4, a3, 1
 ; RV64IM-NEXT:    lui a5, 2675
 ; RV64IM-NEXT:    addiw a5, a5, -251
@@ -190,8 +190,8 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    mulhu a4, a4, a5
 ; RV64IM-NEXT:    srli a4, a4, 4
 ; RV64IM-NEXT:    addi a5, zero, 98
-; RV64IM-NEXT:    mul a4, a4, a5
-; RV64IM-NEXT:    sub a3, a3, a4
+; RV64IM-NEXT:    mulw a4, a4, a5
+; RV64IM-NEXT:    subw a3, a3, a4
 ; RV64IM-NEXT:    lui a4, 8364
 ; RV64IM-NEXT:    addiw a4, a4, -1977
 ; RV64IM-NEXT:    slli a4, a4, 12
@@ -203,8 +203,8 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    mulhu a4, a6, a4
 ; RV64IM-NEXT:    srli a4, a4, 7
 ; RV64IM-NEXT:    addi a5, zero, 1003
-; RV64IM-NEXT:    mul a4, a4, a5
-; RV64IM-NEXT:    sub a4, a6, a4
+; RV64IM-NEXT:    mulw a4, a4, a5
+; RV64IM-NEXT:    subw a4, a6, a4
 ; RV64IM-NEXT:    sh a4, 6(a0)
 ; RV64IM-NEXT:    sh a3, 4(a0)
 ; RV64IM-NEXT:    sh a2, 2(a0)
@@ -366,29 +366,29 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    add a2, a3, a2
 ; RV64IM-NEXT:    srli a2, a2, 6
 ; RV64IM-NEXT:    addi a3, zero, 95
-; RV64IM-NEXT:    mul a2, a2, a3
-; RV64IM-NEXT:    sub t0, a1, a2
+; RV64IM-NEXT:    mulw a2, a2, a3
+; RV64IM-NEXT:    subw t0, a1, a2
 ; RV64IM-NEXT:    mulhu a2, a4, a5
 ; RV64IM-NEXT:    sub a1, a4, a2
 ; RV64IM-NEXT:    srli a1, a1, 1
 ; RV64IM-NEXT:    add a1, a1, a2
 ; RV64IM-NEXT:    srli a1, a1, 6
-; RV64IM-NEXT:    mul a1, a1, a3
-; RV64IM-NEXT:    sub a1, a4, a1
+; RV64IM-NEXT:    mulw a1, a1, a3
+; RV64IM-NEXT:    subw a1, a4, a1
 ; RV64IM-NEXT:    mulhu a2, a7, a5
 ; RV64IM-NEXT:    sub a4, a7, a2
 ; RV64IM-NEXT:    srli a4, a4, 1
 ; RV64IM-NEXT:    add a2, a4, a2
 ; RV64IM-NEXT:    srli a2, a2, 6
-; RV64IM-NEXT:    mul a2, a2, a3
-; RV64IM-NEXT:    sub a2, a7, a2
+; RV64IM-NEXT:    mulw a2, a2, a3
+; RV64IM-NEXT:    subw a2, a7, a2
 ; RV64IM-NEXT:    mulhu a4, a6, a5
 ; RV64IM-NEXT:    sub a5, a6, a4
 ; RV64IM-NEXT:    srli a5, a5, 1
 ; RV64IM-NEXT:    add a4, a5, a4
 ; RV64IM-NEXT:    srli a4, a4, 6
-; RV64IM-NEXT:    mul a3, a4, a3
-; RV64IM-NEXT:    sub a3, a6, a3
+; RV64IM-NEXT:    mulw a3, a4, a3
+; RV64IM-NEXT:    subw a3, a6, a3
 ; RV64IM-NEXT:    sh a3, 6(a0)
 ; RV64IM-NEXT:    sh a2, 4(a0)
 ; RV64IM-NEXT:    sh a1, 2(a0)
@@ -570,10 +570,10 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    addi a1, zero, 95
 ; RV64I-NEXT:    mv a0, s2
 ; RV64I-NEXT:    call __udivdi3@plt
-; RV64I-NEXT:    add a0, s8, a0
-; RV64I-NEXT:    add a1, s7, s1
-; RV64I-NEXT:    add a2, s6, s4
-; RV64I-NEXT:    add a3, s5, s9
+; RV64I-NEXT:    addw a0, s8, a0
+; RV64I-NEXT:    addw a1, s7, s1
+; RV64I-NEXT:    addw a2, s6, s4
+; RV64I-NEXT:    addw a3, s5, s9
 ; RV64I-NEXT:    sh a3, 6(s0)
 ; RV64I-NEXT:    sh a2, 4(s0)
 ; RV64I-NEXT:    sh a1, 2(s0)
@@ -612,33 +612,33 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    add a2, a3, a2
 ; RV64IM-NEXT:    srli t3, a2, 6
 ; RV64IM-NEXT:    addi t0, zero, 95
-; RV64IM-NEXT:    mul a3, t3, t0
-; RV64IM-NEXT:    sub t1, a1, a3
+; RV64IM-NEXT:    mulw a3, t3, t0
+; RV64IM-NEXT:    subw t1, a1, a3
 ; RV64IM-NEXT:    mulhu a3, a4, a5
 ; RV64IM-NEXT:    sub a1, a4, a3
 ; RV64IM-NEXT:    srli a1, a1, 1
 ; RV64IM-NEXT:    add a1, a1, a3
 ; RV64IM-NEXT:    srli a1, a1, 6
-; RV64IM-NEXT:    mul a3, a1, t0
-; RV64IM-NEXT:    sub t2, a4, a3
+; RV64IM-NEXT:    mulw a3, a1, t0
+; RV64IM-NEXT:    subw t2, a4, a3
 ; RV64IM-NEXT:    mulhu a4, a7, a5
 ; RV64IM-NEXT:    sub a3, a7, a4
 ; RV64IM-NEXT:    srli a3, a3, 1
 ; RV64IM-NEXT:    add a3, a3, a4
 ; RV64IM-NEXT:    srli a3, a3, 6
-; RV64IM-NEXT:    mul a4, a3, t0
-; RV64IM-NEXT:    sub a4, a7, a4
+; RV64IM-NEXT:    mulw a4, a3, t0
+; RV64IM-NEXT:    subw a4, a7, a4
 ; RV64IM-NEXT:    mulhu a5, a6, a5
 ; RV64IM-NEXT:    sub a2, a6, a5
 ; RV64IM-NEXT:    srli a2, a2, 1
 ; RV64IM-NEXT:    add a2, a2, a5
 ; RV64IM-NEXT:    srli a2, a2, 6
-; RV64IM-NEXT:    mul a5, a2, t0
-; RV64IM-NEXT:    sub a5, a6, a5
-; RV64IM-NEXT:    add a2, a5, a2
-; RV64IM-NEXT:    add a3, a4, a3
-; RV64IM-NEXT:    add a1, t2, a1
-; RV64IM-NEXT:    add a4, t1, t3
+; RV64IM-NEXT:    mulw a5, a2, t0
+; RV64IM-NEXT:    subw a5, a6, a5
+; RV64IM-NEXT:    addw a2, a5, a2
+; RV64IM-NEXT:    addw a3, a4, a3
+; RV64IM-NEXT:    addw a1, t2, a1
+; RV64IM-NEXT:    addw a4, t1, t3
 ; RV64IM-NEXT:    sh a4, 6(a0)
 ; RV64IM-NEXT:    sh a1, 4(a0)
 ; RV64IM-NEXT:    sh a3, 2(a0)
@@ -759,8 +759,8 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    add a2, a2, a5
 ; RV64IM-NEXT:    srli a2, a2, 6
 ; RV64IM-NEXT:    addi a5, zero, 95
-; RV64IM-NEXT:    mul a2, a2, a5
-; RV64IM-NEXT:    sub a1, a1, a2
+; RV64IM-NEXT:    mulw a2, a2, a5
+; RV64IM-NEXT:    subw a1, a1, a2
 ; RV64IM-NEXT:    andi a2, a4, 63
 ; RV64IM-NEXT:    andi a3, a3, 31
 ; RV64IM-NEXT:    andi a4, a6, 7
@@ -900,8 +900,8 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    add a4, a5, a4
 ; RV64IM-NEXT:    srli a4, a4, 4
 ; RV64IM-NEXT:    addi a5, zero, 23
-; RV64IM-NEXT:    mul a4, a4, a5
-; RV64IM-NEXT:    sub a1, a1, a4
+; RV64IM-NEXT:    mulw a4, a4, a5
+; RV64IM-NEXT:    subw a1, a1, a4
 ; RV64IM-NEXT:    srli a4, a3, 1
 ; RV64IM-NEXT:    lui a5, 6413
 ; RV64IM-NEXT:    addiw a5, a5, 1265
@@ -914,8 +914,8 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    mulhu a4, a4, a5
 ; RV64IM-NEXT:    srli a4, a4, 7
 ; RV64IM-NEXT:    addi a5, zero, 654
-; RV64IM-NEXT:    mul a4, a4, a5
-; RV64IM-NEXT:    sub a3, a3, a4
+; RV64IM-NEXT:    mulw a4, a4, a5
+; RV64IM-NEXT:    subw a3, a3, a4
 ; RV64IM-NEXT:    lui a4, 1044567
 ; RV64IM-NEXT:    addiw a4, a4, -575
 ; RV64IM-NEXT:    slli a4, a4, 12
@@ -928,8 +928,8 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a4, a4, 12
 ; RV64IM-NEXT:    lui a5, 1
 ; RV64IM-NEXT:    addiw a5, a5, 1327
-; RV64IM-NEXT:    mul a4, a4, a5
-; RV64IM-NEXT:    sub a2, a2, a4
+; RV64IM-NEXT:    mulw a4, a4, a5
+; RV64IM-NEXT:    subw a2, a2, a4
 ; RV64IM-NEXT:    sh zero, 0(a0)
 ; RV64IM-NEXT:    sh a2, 6(a0)
 ; RV64IM-NEXT:    sh a3, 2(a0)

diff --git a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
index beeaf54e4238..7e28cfbe5c37 100644
--- a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
@@ -25,7 +25,7 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ;
 ; RV64I-LABEL: func32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    mulw a1, a1, a2
 ; RV64I-NEXT:    subw a1, a0, a1
 ; RV64I-NEXT:    sext.w a2, a0
 ; RV64I-NEXT:    mv a0, zero

diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll
index 7efa1a372603..59782bd448b6 100644
--- a/llvm/test/CodeGen/RISCV/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/vararg.ll
@@ -1314,8 +1314,8 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addi a3, a0, 8
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a3, 8(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld a0, 0(a0)
-; LP64-LP64F-LP64D-FPELIM-NEXT:    add a1, a1, s0
-; LP64-LP64F-LP64D-FPELIM-NEXT:    add a1, a1, a2
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addw a1, a1, s0
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addw a1, a1, a2
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    addw a0, a1, a0
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-FPELIM-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -1357,8 +1357,8 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addi a3, a0, 8
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    sd a3, -32(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld a0, 0(a0)
-; LP64-LP64F-LP64D-WITHFP-NEXT:    add a1, a1, s1
-; LP64-LP64F-LP64D-WITHFP-NEXT:    add a1, a1, a2
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addw a1, a1, s1
+; LP64-LP64F-LP64D-WITHFP-NEXT:    addw a1, a1, a2
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    addw a0, a1, a0
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
index 93789c0085de..5fe5d68922cf 100644
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -358,12 +358,10 @@ define zeroext i1 @uaddo.i32(i32 %v1, i32 %v2, i32* %res) {
 ;
 ; RV64-LABEL: uaddo.i32:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addw a3, a0, a1
-; RV64-NEXT:    sext.w a4, a0
-; RV64-NEXT:    sltu a3, a3, a4
-; RV64-NEXT:    add a0, a0, a1
-; RV64-NEXT:    sw a0, 0(a2)
-; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    addw a1, a0, a1
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    sltu a0, a1, a0
+; RV64-NEXT:    sw a1, 0(a2)
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: uaddo.i32:
@@ -375,12 +373,10 @@ define zeroext i1 @uaddo.i32(i32 %v1, i32 %v2, i32* %res) {
 ;
 ; RV64ZBA-LABEL: uaddo.i32:
 ; RV64ZBA:       # %bb.0: # %entry
-; RV64ZBA-NEXT:    addw a3, a0, a1
-; RV64ZBA-NEXT:    sext.w a4, a0
-; RV64ZBA-NEXT:    sltu a3, a3, a4
-; RV64ZBA-NEXT:    add a0, a0, a1
-; RV64ZBA-NEXT:    sw a0, 0(a2)
-; RV64ZBA-NEXT:    mv a0, a3
+; RV64ZBA-NEXT:    addw a1, a0, a1
+; RV64ZBA-NEXT:    sext.w a0, a0
+; RV64ZBA-NEXT:    sltu a0, a1, a0
+; RV64ZBA-NEXT:    sw a1, 0(a2)
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
@@ -402,10 +398,8 @@ define zeroext i1 @uaddo.i32.constant(i32 %v1, i32* %res) {
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    sext.w a2, a0
 ; RV64-NEXT:    addiw a3, a0, -2
-; RV64-NEXT:    sltu a2, a3, a2
-; RV64-NEXT:    addi a0, a0, -2
-; RV64-NEXT:    sw a0, 0(a1)
-; RV64-NEXT:    mv a0, a2
+; RV64-NEXT:    sltu a0, a3, a2
+; RV64-NEXT:    sw a3, 0(a1)
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: uaddo.i32.constant:
@@ -419,10 +413,8 @@ define zeroext i1 @uaddo.i32.constant(i32 %v1, i32* %res) {
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    sext.w a2, a0
 ; RV64ZBA-NEXT:    addiw a3, a0, -2
-; RV64ZBA-NEXT:    sltu a2, a3, a2
-; RV64ZBA-NEXT:    addi a0, a0, -2
-; RV64ZBA-NEXT:    sw a0, 0(a1)
-; RV64ZBA-NEXT:    mv a0, a2
+; RV64ZBA-NEXT:    sltu a0, a3, a2
+; RV64ZBA-NEXT:    sw a3, 0(a1)
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 -2)
@@ -638,12 +630,10 @@ define zeroext i1 @usubo.i32(i32 %v1, i32 %v2, i32* %res) {
 ;
 ; RV64-LABEL: usubo.i32:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    subw a3, a0, a1
-; RV64-NEXT:    sext.w a4, a0
-; RV64-NEXT:    sltu a3, a4, a3
-; RV64-NEXT:    sub a0, a0, a1
-; RV64-NEXT:    sw a0, 0(a2)
-; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    subw a1, a0, a1
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    sltu a0, a0, a1
+; RV64-NEXT:    sw a1, 0(a2)
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: usubo.i32:
@@ -655,12 +645,10 @@ define zeroext i1 @usubo.i32(i32 %v1, i32 %v2, i32* %res) {
 ;
 ; RV64ZBA-LABEL: usubo.i32:
 ; RV64ZBA:       # %bb.0: # %entry
-; RV64ZBA-NEXT:    subw a3, a0, a1
-; RV64ZBA-NEXT:    sext.w a4, a0
-; RV64ZBA-NEXT:    sltu a3, a4, a3
-; RV64ZBA-NEXT:    sub a0, a0, a1
-; RV64ZBA-NEXT:    sw a0, 0(a2)
-; RV64ZBA-NEXT:    mv a0, a3
+; RV64ZBA-NEXT:    subw a1, a0, a1
+; RV64ZBA-NEXT:    sext.w a0, a0
+; RV64ZBA-NEXT:    sltu a0, a0, a1
+; RV64ZBA-NEXT:    sw a1, 0(a2)
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
@@ -681,11 +669,9 @@ define zeroext i1 @usubo.i32.constant.rhs(i32 %v1, i32* %res) {
 ; RV64-LABEL: usubo.i32.constant.rhs:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    addiw a2, a0, 2
-; RV64-NEXT:    sext.w a3, a0
-; RV64-NEXT:    sltu a2, a3, a2
-; RV64-NEXT:    addi a0, a0, 2
-; RV64-NEXT:    sw a0, 0(a1)
-; RV64-NEXT:    mv a0, a2
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    sltu a0, a0, a2
+; RV64-NEXT:    sw a2, 0(a1)
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: usubo.i32.constant.rhs:
@@ -698,11 +684,9 @@ define zeroext i1 @usubo.i32.constant.rhs(i32 %v1, i32* %res) {
 ; RV64ZBA-LABEL: usubo.i32.constant.rhs:
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    addiw a2, a0, 2
-; RV64ZBA-NEXT:    sext.w a3, a0
-; RV64ZBA-NEXT:    sltu a2, a3, a2
-; RV64ZBA-NEXT:    addi a0, a0, 2
-; RV64ZBA-NEXT:    sw a0, 0(a1)
-; RV64ZBA-NEXT:    mv a0, a2
+; RV64ZBA-NEXT:    sext.w a0, a0
+; RV64ZBA-NEXT:    sltu a0, a0, a2
+; RV64ZBA-NEXT:    sw a2, 0(a1)
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 -2)
@@ -724,13 +708,11 @@ define zeroext i1 @usubo.i32.constant.lhs(i32 %v1, i32* %res) {
 ;
 ; RV64-LABEL: usubo.i32.constant.lhs:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addi a3, zero, -2
-; RV64-NEXT:    subw a2, a3, a0
-; RV64-NEXT:    addi a2, a2, 1
-; RV64-NEXT:    seqz a2, a2
-; RV64-NEXT:    sub a0, a3, a0
-; RV64-NEXT:    sw a0, 0(a1)
-; RV64-NEXT:    mv a0, a2
+; RV64-NEXT:    addi a2, zero, -2
+; RV64-NEXT:    subw a2, a2, a0
+; RV64-NEXT:    addi a0, a2, 1
+; RV64-NEXT:    seqz a0, a0
+; RV64-NEXT:    sw a2, 0(a1)
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: usubo.i32.constant.lhs:
@@ -744,13 +726,11 @@ define zeroext i1 @usubo.i32.constant.lhs(i32 %v1, i32* %res) {
 ;
 ; RV64ZBA-LABEL: usubo.i32.constant.lhs:
 ; RV64ZBA:       # %bb.0: # %entry
-; RV64ZBA-NEXT:    addi a3, zero, -2
-; RV64ZBA-NEXT:    subw a2, a3, a0
-; RV64ZBA-NEXT:    addi a2, a2, 1
-; RV64ZBA-NEXT:    seqz a2, a2
-; RV64ZBA-NEXT:    sub a0, a3, a0
-; RV64ZBA-NEXT:    sw a0, 0(a1)
-; RV64ZBA-NEXT:    mv a0, a2
+; RV64ZBA-NEXT:    addi a2, zero, -2
+; RV64ZBA-NEXT:    subw a2, a2, a0
+; RV64ZBA-NEXT:    addi a0, a2, 1
+; RV64ZBA-NEXT:    seqz a0, a0
+; RV64ZBA-NEXT:    sw a2, 0(a1)
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 -2, i32 %v1)

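An illustrative note, not part of the committed diff: the srem/urem check
updates above follow one pattern, where a mul/sub remainder is only consumed
through a 16-bit store (sh), so the W forms become legal. Below is a minimal
hypothetical reproducer in that spirit; the function and value names are
invented and it is not taken from the patch's test suite. Running it through
"llc -mtriple=riscv64 -mattr=+m" should be able to produce mulw/subw for the
remainder computation, mirroring the RV64IM checks above.

; Hypothetical example, not from the test suite.
define void @low_bits_demo(i32 signext %x, i16* %p) nounwind {
  %q = sdiv i32 %x, 95        ; with +m this expands to a mulh + shift sequence
  %m = mul i32 %q, 95         ; candidate for MULW
  %r = sub i32 %x, %m         ; srem-style remainder, candidate for SUBW
  %t = trunc i32 %r to i16
  store i16 %t, i16* %p       ; only the low 16 bits of %r are demanded
  ret void
}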