[llvm] d741a31 - [RISCV][CodeGen][SelectionDAG] Recursively check hasAllNBitUsers for logical machine opcodes

Craig Topper via llvm-commits <llvm-commits@lists.llvm.org>
Wed Dec 14 15:16:12 PST 2022


Author: Nitin John Raj
Date: 2022-12-14T15:15:30-08:00
New Revision: d741a31a39041413a705a50ff94b4d0bc7aaf598

URL: https://github.com/llvm/llvm-project/commit/d741a31a39041413a705a50ff94b4d0bc7aaf598
DIFF: https://github.com/llvm/llvm-project/commit/d741a31a39041413a705a50ff94b4d0bc7aaf598.diff

LOG: [RISCV][CodeGen][SelectionDAG] Recursively check hasAllNBitUsers for logical machine opcodes

We don't have W versions of AND/OR/XOR/ANDN/ORN/XNOR, so we recursively check the users of those instructions as well. The recursion is limited to SelectionDAG::MaxRecursionDepth levels.

To support this, hasAllNBitUsers gains a Depth argument: all existing callers pass 0 (the default), each recursive call increments it by 1, and the function conservatively returns false once Depth reaches SelectionDAG::MaxRecursionDepth.
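
For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of the depth-limited user walk. Node, the opcode set, and the value of MaxRecursionDepth are stand-ins rather than the real SelectionDAG types; only the control flow mirrors the patch:

#include <vector>

struct Node {
  enum Op { And, Or, Xor, SextH, Other } Opcode;
  std::vector<const Node *> Users;
};

// Stand-in for SelectionDAG::MaxRecursionDepth.
constexpr unsigned MaxRecursionDepth = 6;

// Returns true if every transitive user of N reads only the low Bits bits.
bool hasAllNBitUsers(const Node *N, unsigned Bits, unsigned Depth = 0) {
  if (Depth >= MaxRecursionDepth)
    return false; // Give up conservatively rather than recurse forever.
  for (const Node *User : N->Users) {
    switch (User->Opcode) {
    case Node::And:
    case Node::Or:
    case Node::Xor:
      // Logical ops pass operand bits through unchanged, so the answer
      // depends on *their* users; recurse one level deeper.
      if (!hasAllNBitUsers(User, Bits, Depth + 1))
        return false;
      break;
    case Node::SextH:
      if (Bits < 16)
        return false; // sext.h reads the low 16 bits of its operand.
      break;
    default:
      return false; // Unknown user: assume all 64 bits may be read.
    }
  }
  return true;
}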

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D139462

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
    llvm/test/CodeGen/RISCV/bswap-bitreverse.ll
    llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
    llvm/test/CodeGen/RISCV/double-convert.ll
    llvm/test/CodeGen/RISCV/float-convert.ll
    llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
    llvm/test/CodeGen/RISCV/half-convert.ll
    llvm/test/CodeGen/RISCV/rem.ll
    llvm/test/CodeGen/RISCV/rotl-rotr.ll
    llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
    llvm/test/CodeGen/RISCV/rv64zbb.ll
    llvm/test/CodeGen/RISCV/rv64zbkb.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
    llvm/test/CodeGen/RISCV/select-binop-identity.ll
    llvm/test/CodeGen/RISCV/sextw-removal.ll
    llvm/test/CodeGen/RISCV/srem-lkk.ll
    llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
    llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
    llvm/test/CodeGen/RISCV/unaligned-load-store.ll
    llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
    llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 1680ee478dffe..889575244d343 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -2283,17 +2283,19 @@ bool RISCVDAGToDAGISel::selectSHXADD_UWOp(SDValue N, unsigned ShAmt,
 // may be able to use a W instruction and CSE with the other instruction if
 // this has happened. We could try to detect that the CSE opportunity exists
 // before doing this, but that would be more complicated.
-// TODO: Does this need to look through AND/OR/XOR to their users to find more
-// opportunities.
-bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
+bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits,
+                                        const unsigned Depth) const {
   assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
           Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
           Node->getOpcode() == ISD::SRL || Node->getOpcode() == ISD::AND ||
           Node->getOpcode() == ISD::OR || Node->getOpcode() == ISD::XOR ||
           Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
-          isa<ConstantSDNode>(Node)) &&
+          isa<ConstantSDNode>(Node) || Depth != 0) &&
          "Unexpected opcode");
 
+  if (Depth >= SelectionDAG::MaxRecursionDepth)
+    return false;
+
   for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
     SDNode *User = *UI;
     // Users of this node should have already been instruction selected
@@ -2353,15 +2355,25 @@ bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
         return false;
       break;
     case RISCV::ANDI:
-      if (Bits < (64 - countLeadingZeros(User->getConstantOperandVal(1))))
-        return false;
-      break;
+      if (Bits >= (64 - countLeadingZeros(User->getConstantOperandVal(1))))
+        break;
+      goto RecCheck;
     case RISCV::ORI: {
       uint64_t Imm = cast<ConstantSDNode>(User->getOperand(1))->getSExtValue();
-      if (Bits < (64 - countLeadingOnes(Imm)))
+      if (Bits >= (64 - countLeadingOnes(Imm)))
+        break;
+      [[fallthrough]];
+    }
+    case RISCV::AND:
+    case RISCV::OR:
+    case RISCV::XOR:
+    case RISCV::ANDN:
+    case RISCV::ORN:
+    case RISCV::XNOR:
+    RecCheck:
+      if (!hasAllNBitUsers(User, Bits, Depth + 1))
         return false;
       break;
-    }
     case RISCV::SEXT_B:
     case RISCV::PACKH:
       if (Bits < 8)
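
As a concrete illustration of what the recursion buys (reusing the Node and hasAllNBitUsers definitions from the sketch above; the chain itself is hypothetical): a value whose only user is an AND whose only user is a sext.h needs just its low 16 bits, but the old code stopped at the AND and had to answer false. This is the same shape as the pr55484 change in bswap-bitreverse.ll below, where an slli feeding an or feeding a sext.h can now become slliw:

#include <cassert>

int main() {
  Node Sext{Node::SextH, {}};       // reads only the low 16 bits
  Node And{Node::And, {&Sext}};     // the AND's only user is the sext.h
  Node Def{Node::Other, {&And}};    // the node being queried
  assert(hasAllNBitUsers(&Def, /*Bits=*/16));  // true: one recursive step reaches sext.h
  assert(!hasAllNBitUsers(&Def, /*Bits=*/8));  // false: sext.h reads 16 bits
}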

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 8683deeaed7a5..5e45f42c7c917 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -75,7 +75,8 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
     return selectSHXADD_UWOp(N, ShAmt, Val);
   }
 
-  bool hasAllNBitUsers(SDNode *Node, unsigned Bits) const;
+  bool hasAllNBitUsers(SDNode *Node, unsigned Bits,
+                       const unsigned Depth = 0) const;
   bool hasAllHUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 16); }
   bool hasAllWUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 32); }
 

diff --git a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll
index 616b9fce88c90..b0ec85e4f6a09 100644
--- a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll
+++ b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll
@@ -1512,7 +1512,7 @@ define i32 @pr55484(i32 %0) {
 ; RV64ZBB-LABEL: pr55484:
 ; RV64ZBB:       # %bb.0:
 ; RV64ZBB-NEXT:    srli a1, a0, 8
-; RV64ZBB-NEXT:    slli a0, a0, 8
+; RV64ZBB-NEXT:    slliw a0, a0, 8
 ; RV64ZBB-NEXT:    or a0, a1, a0
 ; RV64ZBB-NEXT:    sext.h a0, a0
 ; RV64ZBB-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
index 0f44968184526..8f8703e322a45 100644
--- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -239,7 +239,7 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
 ; RV64M-NEXT:    sext.w a1, a0
 ; RV64M-NEXT:    beqz a1, .LBB2_2
 ; RV64M-NEXT:  # %bb.1: # %cond.false
-; RV64M-NEXT:    neg a1, a0
+; RV64M-NEXT:    negw a1, a0
 ; RV64M-NEXT:    and a0, a0, a1
 ; RV64M-NEXT:    lui a1, 30667
 ; RV64M-NEXT:    addiw a1, a1, 1329
@@ -577,7 +577,7 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
 ;
 ; RV64M-LABEL: test_cttz_i32_zero_undef:
 ; RV64M:       # %bb.0:
-; RV64M-NEXT:    neg a1, a0
+; RV64M-NEXT:    negw a1, a0
 ; RV64M-NEXT:    and a0, a0, a1
 ; RV64M-NEXT:    lui a1, 30667
 ; RV64M-NEXT:    addiw a1, a1, 1329
@@ -1063,7 +1063,7 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
 ; RV64M-NEXT:    and a0, a0, a1
 ; RV64M-NEXT:    add a0, a2, a0
 ; RV64M-NEXT:    srli a1, a0, 4
-; RV64M-NEXT:    add a0, a0, a1
+; RV64M-NEXT:    addw a0, a0, a1
 ; RV64M-NEXT:    lui a1, 61681
 ; RV64M-NEXT:    addiw a1, a1, -241
 ; RV64M-NEXT:    and a0, a0, a1
@@ -1643,7 +1643,7 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
 ; RV64M-NEXT:    and a0, a0, a1
 ; RV64M-NEXT:    add a0, a2, a0
 ; RV64M-NEXT:    srli a1, a0, 4
-; RV64M-NEXT:    add a0, a0, a1
+; RV64M-NEXT:    addw a0, a0, a1
 ; RV64M-NEXT:    lui a1, 61681
 ; RV64M-NEXT:    addiw a1, a1, -241
 ; RV64M-NEXT:    and a0, a0, a1
@@ -2116,7 +2116,7 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
 ; RV64M-NEXT:    and a0, a0, a1
 ; RV64M-NEXT:    add a0, a2, a0
 ; RV64M-NEXT:    srli a1, a0, 4
-; RV64M-NEXT:    add a0, a0, a1
+; RV64M-NEXT:    addw a0, a0, a1
 ; RV64M-NEXT:    lui a1, 61681
 ; RV64M-NEXT:    addiw a1, a1, -241
 ; RV64M-NEXT:    and a0, a0, a1

diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index 2eb7970e6eb29..0ff0a43754ad0 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -288,7 +288,7 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
 ; RV64IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV64IFD-NEXT:    seqz a1, a1
-; RV64IFD-NEXT:    addi a1, a1, -1
+; RV64IFD-NEXT:    addiw a1, a1, -1
 ; RV64IFD-NEXT:    and a0, a0, a1
 ; RV64IFD-NEXT:    slli a0, a0, 32
 ; RV64IFD-NEXT:    srli a0, a0, 32
@@ -1406,7 +1406,7 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
 ; RV64I-NEXT:    mv a1, s0
 ; RV64I-NEXT:    call __unorddf2@plt
 ; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    addiw a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
@@ -1708,7 +1708,7 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
 ; RV64I-NEXT:    mv a1, s0
 ; RV64I-NEXT:    call __unorddf2@plt
 ; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    addiw a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
@@ -1877,7 +1877,7 @@ define zeroext i32 @fcvt_wu_d_sat_zext(double %a) nounwind {
 ; RV64IFD-NEXT:    fcvt.wu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    feq.d a1, fa0, fa0
 ; RV64IFD-NEXT:    seqz a1, a1
-; RV64IFD-NEXT:    addi a1, a1, -1
+; RV64IFD-NEXT:    addiw a1, a1, -1
 ; RV64IFD-NEXT:    and a0, a0, a1
 ; RV64IFD-NEXT:    slli a0, a0, 32
 ; RV64IFD-NEXT:    srli a0, a0, 32
@@ -2059,7 +2059,7 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
 ; RV64I-NEXT:    mv a1, s0
 ; RV64I-NEXT:    call __unorddf2@plt
 ; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    addiw a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index 92b79f8f1d22a..c4de90b3a91b6 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -219,7 +219,7 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
 ; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV64IF-NEXT:    feq.s a1, fa0, fa0
 ; RV64IF-NEXT:    seqz a1, a1
-; RV64IF-NEXT:    addi a1, a1, -1
+; RV64IF-NEXT:    addiw a1, a1, -1
 ; RV64IF-NEXT:    and a0, a0, a1
 ; RV64IF-NEXT:    slli a0, a0, 32
 ; RV64IF-NEXT:    srli a0, a0, 32
@@ -1253,7 +1253,7 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
 ; RV64I-NEXT:    mv a1, s0
 ; RV64I-NEXT:    call __unordsf2@plt
 ; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    addiw a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
@@ -1533,7 +1533,7 @@ define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
 ; RV64I-NEXT:    mv a1, s0
 ; RV64I-NEXT:    call __unordsf2@plt
 ; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    addiw a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
@@ -1690,7 +1690,7 @@ define zeroext i32 @fcvt_wu_s_sat_zext(float %a) nounwind {
 ; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rtz
 ; RV64IF-NEXT:    feq.s a1, fa0, fa0
 ; RV64IF-NEXT:    seqz a1, a1
-; RV64IF-NEXT:    addi a1, a1, -1
+; RV64IF-NEXT:    addiw a1, a1, -1
 ; RV64IF-NEXT:    and a0, a0, a1
 ; RV64IF-NEXT:    slli a0, a0, 32
 ; RV64IF-NEXT:    srli a0, a0, 32
@@ -1848,7 +1848,7 @@ define signext i32 @fcvt_w_s_sat_sext(float %a) nounwind {
 ; RV64I-NEXT:    mv a1, s0
 ; RV64I-NEXT:    call __unordsf2@plt
 ; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    addiw a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
index 505df78dd43bf..233b2e6048c69 100644
--- a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
@@ -295,13 +295,13 @@ define <4 x i32> @ustest_f32i32(<4 x float> %x) {
 ; CHECK-NOV-NEXT:    sgtz a6, a3
 ; CHECK-NOV-NEXT:    sgtz a7, a2
 ; CHECK-NOV-NEXT:    sgtz t0, a1
-; CHECK-NOV-NEXT:    neg t0, t0
+; CHECK-NOV-NEXT:    negw t0, t0
 ; CHECK-NOV-NEXT:    and a1, t0, a1
-; CHECK-NOV-NEXT:    neg a7, a7
+; CHECK-NOV-NEXT:    negw a7, a7
 ; CHECK-NOV-NEXT:    and a2, a7, a2
-; CHECK-NOV-NEXT:    neg a6, a6
+; CHECK-NOV-NEXT:    negw a6, a6
 ; CHECK-NOV-NEXT:    and a3, a6, a3
-; CHECK-NOV-NEXT:    neg a4, a4
+; CHECK-NOV-NEXT:    negw a4, a4
 ; CHECK-NOV-NEXT:    and a4, a4, a5
 ; CHECK-NOV-NEXT:    sw a4, 12(a0)
 ; CHECK-NOV-NEXT:    sw a3, 8(a0)
@@ -719,13 +719,13 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    sgtz a4, a1
 ; CHECK-NOV-NEXT:    sgtz a5, s2
 ; CHECK-NOV-NEXT:    sgtz a6, a0
-; CHECK-NOV-NEXT:    neg a6, a6
+; CHECK-NOV-NEXT:    negw a6, a6
 ; CHECK-NOV-NEXT:    and a0, a6, a0
-; CHECK-NOV-NEXT:    neg a5, a5
+; CHECK-NOV-NEXT:    negw a5, a5
 ; CHECK-NOV-NEXT:    and a5, a5, s2
-; CHECK-NOV-NEXT:    neg a4, a4
+; CHECK-NOV-NEXT:    negw a4, a4
 ; CHECK-NOV-NEXT:    and a1, a4, a1
-; CHECK-NOV-NEXT:    neg a2, a2
+; CHECK-NOV-NEXT:    negw a2, a2
 ; CHECK-NOV-NEXT:    and a2, a2, a3
 ; CHECK-NOV-NEXT:    sw a2, 12(s0)
 ; CHECK-NOV-NEXT:    sw a1, 8(s0)
@@ -1120,13 +1120,13 @@ define <4 x i16> @ustest_f32i16(<4 x float> %x) {
 ; CHECK-NOV-NEXT:    sgtz a6, a3
 ; CHECK-NOV-NEXT:    sgtz a7, a2
 ; CHECK-NOV-NEXT:    sgtz t0, a1
-; CHECK-NOV-NEXT:    neg t0, t0
+; CHECK-NOV-NEXT:    negw t0, t0
 ; CHECK-NOV-NEXT:    and a1, t0, a1
-; CHECK-NOV-NEXT:    neg a7, a7
+; CHECK-NOV-NEXT:    negw a7, a7
 ; CHECK-NOV-NEXT:    and a2, a7, a2
-; CHECK-NOV-NEXT:    neg a6, a6
+; CHECK-NOV-NEXT:    negw a6, a6
 ; CHECK-NOV-NEXT:    and a3, a6, a3
-; CHECK-NOV-NEXT:    neg a4, a4
+; CHECK-NOV-NEXT:    negw a4, a4
 ; CHECK-NOV-NEXT:    and a4, a4, a5
 ; CHECK-NOV-NEXT:    sh a4, 6(a0)
 ; CHECK-NOV-NEXT:    sh a3, 4(a0)
@@ -1862,21 +1862,21 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    sgtz t4, a1
 ; CHECK-NOV-NEXT:    sgtz t5, s2
 ; CHECK-NOV-NEXT:    sgtz t6, a0
-; CHECK-NOV-NEXT:    neg t6, t6
+; CHECK-NOV-NEXT:    negw t6, t6
 ; CHECK-NOV-NEXT:    and a0, t6, a0
-; CHECK-NOV-NEXT:    neg t5, t5
+; CHECK-NOV-NEXT:    negw t5, t5
 ; CHECK-NOV-NEXT:    and t5, t5, s2
-; CHECK-NOV-NEXT:    neg t4, t4
+; CHECK-NOV-NEXT:    negw t4, t4
 ; CHECK-NOV-NEXT:    and a1, t4, a1
-; CHECK-NOV-NEXT:    neg t3, t3
+; CHECK-NOV-NEXT:    negw t3, t3
 ; CHECK-NOV-NEXT:    and a2, t3, a2
-; CHECK-NOV-NEXT:    neg t2, t2
+; CHECK-NOV-NEXT:    negw t2, t2
 ; CHECK-NOV-NEXT:    and a4, t2, a4
-; CHECK-NOV-NEXT:    neg t1, t1
+; CHECK-NOV-NEXT:    negw t1, t1
 ; CHECK-NOV-NEXT:    and a5, t1, a5
-; CHECK-NOV-NEXT:    neg t0, t0
+; CHECK-NOV-NEXT:    negw t0, t0
 ; CHECK-NOV-NEXT:    and a6, t0, a6
-; CHECK-NOV-NEXT:    neg a3, a3
+; CHECK-NOV-NEXT:    negw a3, a3
 ; CHECK-NOV-NEXT:    and a3, a3, a7
 ; CHECK-NOV-NEXT:    sh a3, 14(s0)
 ; CHECK-NOV-NEXT:    sh a6, 12(s0)
@@ -3587,16 +3587,16 @@ define <4 x i32> @ustest_f32i32_mm(<4 x float> %x) {
 ; CHECK-NOV-NEXT:    mv a5, a4
 ; CHECK-NOV-NEXT:  .LBB32_5: # %entry
 ; CHECK-NOV-NEXT:    sgtz a4, a5
-; CHECK-NOV-NEXT:    neg a4, a4
+; CHECK-NOV-NEXT:    negw a4, a4
 ; CHECK-NOV-NEXT:    and a4, a4, a5
 ; CHECK-NOV-NEXT:    sgtz a5, a3
-; CHECK-NOV-NEXT:    neg a5, a5
+; CHECK-NOV-NEXT:    negw a5, a5
 ; CHECK-NOV-NEXT:    and a3, a5, a3
 ; CHECK-NOV-NEXT:    sgtz a5, a2
-; CHECK-NOV-NEXT:    neg a5, a5
+; CHECK-NOV-NEXT:    negw a5, a5
 ; CHECK-NOV-NEXT:    and a2, a5, a2
 ; CHECK-NOV-NEXT:    sgtz a5, a1
-; CHECK-NOV-NEXT:    neg a5, a5
+; CHECK-NOV-NEXT:    negw a5, a5
 ; CHECK-NOV-NEXT:    and a1, a5, a1
 ; CHECK-NOV-NEXT:    sw a1, 12(a0)
 ; CHECK-NOV-NEXT:    sw a2, 8(a0)
@@ -4006,16 +4006,16 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
 ; CHECK-NOV-NEXT:    mv a3, a2
 ; CHECK-NOV-NEXT:  .LBB35_5: # %entry
 ; CHECK-NOV-NEXT:    sgtz a2, a3
-; CHECK-NOV-NEXT:    neg a2, a2
+; CHECK-NOV-NEXT:    negw a2, a2
 ; CHECK-NOV-NEXT:    and a2, a2, a3
 ; CHECK-NOV-NEXT:    sgtz a3, a1
-; CHECK-NOV-NEXT:    neg a3, a3
+; CHECK-NOV-NEXT:    negw a3, a3
 ; CHECK-NOV-NEXT:    and a1, a3, a1
 ; CHECK-NOV-NEXT:    sgtz a3, s2
-; CHECK-NOV-NEXT:    neg a3, a3
+; CHECK-NOV-NEXT:    negw a3, a3
 ; CHECK-NOV-NEXT:    and a3, a3, s2
 ; CHECK-NOV-NEXT:    sgtz a4, a0
-; CHECK-NOV-NEXT:    neg a4, a4
+; CHECK-NOV-NEXT:    negw a4, a4
 ; CHECK-NOV-NEXT:    and a0, a4, a0
 ; CHECK-NOV-NEXT:    sw a0, 12(s0)
 ; CHECK-NOV-NEXT:    sw a3, 8(s0)
@@ -4397,16 +4397,16 @@ define <4 x i16> @ustest_f32i16_mm(<4 x float> %x) {
 ; CHECK-NOV-NEXT:    mv a5, a4
 ; CHECK-NOV-NEXT:  .LBB41_5: # %entry
 ; CHECK-NOV-NEXT:    sgtz a4, a5
-; CHECK-NOV-NEXT:    neg a4, a4
+; CHECK-NOV-NEXT:    negw a4, a4
 ; CHECK-NOV-NEXT:    and a4, a4, a5
 ; CHECK-NOV-NEXT:    sgtz a5, a3
-; CHECK-NOV-NEXT:    neg a5, a5
+; CHECK-NOV-NEXT:    negw a5, a5
 ; CHECK-NOV-NEXT:    and a3, a5, a3
 ; CHECK-NOV-NEXT:    sgtz a5, a2
-; CHECK-NOV-NEXT:    neg a5, a5
+; CHECK-NOV-NEXT:    negw a5, a5
 ; CHECK-NOV-NEXT:    and a2, a5, a2
 ; CHECK-NOV-NEXT:    sgtz a5, a1
-; CHECK-NOV-NEXT:    neg a5, a5
+; CHECK-NOV-NEXT:    negw a5, a5
 ; CHECK-NOV-NEXT:    and a1, a5, a1
 ; CHECK-NOV-NEXT:    sh a1, 6(a0)
 ; CHECK-NOV-NEXT:    sh a2, 4(a0)
@@ -5128,28 +5128,28 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
 ; CHECK-NOV-NEXT:    mv a7, a3
 ; CHECK-NOV-NEXT:  .LBB44_9: # %entry
 ; CHECK-NOV-NEXT:    sgtz a3, a7
-; CHECK-NOV-NEXT:    neg a3, a3
+; CHECK-NOV-NEXT:    negw a3, a3
 ; CHECK-NOV-NEXT:    and a3, a3, a7
 ; CHECK-NOV-NEXT:    sgtz a7, a6
-; CHECK-NOV-NEXT:    neg a7, a7
+; CHECK-NOV-NEXT:    negw a7, a7
 ; CHECK-NOV-NEXT:    and a6, a7, a6
 ; CHECK-NOV-NEXT:    sgtz a7, a5
-; CHECK-NOV-NEXT:    neg a7, a7
+; CHECK-NOV-NEXT:    negw a7, a7
 ; CHECK-NOV-NEXT:    and a5, a7, a5
 ; CHECK-NOV-NEXT:    sgtz a7, a4
-; CHECK-NOV-NEXT:    neg a7, a7
+; CHECK-NOV-NEXT:    negw a7, a7
 ; CHECK-NOV-NEXT:    and a4, a7, a4
 ; CHECK-NOV-NEXT:    sgtz a7, a2
-; CHECK-NOV-NEXT:    neg a7, a7
+; CHECK-NOV-NEXT:    negw a7, a7
 ; CHECK-NOV-NEXT:    and a2, a7, a2
 ; CHECK-NOV-NEXT:    sgtz a7, a1
-; CHECK-NOV-NEXT:    neg a7, a7
+; CHECK-NOV-NEXT:    negw a7, a7
 ; CHECK-NOV-NEXT:    and a1, a7, a1
 ; CHECK-NOV-NEXT:    sgtz a7, s2
-; CHECK-NOV-NEXT:    neg a7, a7
+; CHECK-NOV-NEXT:    negw a7, a7
 ; CHECK-NOV-NEXT:    and a7, a7, s2
 ; CHECK-NOV-NEXT:    sgtz t0, a0
-; CHECK-NOV-NEXT:    neg t0, t0
+; CHECK-NOV-NEXT:    negw t0, t0
 ; CHECK-NOV-NEXT:    and a0, t0, a0
 ; CHECK-NOV-NEXT:    sh a0, 14(s0)
 ; CHECK-NOV-NEXT:    sh a7, 12(s0)

diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index bdf4dfc828539..f5cf8caa26bc1 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -831,7 +831,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV64IZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
 ; RV64IZFH-NEXT:    feq.h a1, fa0, fa0
 ; RV64IZFH-NEXT:    seqz a1, a1
-; RV64IZFH-NEXT:    addi a1, a1, -1
+; RV64IZFH-NEXT:    addiw a1, a1, -1
 ; RV64IZFH-NEXT:    and a0, a0, a1
 ; RV64IZFH-NEXT:    slli a0, a0, 32
 ; RV64IZFH-NEXT:    srli a0, a0, 32
@@ -851,7 +851,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV64IDZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
 ; RV64IDZFH-NEXT:    feq.h a1, fa0, fa0
 ; RV64IDZFH-NEXT:    seqz a1, a1
-; RV64IDZFH-NEXT:    addi a1, a1, -1
+; RV64IDZFH-NEXT:    addiw a1, a1, -1
 ; RV64IDZFH-NEXT:    and a0, a0, a1
 ; RV64IDZFH-NEXT:    slli a0, a0, 32
 ; RV64IDZFH-NEXT:    srli a0, a0, 32
@@ -943,7 +943,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; CHECK64-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
 ; CHECK64-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
 ; CHECK64-IZFHMIN-NEXT:    seqz a1, a1
-; CHECK64-IZFHMIN-NEXT:    addi a1, a1, -1
+; CHECK64-IZFHMIN-NEXT:    addiw a1, a1, -1
 ; CHECK64-IZFHMIN-NEXT:    and a0, a0, a1
 ; CHECK64-IZFHMIN-NEXT:    slli a0, a0, 32
 ; CHECK64-IZFHMIN-NEXT:    srli a0, a0, 32
@@ -2926,7 +2926,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
 ; RV64I-NEXT:    mv a1, s0
 ; RV64I-NEXT:    call __unordsf2@plt
 ; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    addiw a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
@@ -3386,7 +3386,7 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
 ; RV64I-NEXT:    mv a1, s0
 ; RV64I-NEXT:    call __unordsf2@plt
 ; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    addiw a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
@@ -3655,7 +3655,7 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
 ; RV64IZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
 ; RV64IZFH-NEXT:    feq.h a1, fa0, fa0
 ; RV64IZFH-NEXT:    seqz a1, a1
-; RV64IZFH-NEXT:    addi a1, a1, -1
+; RV64IZFH-NEXT:    addiw a1, a1, -1
 ; RV64IZFH-NEXT:    and a0, a0, a1
 ; RV64IZFH-NEXT:    slli a0, a0, 32
 ; RV64IZFH-NEXT:    srli a0, a0, 32
@@ -3675,7 +3675,7 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
 ; RV64IDZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
 ; RV64IDZFH-NEXT:    feq.h a1, fa0, fa0
 ; RV64IDZFH-NEXT:    seqz a1, a1
-; RV64IDZFH-NEXT:    addi a1, a1, -1
+; RV64IDZFH-NEXT:    addiw a1, a1, -1
 ; RV64IDZFH-NEXT:    and a0, a0, a1
 ; RV64IDZFH-NEXT:    slli a0, a0, 32
 ; RV64IDZFH-NEXT:    srli a0, a0, 32
@@ -3769,7 +3769,7 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
 ; CHECK64-IZFHMIN-NEXT:    fcvt.wu.s a0, ft0, rtz
 ; CHECK64-IZFHMIN-NEXT:    feq.s a1, ft0, ft0
 ; CHECK64-IZFHMIN-NEXT:    seqz a1, a1
-; CHECK64-IZFHMIN-NEXT:    addi a1, a1, -1
+; CHECK64-IZFHMIN-NEXT:    addiw a1, a1, -1
 ; CHECK64-IZFHMIN-NEXT:    and a0, a0, a1
 ; CHECK64-IZFHMIN-NEXT:    slli a0, a0, 32
 ; CHECK64-IZFHMIN-NEXT:    srli a0, a0, 32
@@ -3887,7 +3887,7 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
 ; RV64I-NEXT:    mv a1, s0
 ; RV64I-NEXT:    call __unordsf2@plt
 ; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    addiw a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/rem.ll b/llvm/test/CodeGen/RISCV/rem.ll
index 3d5fd3fd43317..3cd5d056f9ab4 100644
--- a/llvm/test/CodeGen/RISCV/rem.ll
+++ b/llvm/test/CodeGen/RISCV/rem.ll
@@ -125,7 +125,7 @@ define i32 @srem_pow2(i32 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sraiw a1, a0, 31
 ; RV64I-NEXT:    srliw a1, a1, 29
-; RV64I-NEXT:    add a1, a0, a1
+; RV64I-NEXT:    addw a1, a0, a1
 ; RV64I-NEXT:    andi a1, a1, -8
 ; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -134,7 +134,7 @@ define i32 @srem_pow2(i32 %a) nounwind {
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    sraiw a1, a0, 31
 ; RV64IM-NEXT:    srliw a1, a1, 29
-; RV64IM-NEXT:    add a1, a0, a1
+; RV64IM-NEXT:    addw a1, a0, a1
 ; RV64IM-NEXT:    andi a1, a1, -8
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
@@ -167,7 +167,7 @@ define i32 @srem_pow2_2(i32 %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sraiw a1, a0, 31
 ; RV64I-NEXT:    srliw a1, a1, 16
-; RV64I-NEXT:    add a1, a0, a1
+; RV64I-NEXT:    addw a1, a0, a1
 ; RV64I-NEXT:    lui a2, 1048560
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    subw a0, a0, a1
@@ -177,7 +177,7 @@ define i32 @srem_pow2_2(i32 %a) nounwind {
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    sraiw a1, a0, 31
 ; RV64IM-NEXT:    srliw a1, a1, 16
-; RV64IM-NEXT:    add a1, a0, a1
+; RV64IM-NEXT:    addw a1, a0, a1
 ; RV64IM-NEXT:    lui a2, 1048560
 ; RV64IM-NEXT:    and a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1

diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
index 9b13bd394febd..0ff2889901ba7 100644
--- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll
+++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll
@@ -718,7 +718,7 @@ define i64 @rotl_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    ori a2, a1, 128
 ; RV64I-NEXT:    sll a2, a0, a2
-; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    negw a1, a1
 ; RV64I-NEXT:    ori a1, a1, 64
 ; RV64I-NEXT:    srl a0, a0, a1
 ; RV64I-NEXT:    or a0, a2, a0
@@ -963,7 +963,7 @@ define i64 @rotr_64_mask_or_128_or_64(i64 %x, i64 %y) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    ori a2, a1, 128
 ; RV64I-NEXT:    srl a2, a0, a2
-; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    negw a1, a1
 ; RV64I-NEXT:    ori a1, a1, 64
 ; RV64I-NEXT:    sll a0, a0, a1
 ; RV64I-NEXT:    or a0, a2, a0

diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
index 57a90b662c412..4fab086a214c4 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll
@@ -279,7 +279,7 @@ define void @rori_i32_fshl_nosext(i32 signext %a, i32* %x) nounwind {
 ; RV64I-LABEL: rori_i32_fshl_nosext:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srliw a2, a0, 1
-; RV64I-NEXT:    slli a0, a0, 31
+; RV64I-NEXT:    slliw a0, a0, 31
 ; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    sw a0, 0(a1)
 ; RV64I-NEXT:    ret
@@ -314,7 +314,7 @@ define signext i32 @rori_i32_fshr(i32 signext %a) nounwind {
 define void @rori_i32_fshr_nosext(i32 signext %a, i32* %x) nounwind {
 ; RV64I-LABEL: rori_i32_fshr_nosext:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a2, a0, 1
+; RV64I-NEXT:    slliw a2, a0, 1
 ; RV64I-NEXT:    srliw a0, a0, 31
 ; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    sw a0, 0(a1)
@@ -352,7 +352,7 @@ define signext i32 @not_rori_i32(i32 signext %x, i32 signext %y) nounwind {
 define i64 @roriw_bug(i64 %x) nounwind {
 ; CHECK-LABEL: roriw_bug:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    slli a1, a0, 31
+; CHECK-NEXT:    slliw a1, a0, 31
 ; CHECK-NEXT:    andi a2, a0, -2
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    or a0, a1, a0

diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index 713f0c05c0556..87940d9e88d45 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -981,8 +981,8 @@ define void @bswap_i32_nosext(i32 signext %a, i32* %x) nounwind {
 ; RV64I-NEXT:    srliw a4, a0, 24
 ; RV64I-NEXT:    or a2, a2, a4
 ; RV64I-NEXT:    and a3, a0, a3
-; RV64I-NEXT:    slli a3, a3, 8
-; RV64I-NEXT:    slli a0, a0, 24
+; RV64I-NEXT:    slliw a3, a3, 8
+; RV64I-NEXT:    slliw a0, a0, 24
 ; RV64I-NEXT:    or a0, a0, a3
 ; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    sw a0, 0(a1)

diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
index 7ac8b567d0c6e..bd5fb1c0497a2 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
@@ -45,7 +45,7 @@ define signext i32 @pack_i32_2(i16 zeroext %a, i16 zeroext %b) nounwind {
 define signext i32 @pack_i32_3(i16 zeroext %0, i16 zeroext %1, i32 signext %2) {
 ; RV64I-LABEL: pack_i32_3:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 16
+; RV64I-NEXT:    slliw a0, a0, 16
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    addw a0, a0, a2
 ; RV64I-NEXT:    ret
@@ -227,8 +227,8 @@ define zeroext i16 @packh_i16(i8 zeroext %a, i8 zeroext %b) nounwind {
 define zeroext i16 @packh_i16_2(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2) {
 ; RV64I-LABEL: packh_i16_2:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    slli a0, a0, 8
+; RV64I-NEXT:    addw a0, a1, a0
+; RV64I-NEXT:    slliw a0, a0, 8
 ; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
index ee4cbae9efd96..057981e237d7b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
@@ -431,7 +431,7 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; RV64-NEXT:    fcvt.l.d a0, ft3, rtz
 ; RV64-NEXT:    feq.d a2, ft2, ft2
 ; RV64-NEXT:    seqz a2, a2
-; RV64-NEXT:    addi a2, a2, -1
+; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
 ; RV64-NEXT:    sb a0, 8(sp)
 ; RV64-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
@@ -442,7 +442,7 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; RV64-NEXT:    fcvt.l.d a0, ft3, rtz
 ; RV64-NEXT:    feq.d a2, ft2, ft2
 ; RV64-NEXT:    seqz a2, a2
-; RV64-NEXT:    addi a2, a2, -1
+; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
 ; RV64-NEXT:    sb a0, 15(sp)
 ; RV64-NEXT:    vslidedown.vi v12, v8, 6
@@ -452,7 +452,7 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; RV64-NEXT:    fcvt.l.d a0, ft3, rtz
 ; RV64-NEXT:    feq.d a2, ft2, ft2
 ; RV64-NEXT:    seqz a2, a2
-; RV64-NEXT:    addi a2, a2, -1
+; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
 ; RV64-NEXT:    sb a0, 14(sp)
 ; RV64-NEXT:    vslidedown.vi v12, v8, 5
@@ -462,7 +462,7 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; RV64-NEXT:    fcvt.l.d a0, ft3, rtz
 ; RV64-NEXT:    feq.d a2, ft2, ft2
 ; RV64-NEXT:    seqz a2, a2
-; RV64-NEXT:    addi a2, a2, -1
+; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
 ; RV64-NEXT:    sb a0, 13(sp)
 ; RV64-NEXT:    vslidedown.vi v12, v8, 4
@@ -472,7 +472,7 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; RV64-NEXT:    fcvt.l.d a0, ft3, rtz
 ; RV64-NEXT:    feq.d a2, ft2, ft2
 ; RV64-NEXT:    seqz a2, a2
-; RV64-NEXT:    addi a2, a2, -1
+; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
 ; RV64-NEXT:    sb a0, 12(sp)
 ; RV64-NEXT:    vslidedown.vi v12, v8, 3
@@ -482,7 +482,7 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; RV64-NEXT:    fcvt.l.d a0, ft3, rtz
 ; RV64-NEXT:    feq.d a2, ft2, ft2
 ; RV64-NEXT:    seqz a2, a2
-; RV64-NEXT:    addi a2, a2, -1
+; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
 ; RV64-NEXT:    sb a0, 11(sp)
 ; RV64-NEXT:    vslidedown.vi v12, v8, 2
@@ -492,7 +492,7 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; RV64-NEXT:    fcvt.l.d a0, ft3, rtz
 ; RV64-NEXT:    feq.d a2, ft2, ft2
 ; RV64-NEXT:    seqz a2, a2
-; RV64-NEXT:    addi a2, a2, -1
+; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
 ; RV64-NEXT:    sb a0, 10(sp)
 ; RV64-NEXT:    vslidedown.vi v8, v8, 1
@@ -502,7 +502,7 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; RV64-NEXT:    fcvt.l.d a0, ft0, rtz
 ; RV64-NEXT:    feq.d a2, ft2, ft2
 ; RV64-NEXT:    seqz a2, a2
-; RV64-NEXT:    addi a2, a2, -1
+; RV64-NEXT:    addiw a2, a2, -1
 ; RV64-NEXT:    and a0, a2, a0
 ; RV64-NEXT:    sb a0, 9(sp)
 ; RV64-NEXT:    addi a0, sp, 8

diff --git a/llvm/test/CodeGen/RISCV/select-binop-identity.ll b/llvm/test/CodeGen/RISCV/select-binop-identity.ll
index 7dee8f857060c..6e206abe09a7b 100644
--- a/llvm/test/CodeGen/RISCV/select-binop-identity.ll
+++ b/llvm/test/CodeGen/RISCV/select-binop-identity.ll
@@ -202,7 +202,7 @@ define signext i32 @add_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32
 ;
 ; RV64I-LABEL: add_select_all_zeros_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    addiw a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    addw a0, a2, a0
 ; RV64I-NEXT:    ret
@@ -264,7 +264,7 @@ define signext i32 @sub_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32
 ;
 ; RV64I-LABEL: sub_select_all_zeros_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    addiw a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    subw a0, a2, a0
 ; RV64I-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/sextw-removal.ll b/llvm/test/CodeGen/RISCV/sextw-removal.ll
index a0bb68b2ab351..ffc441983840a 100644
--- a/llvm/test/CodeGen/RISCV/sextw-removal.ll
+++ b/llvm/test/CodeGen/RISCV/sextw-removal.ll
@@ -196,7 +196,7 @@ define void @test5(i32 signext %arg, i32 signext %arg1) nounwind {
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:    add a0, a2, a0
 ; RV64I-NEXT:    srli a2, a0, 4
-; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    addw a0, a0, a2
 ; RV64I-NEXT:    and a0, a0, s2
 ; RV64I-NEXT:    mulw a0, a0, s3
 ; RV64I-NEXT:    srliw a0, a0, 24

diff --git a/llvm/test/CodeGen/RISCV/srem-lkk.ll b/llvm/test/CodeGen/RISCV/srem-lkk.ll
index c28a92603e739..0f1288bb0b734 100644
--- a/llvm/test/CodeGen/RISCV/srem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-lkk.ll
@@ -311,7 +311,7 @@ define i32 @dont_fold_srem_power_of_two(i32 %x) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sraiw a1, a0, 31
 ; RV64I-NEXT:    srliw a1, a1, 26
-; RV64I-NEXT:    add a1, a0, a1
+; RV64I-NEXT:    addw a1, a0, a1
 ; RV64I-NEXT:    andi a1, a1, -64
 ; RV64I-NEXT:    subw a0, a0, a1
 ; RV64I-NEXT:    ret
@@ -320,7 +320,7 @@ define i32 @dont_fold_srem_power_of_two(i32 %x) nounwind {
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    sraiw a1, a0, 31
 ; RV64IM-NEXT:    srliw a1, a1, 26
-; RV64IM-NEXT:    add a1, a0, a1
+; RV64IM-NEXT:    addw a1, a0, a1
 ; RV64IM-NEXT:    andi a1, a1, -64
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
@@ -364,7 +364,7 @@ define i32 @dont_fold_srem_i32_smax(i32 %x) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sraiw a1, a0, 31
 ; RV64I-NEXT:    srliw a1, a1, 1
-; RV64I-NEXT:    add a1, a0, a1
+; RV64I-NEXT:    addw a1, a0, a1
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    addw a0, a0, a1
@@ -374,7 +374,7 @@ define i32 @dont_fold_srem_i32_smax(i32 %x) nounwind {
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    sraiw a1, a0, 31
 ; RV64IM-NEXT:    srliw a1, a1, 1
-; RV64IM-NEXT:    add a1, a0, a1
+; RV64IM-NEXT:    addw a1, a0, a1
 ; RV64IM-NEXT:    lui a2, 524288
 ; RV64IM-NEXT:    and a1, a1, a2
 ; RV64IM-NEXT:    addw a0, a0, a1

diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index e1b9670f5e6e8..71e190583af53 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -427,7 +427,7 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64-NEXT:    neg a0, a0
 ; RV64-NEXT:    addi a2, a2, -1
 ; RV64-NEXT:    addi a1, a1, -1
-; RV64-NEXT:    slli a3, a1, 2
+; RV64-NEXT:    slliw a3, a1, 2
 ; RV64-NEXT:    slli a4, a2, 31
 ; RV64-NEXT:    srli a4, a4, 62
 ; RV64-NEXT:    or a3, a4, a3
@@ -585,7 +585,7 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64M-NEXT:    srli a1, a1, 31
 ; RV64M-NEXT:    or a1, a1, a4
 ; RV64M-NEXT:    sd a1, 0(a0)
-; RV64M-NEXT:    slli a1, a2, 2
+; RV64M-NEXT:    slliw a1, a2, 2
 ; RV64M-NEXT:    slli a3, a3, 31
 ; RV64M-NEXT:    srli a3, a3, 62
 ; RV64M-NEXT:    or a1, a3, a1
@@ -779,11 +779,11 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64MV-NEXT:    slli a4, a3, 33
 ; RV64MV-NEXT:    or a1, a1, a4
 ; RV64MV-NEXT:    sd a1, 0(a0)
-; RV64MV-NEXT:    slli a2, a2, 2
+; RV64MV-NEXT:    slliw a1, a2, 2
 ; RV64MV-NEXT:    slli a3, a3, 31
 ; RV64MV-NEXT:    srli a3, a3, 62
-; RV64MV-NEXT:    or a2, a3, a2
-; RV64MV-NEXT:    sw a2, 8(a0)
+; RV64MV-NEXT:    or a1, a3, a1
+; RV64MV-NEXT:    sw a1, 8(a0)
 ; RV64MV-NEXT:    addi sp, s0, -64
 ; RV64MV-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64MV-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload

diff --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
index 8356029a8ccd7..2e9224ed10424 100644
--- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
@@ -701,15 +701,15 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    lh a3, 16(a1)
 ; RV64I-NEXT:    lh a1, 8(a1)
 ; RV64I-NEXT:    srli a4, a2, 58
-; RV64I-NEXT:    add a4, a2, a4
+; RV64I-NEXT:    addw a4, a2, a4
 ; RV64I-NEXT:    andi a4, a4, -64
 ; RV64I-NEXT:    subw s1, a2, a4
 ; RV64I-NEXT:    srli a2, a1, 59
-; RV64I-NEXT:    add a2, a1, a2
+; RV64I-NEXT:    addw a2, a1, a2
 ; RV64I-NEXT:    andi a2, a2, -32
 ; RV64I-NEXT:    subw s2, a1, a2
 ; RV64I-NEXT:    srli a1, a3, 61
-; RV64I-NEXT:    add a1, a3, a1
+; RV64I-NEXT:    addw a1, a3, a1
 ; RV64I-NEXT:    andi a1, a1, -8
 ; RV64I-NEXT:    subw s3, a3, a1
 ; RV64I-NEXT:    li a1, 95
@@ -743,15 +743,15 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    mulw a3, a3, a6
 ; RV64IM-NEXT:    subw a2, a2, a3
 ; RV64IM-NEXT:    srli a3, a1, 58
-; RV64IM-NEXT:    add a3, a1, a3
+; RV64IM-NEXT:    addw a3, a1, a3
 ; RV64IM-NEXT:    andi a3, a3, -64
 ; RV64IM-NEXT:    subw a1, a1, a3
 ; RV64IM-NEXT:    srli a3, a5, 59
-; RV64IM-NEXT:    add a3, a5, a3
+; RV64IM-NEXT:    addw a3, a5, a3
 ; RV64IM-NEXT:    andi a3, a3, -32
 ; RV64IM-NEXT:    subw a5, a5, a3
 ; RV64IM-NEXT:    srli a3, a4, 61
-; RV64IM-NEXT:    add a3, a4, a3
+; RV64IM-NEXT:    addw a3, a4, a3
 ; RV64IM-NEXT:    andi a3, a3, -8
 ; RV64IM-NEXT:    subw a4, a4, a3
 ; RV64IM-NEXT:    sh a4, 4(a0)
@@ -1008,7 +1008,7 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV64I-NEXT:    lh s1, 24(a1)
 ; RV64I-NEXT:    lh a0, 16(a1)
 ; RV64I-NEXT:    srli a1, a2, 49
-; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    addw a1, a2, a1
 ; RV64I-NEXT:    lui a3, 8
 ; RV64I-NEXT:    and a1, a1, a3
 ; RV64I-NEXT:    subw s3, a2, a1
@@ -1057,7 +1057,7 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    mulw a3, a3, a5
 ; RV64IM-NEXT:    subw a4, a4, a3
 ; RV64IM-NEXT:    srli a3, a1, 49
-; RV64IM-NEXT:    add a3, a1, a3
+; RV64IM-NEXT:    addw a3, a1, a3
 ; RV64IM-NEXT:    lui a5, 8
 ; RV64IM-NEXT:    and a3, a3, a5
 ; RV64IM-NEXT:    subw a1, a1, a3

diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
index f82f320e9d234..099080ff5a7c6 100644
--- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
@@ -138,11 +138,11 @@ define i64 @load_i64(i64* %p) {
 ; RV64I-NEXT:    lbu a3, 4(a0)
 ; RV64I-NEXT:    lbu a4, 6(a0)
 ; RV64I-NEXT:    lbu a0, 7(a0)
-; RV64I-NEXT:    slli a2, a2, 8
+; RV64I-NEXT:    slliw a2, a2, 8
 ; RV64I-NEXT:    or a2, a2, a3
-; RV64I-NEXT:    slli a4, a4, 16
-; RV64I-NEXT:    slli a0, a0, 24
-; RV64I-NEXT:    or a0, a0, a4
+; RV64I-NEXT:    slliw a3, a4, 16
+; RV64I-NEXT:    slliw a0, a0, 24
+; RV64I-NEXT:    or a0, a0, a3
 ; RV64I-NEXT:    or a0, a0, a2
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    or a0, a0, a1

diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
index 948858f95c5d0..36bf5c4c9986b 100644
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -107,7 +107,7 @@ define i1 @test_urem_even(i27 %X) nounwind {
 ; RV64-NEXT:    lui a1, 28087
 ; RV64-NEXT:    addiw a1, a1, -585
 ; RV64-NEXT:    call __muldi3@plt
-; RV64-NEXT:    slli a1, a0, 26
+; RV64-NEXT:    slliw a1, a0, 26
 ; RV64-NEXT:    slli a0, a0, 37
 ; RV64-NEXT:    srli a0, a0, 38
 ; RV64-NEXT:    or a0, a0, a1
@@ -140,8 +140,8 @@ define i1 @test_urem_even(i27 %X) nounwind {
 ; RV64M:       # %bb.0:
 ; RV64M-NEXT:    lui a1, 28087
 ; RV64M-NEXT:    addiw a1, a1, -585
-; RV64M-NEXT:    mul a0, a0, a1
-; RV64M-NEXT:    slli a1, a0, 26
+; RV64M-NEXT:    mulw a0, a0, a1
+; RV64M-NEXT:    slliw a1, a0, 26
 ; RV64M-NEXT:    slli a0, a0, 37
 ; RV64M-NEXT:    srli a0, a0, 38
 ; RV64M-NEXT:    or a0, a0, a1
@@ -172,8 +172,8 @@ define i1 @test_urem_even(i27 %X) nounwind {
 ; RV64MV:       # %bb.0:
 ; RV64MV-NEXT:    lui a1, 28087
 ; RV64MV-NEXT:    addiw a1, a1, -585
-; RV64MV-NEXT:    mul a0, a0, a1
-; RV64MV-NEXT:    slli a1, a0, 26
+; RV64MV-NEXT:    mulw a0, a0, a1
+; RV64MV-NEXT:    slliw a1, a0, 26
 ; RV64MV-NEXT:    slli a0, a0, 37
 ; RV64MV-NEXT:    srli a0, a0, 38
 ; RV64MV-NEXT:    or a0, a0, a1
@@ -396,7 +396,7 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64-NEXT:    andi a0, a0, 2047
 ; RV64-NEXT:    li a1, 683
 ; RV64-NEXT:    call __muldi3@plt
-; RV64-NEXT:    slli a1, a0, 10
+; RV64-NEXT:    slliw a1, a0, 10
 ; RV64-NEXT:    slli a0, a0, 53
 ; RV64-NEXT:    srli a0, a0, 54
 ; RV64-NEXT:    or a0, a0, a1
@@ -487,8 +487,8 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64M-NEXT:    srli a3, a1, 11
 ; RV64M-NEXT:    andi a1, a1, 2047
 ; RV64M-NEXT:    li a4, 683
-; RV64M-NEXT:    mul a1, a1, a4
-; RV64M-NEXT:    slli a4, a1, 10
+; RV64M-NEXT:    mulw a1, a1, a4
+; RV64M-NEXT:    slliw a4, a1, 10
 ; RV64M-NEXT:    slli a1, a1, 53
 ; RV64M-NEXT:    srli a1, a1, 54
 ; RV64M-NEXT:    or a1, a1, a4

diff --git a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll
index a0c63ded72765..ef2a51bfc5c84 100644
--- a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll
+++ b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll
@@ -50,14 +50,14 @@ define void @vec3_setcc_crash(<3 x i8>* %in, <3 x i8>* %out) {
 ; RV64-NEXT:    sgtz a6, a6
 ; RV64-NEXT:    sgtz a5, a5
 ; RV64-NEXT:    sgtz a4, a4
-; RV64-NEXT:    neg a4, a4
+; RV64-NEXT:    negw a4, a4
 ; RV64-NEXT:    and a3, a4, a3
-; RV64-NEXT:    slli a3, a3, 8
-; RV64-NEXT:    neg a4, a5
+; RV64-NEXT:    slliw a3, a3, 8
+; RV64-NEXT:    negw a4, a5
 ; RV64-NEXT:    and a0, a4, a0
 ; RV64-NEXT:    andi a0, a0, 255
 ; RV64-NEXT:    or a0, a0, a3
-; RV64-NEXT:    neg a3, a6
+; RV64-NEXT:    negw a3, a6
 ; RV64-NEXT:    and a2, a3, a2
 ; RV64-NEXT:    sb a2, 2(a1)
 ; RV64-NEXT:    sh a0, 0(a1)


        

