[llvm] 4063369 - [RISCV] Add MULW to RISCVStripWSuffix.

Craig Topper via llvm-commits <llvm-commits@lists.llvm.org>
Thu Mar 16 19:43:21 PDT 2023


Author: Craig Topper
Date: 2023-03-16T19:42:33-07:00
New Revision: 4063369fd452b9bb9941494023eea6395a1872d3

URL: https://github.com/llvm/llvm-project/commit/4063369fd452b9bb9941494023eea6395a1872d3
DIFF: https://github.com/llvm/llvm-project/commit/4063369fd452b9bb9941494023eea6395a1872d3.diff

LOG: [RISCV] Add MULW to RISCVStripWSuffix.

This converts MULW to MUL if the upper bits aren't used.
This will give more opportunities to use c.mul with Zcb.

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVStripWSuffix.cpp
    llvm/test/CodeGen/RISCV/addimm-mulimm.ll
    llvm/test/CodeGen/RISCV/bitextract-mac.ll
    llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
    llvm/test/CodeGen/RISCV/machine-combiner.ll
    llvm/test/CodeGen/RISCV/machine-outliner-throw.ll
    llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll
    llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll
    llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll
    llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
    llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
    llvm/test/CodeGen/RISCV/sextw-removal.ll
    llvm/test/CodeGen/RISCV/srem-lkk.ll
    llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
    llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
    llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
    llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
    llvm/test/CodeGen/RISCV/urem-lkk.ll
    llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
    llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
    llvm/test/CodeGen/RISCV/usub_sat_plus.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVStripWSuffix.cpp b/llvm/lib/Target/RISCV/RISCVStripWSuffix.cpp
index 14ab9c2dd6557..8324f16497413 100644
--- a/llvm/lib/Target/RISCV/RISCVStripWSuffix.cpp
+++ b/llvm/lib/Target/RISCV/RISCVStripWSuffix.cpp
@@ -69,16 +69,18 @@ bool RISCVStripWSuffix::runOnMachineFunction(MachineFunction &MF) {
     for (auto I = MBB.begin(), IE = MBB.end(); I != IE; ++I) {
       MachineInstr &MI = *I;
 
+      unsigned Opc;
       switch (MI.getOpcode()) {
-      case RISCV::ADDW:
-      case RISCV::SLLIW:
-        if (TII.hasAllWUsers(MI, MRI)) {
-          unsigned Opc =
-              MI.getOpcode() == RISCV::ADDW ? RISCV::ADD : RISCV::SLLI;
-          MI.setDesc(TII.get(Opc));
-          MadeChange = true;
-        }
-        break;
+      default:
+        continue;
+      case RISCV::ADDW:  Opc = RISCV::ADD;  break;
+      case RISCV::MULW:  Opc = RISCV::MUL;  break;
+      case RISCV::SLLIW: Opc = RISCV::SLLI; break;
+      }
+
+      if (TII.hasAllWUsers(MI, MRI)) {
+        MI.setDesc(TII.get(Opc));
+        MadeChange = true;
       }
     }
   }

diff  --git a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
index 4454af837004a..d1bc480455dd3 100644
--- a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
+++ b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
@@ -18,7 +18,7 @@ define i32 @add_mul_combine_accept_a1(i32 %x) {
 ; RV64IMB-LABEL: add_mul_combine_accept_a1:
 ; RV64IMB:       # %bb.0:
 ; RV64IMB-NEXT:    li a1, 29
-; RV64IMB-NEXT:    mulw a0, a0, a1
+; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, 1073
 ; RV64IMB-NEXT:    ret
   %tmp0 = add i32 %x, 37
@@ -37,7 +37,7 @@ define signext i32 @add_mul_combine_accept_a2(i32 signext %x) {
 ; RV64IMB-LABEL: add_mul_combine_accept_a2:
 ; RV64IMB:       # %bb.0:
 ; RV64IMB-NEXT:    li a1, 29
-; RV64IMB-NEXT:    mulw a0, a0, a1
+; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, 1073
 ; RV64IMB-NEXT:    ret
   %tmp0 = add i32 %x, 37
@@ -82,7 +82,7 @@ define i32 @add_mul_combine_accept_b1(i32 %x) {
 ; RV64IMB-LABEL: add_mul_combine_accept_b1:
 ; RV64IMB:       # %bb.0:
 ; RV64IMB-NEXT:    li a1, 23
-; RV64IMB-NEXT:    mulw a0, a0, a1
+; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    lui a1, 50
 ; RV64IMB-NEXT:    addiw a1, a1, 1119
 ; RV64IMB-NEXT:    addw a0, a0, a1
@@ -105,7 +105,7 @@ define signext i32 @add_mul_combine_accept_b2(i32 signext %x) {
 ; RV64IMB-LABEL: add_mul_combine_accept_b2:
 ; RV64IMB:       # %bb.0:
 ; RV64IMB-NEXT:    li a1, 23
-; RV64IMB-NEXT:    mulw a0, a0, a1
+; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    lui a1, 50
 ; RV64IMB-NEXT:    addiw a1, a1, 1119
 ; RV64IMB-NEXT:    addw a0, a0, a1
@@ -416,7 +416,7 @@ define i32 @add_mul_combine_reject_f1(i32 %x) {
 ; RV64IMB:       # %bb.0:
 ; RV64IMB-NEXT:    addiw a0, a0, 1972
 ; RV64IMB-NEXT:    li a1, 29
-; RV64IMB-NEXT:    mulw a0, a0, a1
+; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, 11
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i32 %x, 29
@@ -437,7 +437,7 @@ define signext i32 @add_mul_combine_reject_f2(i32 signext %x) {
 ; RV64IMB:       # %bb.0:
 ; RV64IMB-NEXT:    addiw a0, a0, 1972
 ; RV64IMB-NEXT:    li a1, 29
-; RV64IMB-NEXT:    mulw a0, a0, a1
+; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, 11
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i32 %x, 29
@@ -584,7 +584,7 @@ define i32 @mul3000_add8990_a(i32 %x) {
 ; RV64IMB-NEXT:    addiw a0, a0, 3
 ; RV64IMB-NEXT:    lui a1, 1
 ; RV64IMB-NEXT:    addiw a1, a1, -1096
-; RV64IMB-NEXT:    mulw a0, a0, a1
+; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, -10
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i32 %x, 3000
@@ -607,7 +607,7 @@ define signext i32 @mul3000_add8990_b(i32 signext %x) {
 ; RV64IMB-NEXT:    addiw a0, a0, 3
 ; RV64IMB-NEXT:    lui a1, 1
 ; RV64IMB-NEXT:    addiw a1, a1, -1096
-; RV64IMB-NEXT:    mulw a0, a0, a1
+; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, -10
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i32 %x, 3000
@@ -659,7 +659,7 @@ define i32 @mul3000_sub8990_a(i32 %x) {
 ; RV64IMB-NEXT:    addiw a0, a0, -3
 ; RV64IMB-NEXT:    lui a1, 1
 ; RV64IMB-NEXT:    addiw a1, a1, -1096
-; RV64IMB-NEXT:    mulw a0, a0, a1
+; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, 10
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i32 %x, 3000
@@ -682,7 +682,7 @@ define signext i32 @mul3000_sub8990_b(i32 signext %x) {
 ; RV64IMB-NEXT:    addiw a0, a0, -3
 ; RV64IMB-NEXT:    lui a1, 1
 ; RV64IMB-NEXT:    addiw a1, a1, -1096
-; RV64IMB-NEXT:    mulw a0, a0, a1
+; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, 10
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i32 %x, 3000
@@ -735,7 +735,7 @@ define i32 @mulneg3000_add8990_a(i32 %x) {
 ; RV64IMB-NEXT:    addiw a0, a0, -3
 ; RV64IMB-NEXT:    lui a1, 1048575
 ; RV64IMB-NEXT:    addiw a1, a1, 1096
-; RV64IMB-NEXT:    mulw a0, a0, a1
+; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, -10
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i32 %x, -3000
@@ -758,7 +758,7 @@ define signext i32 @mulneg3000_add8990_b(i32 signext %x) {
 ; RV64IMB-NEXT:    addiw a0, a0, -3
 ; RV64IMB-NEXT:    lui a1, 1048575
 ; RV64IMB-NEXT:    addiw a1, a1, 1096
-; RV64IMB-NEXT:    mulw a0, a0, a1
+; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, -10
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i32 %x, -3000
@@ -811,7 +811,7 @@ define i32 @mulneg3000_sub8990_a(i32 %x) {
 ; RV64IMB-NEXT:    addiw a0, a0, 3
 ; RV64IMB-NEXT:    lui a1, 1048575
 ; RV64IMB-NEXT:    addiw a1, a1, 1096
-; RV64IMB-NEXT:    mulw a0, a0, a1
+; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, 10
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i32 %x, -3000
@@ -834,7 +834,7 @@ define signext i32 @mulneg3000_sub8990_b(i32 signext %x) {
 ; RV64IMB-NEXT:    addiw a0, a0, 3
 ; RV64IMB-NEXT:    lui a1, 1048575
 ; RV64IMB-NEXT:    addiw a1, a1, 1096
-; RV64IMB-NEXT:    mulw a0, a0, a1
+; RV64IMB-NEXT:    mul a0, a0, a1
 ; RV64IMB-NEXT:    addiw a0, a0, 10
 ; RV64IMB-NEXT:    ret
   %tmp0 = mul i32 %x, -3000

diff  --git a/llvm/test/CodeGen/RISCV/bitextract-mac.ll b/llvm/test/CodeGen/RISCV/bitextract-mac.ll
index f0abc372fb057..ce1e0c4711ffb 100644
--- a/llvm/test/CodeGen/RISCV/bitextract-mac.ll
+++ b/llvm/test/CodeGen/RISCV/bitextract-mac.ll
@@ -74,29 +74,29 @@ define i32 @f(i32 %A, i32 %B, i32 %C) {
 ;
 ; RV64I-LABEL: f:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    mulw a0, a1, a0
+; RV64I-NEXT:    mul a0, a1, a0
 ; RV64I-NEXT:    slli a1, a0, 58
 ; RV64I-NEXT:    srli a1, a1, 60
 ; RV64I-NEXT:    slli a0, a0, 52
 ; RV64I-NEXT:    srli a0, a0, 57
-; RV64I-NEXT:    mulw a0, a1, a0
+; RV64I-NEXT:    mul a0, a1, a0
 ; RV64I-NEXT:    addw a0, a0, a2
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: f:
 ; RV64ZBB:       # %bb.0: # %entry
-; RV64ZBB-NEXT:    mulw a0, a1, a0
+; RV64ZBB-NEXT:    mul a0, a1, a0
 ; RV64ZBB-NEXT:    slli a1, a0, 58
 ; RV64ZBB-NEXT:    srli a1, a1, 60
 ; RV64ZBB-NEXT:    slli a0, a0, 52
 ; RV64ZBB-NEXT:    srli a0, a0, 57
-; RV64ZBB-NEXT:    mulw a0, a1, a0
+; RV64ZBB-NEXT:    mul a0, a1, a0
 ; RV64ZBB-NEXT:    addw a0, a0, a2
 ; RV64ZBB-NEXT:    ret
 ;
 ; RV64XTHEADMAC-LABEL: f:
 ; RV64XTHEADMAC:       # %bb.0: # %entry
-; RV64XTHEADMAC-NEXT:    mulw a0, a1, a0
+; RV64XTHEADMAC-NEXT:    mul a0, a1, a0
 ; RV64XTHEADMAC-NEXT:    slli a1, a0, 58
 ; RV64XTHEADMAC-NEXT:    srli a1, a1, 60
 ; RV64XTHEADMAC-NEXT:    slli a0, a0, 52
@@ -110,7 +110,7 @@ define i32 @f(i32 %A, i32 %B, i32 %C) {
 ; RV64XTHEADBB-NEXT:    mul a0, a1, a0
 ; RV64XTHEADBB-NEXT:    th.extu a1, a0, 5, 2
 ; RV64XTHEADBB-NEXT:    th.extu a0, a0, 11, 5
-; RV64XTHEADBB-NEXT:    mulw a0, a1, a0
+; RV64XTHEADBB-NEXT:    mul a0, a1, a0
 ; RV64XTHEADBB-NEXT:    addw a0, a0, a2
 ; RV64XTHEADBB-NEXT:    ret
 ;

diff  --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
index f251c9808f05c..ac1b94d1c9146 100644
--- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -309,7 +309,7 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
 ; RV64M-NEXT:    and a0, a0, a1
 ; RV64M-NEXT:    lui a1, 30667
 ; RV64M-NEXT:    addiw a1, a1, 1329
-; RV64M-NEXT:    mulw a0, a0, a1
+; RV64M-NEXT:    mul a0, a0, a1
 ; RV64M-NEXT:    srliw a0, a0, 27
 ; RV64M-NEXT:    lui a1, %hi(.LCPI2_0)
 ; RV64M-NEXT:    addi a1, a1, %lo(.LCPI2_0)
@@ -754,7 +754,7 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
 ; RV64M-NEXT:    and a0, a0, a1
 ; RV64M-NEXT:    lui a1, 30667
 ; RV64M-NEXT:    addiw a1, a1, 1329
-; RV64M-NEXT:    mulw a0, a0, a1
+; RV64M-NEXT:    mul a0, a0, a1
 ; RV64M-NEXT:    srliw a0, a0, 27
 ; RV64M-NEXT:    lui a1, %hi(.LCPI6_0)
 ; RV64M-NEXT:    addi a1, a1, %lo(.LCPI6_0)
@@ -1319,7 +1319,7 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
 ; RV64M-NEXT:    and a0, a0, a1
 ; RV64M-NEXT:    lui a1, 4112
 ; RV64M-NEXT:    addiw a1, a1, 257
-; RV64M-NEXT:    mulw a0, a0, a1
+; RV64M-NEXT:    mul a0, a0, a1
 ; RV64M-NEXT:    srliw a0, a0, 24
 ; RV64M-NEXT:    ret
 ; RV64M-NEXT:  .LBB10_2:
@@ -1957,7 +1957,7 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
 ; RV64M-NEXT:    and a0, a0, a1
 ; RV64M-NEXT:    lui a1, 4112
 ; RV64M-NEXT:    addiw a1, a1, 257
-; RV64M-NEXT:    mulw a0, a0, a1
+; RV64M-NEXT:    mul a0, a0, a1
 ; RV64M-NEXT:    srliw a0, a0, 24
 ; RV64M-NEXT:    ret
 ;
@@ -2530,7 +2530,7 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
 ; RV64M-NEXT:    and a0, a0, a1
 ; RV64M-NEXT:    lui a1, 4112
 ; RV64M-NEXT:    addiw a1, a1, 257
-; RV64M-NEXT:    mulw a0, a0, a1
+; RV64M-NEXT:    mul a0, a0, a1
 ; RV64M-NEXT:    srliw a0, a0, 24
 ; RV64M-NEXT:    ret
 ;

diff  --git a/llvm/test/CodeGen/RISCV/machine-combiner.ll b/llvm/test/CodeGen/RISCV/machine-combiner.ll
index 1cb57ff307310..197abcfb0e1e1 100644
--- a/llvm/test/CodeGen/RISCV/machine-combiner.ll
+++ b/llvm/test/CodeGen/RISCV/machine-combiner.ll
@@ -713,8 +713,8 @@ define i16 @test_reassoc_mul_i16(i16 %a0, i16 %a1, i16 %a2, i16 %a3) {
 define i32 @test_reassoc_mul_i32(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
 ; CHECK-LABEL: test_reassoc_mul_i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    mulw a0, a0, a1
-; CHECK-NEXT:    mulw a1, a2, a3
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    mul a1, a2, a3
 ; CHECK-NEXT:    mulw a0, a0, a1
 ; CHECK-NEXT:    ret
   %t0 = mul i32 %a0, %a1

diff  --git a/llvm/test/CodeGen/RISCV/machine-outliner-throw.ll b/llvm/test/CodeGen/RISCV/machine-outliner-throw.ll
index d5d263a05133c..03419932d030a 100644
--- a/llvm/test/CodeGen/RISCV/machine-outliner-throw.ll
+++ b/llvm/test/CodeGen/RISCV/machine-outliner-throw.ll
@@ -12,7 +12,7 @@ define i32 @func1(i32 %x) #0 {
 ; CHECK-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_offset ra, -8
 ; CHECK-NEXT:    .cfi_offset s0, -16
-; CHECK-NEXT:    mulw a0, a0, a0
+; CHECK-NEXT:    mul a0, a0, a0
 ; CHECK-NEXT:    addiw s0, a0, 1
 ; CHECK-NEXT:    li a0, 4
; CHECK-NEXT:    call __cxa_allocate_exception@plt
@@ -39,7 +39,7 @@ define i32 @func2(i32 %x) #0 {
 ; CHECK-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_offset ra, -8
 ; CHECK-NEXT:    .cfi_offset s0, -16
-; CHECK-NEXT:    mulw a0, a0, a0
+; CHECK-NEXT:    mul a0, a0, a0
 ; CHECK-NEXT:    addiw s0, a0, 1
 ; CHECK-NEXT:    li a0, 4
; CHECK-NEXT:    call __cxa_allocate_exception@plt

diff  --git a/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll b/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll
index 8427c64b86edd..f957d25f5c490 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll
@@ -8,9 +8,9 @@
 define i32 @foo(i32 %x, i32 %y, i32 %z) {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    mulw a0, a0, a0
+; CHECK-NEXT:    mul a0, a0, a0
 ; CHECK-NEXT:    addiw a0, a0, 1
-; CHECK-NEXT:    mulw a0, a0, a0
+; CHECK-NEXT:    mul a0, a0, a0
 ; CHECK-NEXT:    add a0, a0, a2
 ; CHECK-NEXT:    addiw a0, a0, 1
 ; CHECK-NEXT:    sllw a0, a0, a1

diff  --git a/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll b/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll
index c616fbcd37425..eea04ae03f8d6 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll
@@ -9,7 +9,7 @@ define signext i32 @addw(i32 signext %s, i32 signext %n, i32 signext %k) nounwin
 ; CHECK-NEXT:    not a2, a0
 ; CHECK-NEXT:    add a2, a2, a1
 ; CHECK-NEXT:    addiw a3, a0, 1
-; CHECK-NEXT:    mulw a3, a2, a3
+; CHECK-NEXT:    mul a3, a2, a3
 ; CHECK-NEXT:    subw a1, a1, a0
 ; CHECK-NEXT:    addiw a1, a1, -2
 ; CHECK-NEXT:    slli a1, a1, 32
@@ -54,7 +54,7 @@ define signext i32 @subw(i32 signext %s, i32 signext %n, i32 signext %k) nounwin
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
 ; CHECK-NEXT:    not a2, a0
 ; CHECK-NEXT:    add a3, a2, a1
-; CHECK-NEXT:    mulw a2, a3, a2
+; CHECK-NEXT:    mul a2, a3, a2
 ; CHECK-NEXT:    subw a1, a1, a0
 ; CHECK-NEXT:    addiw a1, a1, -2
 ; CHECK-NEXT:    slli a1, a1, 32

diff  --git a/llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll b/llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll
index 7da8cc82d32ac..5732ac9aa4109 100644
--- a/llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll
+++ b/llvm/test/CodeGen/RISCV/rv64m-exhaustive-w-insts.ll
@@ -171,7 +171,7 @@ define signext i32 @sext_mulw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind
 define zeroext i32 @zext_mulw_aext_aext(i32 %a, i32 %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_aext_aext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    mul a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret
@@ -182,7 +182,7 @@ define zeroext i32 @zext_mulw_aext_aext(i32 %a, i32 %b) nounwind {
 define zeroext i32 @zext_mulw_aext_sext(i32 %a, i32 signext %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_aext_sext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    mul a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret
@@ -193,7 +193,7 @@ define zeroext i32 @zext_mulw_aext_sext(i32 %a, i32 signext %b) nounwind {
 define zeroext i32 @zext_mulw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_aext_zext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    mul a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret
@@ -204,7 +204,7 @@ define zeroext i32 @zext_mulw_aext_zext(i32 %a, i32 zeroext %b) nounwind {
 define zeroext i32 @zext_mulw_sext_aext(i32 signext %a, i32 %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_sext_aext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    mul a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret
@@ -215,7 +215,7 @@ define zeroext i32 @zext_mulw_sext_aext(i32 signext %a, i32 %b) nounwind {
 define zeroext i32 @zext_mulw_sext_sext(i32 signext %a, i32 signext %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_sext_sext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    mul a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret
@@ -226,7 +226,7 @@ define zeroext i32 @zext_mulw_sext_sext(i32 signext %a, i32 signext %b) nounwind
 define zeroext i32 @zext_mulw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_sext_zext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    mul a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret
@@ -237,7 +237,7 @@ define zeroext i32 @zext_mulw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind
 define zeroext i32 @zext_mulw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_zext_aext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    mul a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret
@@ -248,7 +248,7 @@ define zeroext i32 @zext_mulw_zext_aext(i32 zeroext %a, i32 %b) nounwind {
 define zeroext i32 @zext_mulw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_zext_sext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    mul a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret
@@ -259,7 +259,7 @@ define zeroext i32 @zext_mulw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind
 define zeroext i32 @zext_mulw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind {
 ; RV64IM-LABEL: zext_mulw_zext_zext:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    mul a0, a0, a1
 ; RV64IM-NEXT:    slli a0, a0, 32
 ; RV64IM-NEXT:    srli a0, a0, 32
 ; RV64IM-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 01be29994cbe6..583e29b82c675 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -275,7 +275,7 @@ define void @sink_splat_mul_scalable(ptr nocapture %a, i32 signext %x) {
 ; CHECK-NEXT:  .LBB7_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lw a2, 0(a0)
-; CHECK-NEXT:    mulw a2, a2, a1
+; CHECK-NEXT:    mul a2, a2, a1
 ; CHECK-NEXT:    sw a2, 0(a0)
 ; CHECK-NEXT:    addi a3, a3, 1
 ; CHECK-NEXT:    addi a0, a0, 4

diff  --git a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
index ae31ba89423f6..48b7ead5e7320 100644
--- a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
@@ -148,7 +148,7 @@ define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
-; RV64I-NEXT:    mulw a1, a1, a2
+; RV64I-NEXT:    mul a1, a1, a2
 ; RV64I-NEXT:    slli a1, a1, 48
 ; RV64I-NEXT:    srai a1, a1, 48
 ; RV64I-NEXT:    add a0, a0, a1
@@ -184,7 +184,7 @@ define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
 ; RV64IZbb-LABEL: func16:
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    sext.h a0, a0
-; RV64IZbb-NEXT:    mulw a1, a1, a2
+; RV64IZbb-NEXT:    mul a1, a1, a2
 ; RV64IZbb-NEXT:    sext.h a1, a1
 ; RV64IZbb-NEXT:    add a0, a0, a1
 ; RV64IZbb-NEXT:    lui a1, 8
@@ -226,7 +226,7 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
-; RV64I-NEXT:    mulw a1, a1, a2
+; RV64I-NEXT:    mul a1, a1, a2
 ; RV64I-NEXT:    slli a1, a1, 56
 ; RV64I-NEXT:    srai a1, a1, 56
 ; RV64I-NEXT:    add a0, a0, a1
@@ -260,7 +260,7 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64IZbb-LABEL: func8:
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    sext.b a0, a0
-; RV64IZbb-NEXT:    mulw a1, a1, a2
+; RV64IZbb-NEXT:    mul a1, a1, a2
 ; RV64IZbb-NEXT:    sext.b a1, a1
 ; RV64IZbb-NEXT:    add a0, a0, a1
 ; RV64IZbb-NEXT:    li a1, 127
@@ -301,7 +301,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 60
 ; RV64I-NEXT:    srai a0, a0, 60
-; RV64I-NEXT:    mulw a1, a1, a2
+; RV64I-NEXT:    mul a1, a1, a2
 ; RV64I-NEXT:    slli a1, a1, 60
 ; RV64I-NEXT:    srai a1, a1, 60
 ; RV64I-NEXT:    add a0, a0, a1
@@ -338,7 +338,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    slli a0, a0, 60
 ; RV64IZbb-NEXT:    srai a0, a0, 60
-; RV64IZbb-NEXT:    mulw a1, a1, a2
+; RV64IZbb-NEXT:    mul a1, a1, a2
 ; RV64IZbb-NEXT:    slli a1, a1, 60
 ; RV64IZbb-NEXT:    srai a1, a1, 60
 ; RV64IZbb-NEXT:    add a0, a0, a1

diff  --git a/llvm/test/CodeGen/RISCV/sextw-removal.ll b/llvm/test/CodeGen/RISCV/sextw-removal.ll
index ffe64db623508..0fcf3194a3f44 100644
--- a/llvm/test/CodeGen/RISCV/sextw-removal.ll
+++ b/llvm/test/CodeGen/RISCV/sextw-removal.ll
@@ -198,7 +198,7 @@ define void @test5(i32 signext %arg, i32 signext %arg1) nounwind {
 ; RV64I-NEXT:    srli a2, a0, 4
 ; RV64I-NEXT:    add a0, a0, a2
 ; RV64I-NEXT:    and a0, a0, s2
-; RV64I-NEXT:    mulw a0, a0, s3
+; RV64I-NEXT:    mul a0, a0, s3
 ; RV64I-NEXT:    srliw a0, a0, 24
 ; RV64I-NEXT:    bnez a1, .LBB4_1
 ; RV64I-NEXT:  # %bb.2: # %bb7

diff  --git a/llvm/test/CodeGen/RISCV/srem-lkk.ll b/llvm/test/CodeGen/RISCV/srem-lkk.ll
index 1b8dee85296b0..24e740fd143d1 100644
--- a/llvm/test/CodeGen/RISCV/srem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-lkk.ll
@@ -51,7 +51,7 @@ define i32 @fold_srem_positive_odd(i32 %x) nounwind {
 ; RV64IM-NEXT:    sraiw a1, a1, 6
 ; RV64IM-NEXT:    add a1, a1, a2
 ; RV64IM-NEXT:    li a2, 95
-; RV64IM-NEXT:    mulw a1, a1, a2
+; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = srem i32 %x, 95
@@ -99,7 +99,7 @@ define i32 @fold_srem_positive_even(i32 %x) nounwind {
 ; RV64IM-NEXT:    srai a1, a1, 40
 ; RV64IM-NEXT:    add a1, a1, a2
 ; RV64IM-NEXT:    li a2, 1060
-; RV64IM-NEXT:    mulw a1, a1, a2
+; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = srem i32 %x, 1060
@@ -147,7 +147,7 @@ define i32 @fold_srem_negative_odd(i32 %x) nounwind {
 ; RV64IM-NEXT:    srai a1, a1, 40
 ; RV64IM-NEXT:    add a1, a1, a2
 ; RV64IM-NEXT:    li a2, -723
-; RV64IM-NEXT:    mulw a1, a1, a2
+; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = srem i32 %x, -723
@@ -199,7 +199,7 @@ define i32 @fold_srem_negative_even(i32 %x) nounwind {
 ; RV64IM-NEXT:    add a1, a1, a2
 ; RV64IM-NEXT:    lui a2, 1048570
 ; RV64IM-NEXT:    addiw a2, a2, 1595
-; RV64IM-NEXT:    mulw a1, a1, a2
+; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = srem i32 %x, -22981
@@ -277,7 +277,7 @@ define i32 @combine_srem_sdiv(i32 %x) nounwind {
 ; RV64IM-NEXT:    sraiw a1, a1, 6
 ; RV64IM-NEXT:    add a1, a1, a2
 ; RV64IM-NEXT:    li a2, 95
-; RV64IM-NEXT:    mulw a2, a1, a2
+; RV64IM-NEXT:    mul a2, a1, a2
 ; RV64IM-NEXT:    add a0, a0, a1
 ; RV64IM-NEXT:    subw a0, a0, a2
 ; RV64IM-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 9e00b1282796a..91efe2c223a07 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -64,7 +64,7 @@ define i1 @test_srem_odd(i29 %X) nounwind {
 ; RV64M:       # %bb.0:
 ; RV64M-NEXT:    lui a1, 128424
 ; RV64M-NEXT:    addiw a1, a1, 331
-; RV64M-NEXT:    mulw a0, a0, a1
+; RV64M-NEXT:    mul a0, a0, a1
 ; RV64M-NEXT:    lui a1, 662
 ; RV64M-NEXT:    addiw a1, a1, -83
 ; RV64M-NEXT:    add a0, a0, a1
@@ -94,7 +94,7 @@ define i1 @test_srem_odd(i29 %X) nounwind {
 ; RV64MV:       # %bb.0:
 ; RV64MV-NEXT:    lui a1, 128424
 ; RV64MV-NEXT:    addiw a1, a1, 331
-; RV64MV-NEXT:    mulw a0, a0, a1
+; RV64MV-NEXT:    mul a0, a0, a1
 ; RV64MV-NEXT:    lui a1, 662
 ; RV64MV-NEXT:    addiw a1, a1, -83
 ; RV64MV-NEXT:    add a0, a0, a1
@@ -167,7 +167,7 @@ define i1 @test_srem_even(i4 %X) nounwind {
 ; RV64M-NEXT:    srli a1, a1, 63
 ; RV64M-NEXT:    add a1, a2, a1
 ; RV64M-NEXT:    li a2, 6
-; RV64M-NEXT:    mulw a1, a1, a2
+; RV64M-NEXT:    mul a1, a1, a2
 ; RV64M-NEXT:    subw a0, a0, a1
 ; RV64M-NEXT:    andi a0, a0, 15
 ; RV64M-NEXT:    addi a0, a0, -1
@@ -203,7 +203,7 @@ define i1 @test_srem_even(i4 %X) nounwind {
 ; RV64MV-NEXT:    srli a1, a1, 63
 ; RV64MV-NEXT:    add a1, a2, a1
 ; RV64MV-NEXT:    li a2, 6
-; RV64MV-NEXT:    mulw a1, a1, a2
+; RV64MV-NEXT:    mul a1, a1, a2
 ; RV64MV-NEXT:    subw a0, a0, a1
 ; RV64MV-NEXT:    andi a0, a0, 15
 ; RV64MV-NEXT:    addi a0, a0, -1

diff  --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
index 50d4a55842f1a..231c066de5437 100644
--- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
@@ -159,7 +159,7 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    lui a6, %hi(.LCPI0_1)
 ; RV64IM-NEXT:    ld a6, %lo(.LCPI0_1)(a6)
 ; RV64IM-NEXT:    li a7, 95
-; RV64IM-NEXT:    mulw a3, a3, a7
+; RV64IM-NEXT:    mul a3, a3, a7
 ; RV64IM-NEXT:    subw a2, a2, a3
 ; RV64IM-NEXT:    mulh a3, a1, a6
 ; RV64IM-NEXT:    sub a3, a3, a1
@@ -169,7 +169,7 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    lui a6, %hi(.LCPI0_2)
 ; RV64IM-NEXT:    ld a6, %lo(.LCPI0_2)(a6)
 ; RV64IM-NEXT:    li a7, -124
-; RV64IM-NEXT:    mulw a3, a3, a7
+; RV64IM-NEXT:    mul a3, a3, a7
 ; RV64IM-NEXT:    subw a1, a1, a3
 ; RV64IM-NEXT:    mulh a3, a5, a6
 ; RV64IM-NEXT:    srli a6, a3, 63
@@ -178,14 +178,14 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    lui a6, %hi(.LCPI0_3)
 ; RV64IM-NEXT:    ld a6, %lo(.LCPI0_3)(a6)
 ; RV64IM-NEXT:    li a7, 98
-; RV64IM-NEXT:    mulw a3, a3, a7
+; RV64IM-NEXT:    mul a3, a3, a7
 ; RV64IM-NEXT:    subw a5, a5, a3
 ; RV64IM-NEXT:    mulh a3, a4, a6
 ; RV64IM-NEXT:    srli a6, a3, 63
 ; RV64IM-NEXT:    srli a3, a3, 7
 ; RV64IM-NEXT:    add a3, a3, a6
 ; RV64IM-NEXT:    li a6, -1003
-; RV64IM-NEXT:    mulw a3, a3, a6
+; RV64IM-NEXT:    mul a3, a3, a6
 ; RV64IM-NEXT:    subw a4, a4, a3
 ; RV64IM-NEXT:    sh a4, 6(a0)
 ; RV64IM-NEXT:    sh a5, 4(a0)
@@ -338,28 +338,28 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a6, a6, 6
 ; RV64IM-NEXT:    add a6, a6, a7
 ; RV64IM-NEXT:    li a7, 95
-; RV64IM-NEXT:    mulw a6, a6, a7
+; RV64IM-NEXT:    mul a6, a6, a7
 ; RV64IM-NEXT:    subw a2, a2, a6
 ; RV64IM-NEXT:    mulh a6, a1, a3
 ; RV64IM-NEXT:    add a6, a6, a1
 ; RV64IM-NEXT:    srli t0, a6, 63
 ; RV64IM-NEXT:    srli a6, a6, 6
 ; RV64IM-NEXT:    add a6, a6, t0
-; RV64IM-NEXT:    mulw a6, a6, a7
+; RV64IM-NEXT:    mul a6, a6, a7
 ; RV64IM-NEXT:    subw a1, a1, a6
 ; RV64IM-NEXT:    mulh a6, a5, a3
 ; RV64IM-NEXT:    add a6, a6, a5
 ; RV64IM-NEXT:    srli t0, a6, 63
 ; RV64IM-NEXT:    srli a6, a6, 6
 ; RV64IM-NEXT:    add a6, a6, t0
-; RV64IM-NEXT:    mulw a6, a6, a7
+; RV64IM-NEXT:    mul a6, a6, a7
 ; RV64IM-NEXT:    subw a5, a5, a6
 ; RV64IM-NEXT:    mulh a3, a4, a3
 ; RV64IM-NEXT:    add a3, a3, a4
 ; RV64IM-NEXT:    srli a6, a3, 63
 ; RV64IM-NEXT:    srli a3, a3, 6
 ; RV64IM-NEXT:    add a3, a3, a6
-; RV64IM-NEXT:    mulw a3, a3, a7
+; RV64IM-NEXT:    mul a3, a3, a7
 ; RV64IM-NEXT:    subw a4, a4, a3
 ; RV64IM-NEXT:    sh a4, 6(a0)
 ; RV64IM-NEXT:    sh a5, 4(a0)
@@ -574,25 +574,25 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srai a6, a6, 6
 ; RV64IM-NEXT:    add a6, a6, a7
 ; RV64IM-NEXT:    li a7, 95
-; RV64IM-NEXT:    mulw t0, a6, a7
+; RV64IM-NEXT:    mul t0, a6, a7
 ; RV64IM-NEXT:    mulh t1, a1, a3
 ; RV64IM-NEXT:    add t1, t1, a1
 ; RV64IM-NEXT:    srli t2, t1, 63
 ; RV64IM-NEXT:    srai t1, t1, 6
 ; RV64IM-NEXT:    add t1, t1, t2
-; RV64IM-NEXT:    mulw t2, t1, a7
+; RV64IM-NEXT:    mul t2, t1, a7
 ; RV64IM-NEXT:    mulh t3, a5, a3
 ; RV64IM-NEXT:    add t3, t3, a5
 ; RV64IM-NEXT:    srli t4, t3, 63
 ; RV64IM-NEXT:    srai t3, t3, 6
 ; RV64IM-NEXT:    add t3, t3, t4
-; RV64IM-NEXT:    mulw t4, t3, a7
+; RV64IM-NEXT:    mul t4, t3, a7
 ; RV64IM-NEXT:    mulh a3, a4, a3
 ; RV64IM-NEXT:    add a3, a3, a4
 ; RV64IM-NEXT:    srli t5, a3, 63
 ; RV64IM-NEXT:    srai a3, a3, 6
 ; RV64IM-NEXT:    add a3, a3, t5
-; RV64IM-NEXT:    mulw a7, a3, a7
+; RV64IM-NEXT:    mul a7, a3, a7
 ; RV64IM-NEXT:    add a3, a4, a3
 ; RV64IM-NEXT:    subw a3, a3, a7
 ; RV64IM-NEXT:    add a5, a5, t3
@@ -740,7 +740,7 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    srli a3, a3, 6
 ; RV64IM-NEXT:    add a3, a3, a6
 ; RV64IM-NEXT:    li a6, 95
-; RV64IM-NEXT:    mulw a3, a3, a6
+; RV64IM-NEXT:    mul a3, a3, a6
 ; RV64IM-NEXT:    subw a2, a2, a3
 ; RV64IM-NEXT:    srli a3, a1, 58
 ; RV64IM-NEXT:    add a3, a1, a3
@@ -893,7 +893,7 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    lui a5, %hi(.LCPI4_1)
 ; RV64IM-NEXT:    ld a5, %lo(.LCPI4_1)(a5)
 ; RV64IM-NEXT:    li a6, 23
-; RV64IM-NEXT:    mulw a3, a3, a6
+; RV64IM-NEXT:    mul a3, a3, a6
 ; RV64IM-NEXT:    subw a2, a2, a3
 ; RV64IM-NEXT:    mulh a3, a1, a5
 ; RV64IM-NEXT:    srli a5, a3, 63
@@ -902,7 +902,7 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    lui a5, %hi(.LCPI4_2)
 ; RV64IM-NEXT:    ld a5, %lo(.LCPI4_2)(a5)
 ; RV64IM-NEXT:    li a6, 654
-; RV64IM-NEXT:    mulw a3, a3, a6
+; RV64IM-NEXT:    mul a3, a3, a6
 ; RV64IM-NEXT:    subw a1, a1, a3
 ; RV64IM-NEXT:    mulh a3, a4, a5
 ; RV64IM-NEXT:    srli a5, a3, 63
@@ -910,7 +910,7 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    add a3, a3, a5
 ; RV64IM-NEXT:    lui a5, 1
 ; RV64IM-NEXT:    addiw a5, a5, 1327
-; RV64IM-NEXT:    mulw a3, a3, a5
+; RV64IM-NEXT:    mul a3, a3, a5
 ; RV64IM-NEXT:    subw a4, a4, a3
 ; RV64IM-NEXT:    sh zero, 0(a0)
 ; RV64IM-NEXT:    sh a4, 6(a0)
@@ -1045,7 +1045,7 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    li a5, 23
 ; RV64IM-NEXT:    lui a6, %hi(.LCPI5_1)
 ; RV64IM-NEXT:    ld a6, %lo(.LCPI5_1)(a6)
-; RV64IM-NEXT:    mulw a3, a3, a5
+; RV64IM-NEXT:    mul a3, a3, a5
 ; RV64IM-NEXT:    lh a1, 8(a1)
 ; RV64IM-NEXT:    subw a2, a2, a3
 ; RV64IM-NEXT:    mulh a3, a4, a6
@@ -1054,7 +1054,7 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    add a3, a3, a5
 ; RV64IM-NEXT:    lui a5, 1
 ; RV64IM-NEXT:    addiw a5, a5, 1327
-; RV64IM-NEXT:    mulw a3, a3, a5
+; RV64IM-NEXT:    mul a3, a3, a5
 ; RV64IM-NEXT:    subw a4, a4, a3
 ; RV64IM-NEXT:    srli a3, a1, 49
 ; RV64IM-NEXT:    add a3, a1, a3

diff  --git a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
index cbc60736a52ca..6ff61ff415216 100644
--- a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
@@ -128,7 +128,7 @@ define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
-; RV64I-NEXT:    mulw a1, a1, a2
+; RV64I-NEXT:    mul a1, a1, a2
 ; RV64I-NEXT:    slli a1, a1, 48
 ; RV64I-NEXT:    srai a1, a1, 48
 ; RV64I-NEXT:    sub a0, a0, a1
@@ -164,7 +164,7 @@ define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
 ; RV64IZbb-LABEL: func16:
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    sext.h a0, a0
-; RV64IZbb-NEXT:    mulw a1, a1, a2
+; RV64IZbb-NEXT:    mul a1, a1, a2
 ; RV64IZbb-NEXT:    sext.h a1, a1
 ; RV64IZbb-NEXT:    sub a0, a0, a1
 ; RV64IZbb-NEXT:    lui a1, 8
@@ -206,7 +206,7 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
-; RV64I-NEXT:    mulw a1, a1, a2
+; RV64I-NEXT:    mul a1, a1, a2
 ; RV64I-NEXT:    slli a1, a1, 56
 ; RV64I-NEXT:    srai a1, a1, 56
 ; RV64I-NEXT:    sub a0, a0, a1
@@ -240,7 +240,7 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64IZbb-LABEL: func8:
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    sext.b a0, a0
-; RV64IZbb-NEXT:    mulw a1, a1, a2
+; RV64IZbb-NEXT:    mul a1, a1, a2
 ; RV64IZbb-NEXT:    sext.b a1, a1
 ; RV64IZbb-NEXT:    sub a0, a0, a1
 ; RV64IZbb-NEXT:    li a1, 127
@@ -281,7 +281,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 60
 ; RV64I-NEXT:    srai a0, a0, 60
-; RV64I-NEXT:    mulw a1, a1, a2
+; RV64I-NEXT:    mul a1, a1, a2
 ; RV64I-NEXT:    slli a1, a1, 60
 ; RV64I-NEXT:    srai a1, a1, 60
 ; RV64I-NEXT:    sub a0, a0, a1
@@ -318,7 +318,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    slli a0, a0, 60
 ; RV64IZbb-NEXT:    srai a0, a0, 60
-; RV64IZbb-NEXT:    mulw a1, a1, a2
+; RV64IZbb-NEXT:    mul a1, a1, a2
 ; RV64IZbb-NEXT:    slli a1, a1, 60
 ; RV64IZbb-NEXT:    srai a1, a1, 60
 ; RV64IZbb-NEXT:    sub a0, a0, a1

diff  --git a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
index 7a8c4c04fd0e1..219f0daf270bc 100644
--- a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
@@ -22,7 +22,7 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ;
 ; RV64I-LABEL: func32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    mulw a1, a1, a2
+; RV64I-NEXT:    mul a1, a1, a2
 ; RV64I-NEXT:    addw a1, a0, a1
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sltu a0, a1, a0
@@ -144,7 +144,7 @@ define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
 ; RV64IZbb-LABEL: func16:
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    zext.h a0, a0
-; RV64IZbb-NEXT:    mulw a1, a1, a2
+; RV64IZbb-NEXT:    mul a1, a1, a2
 ; RV64IZbb-NEXT:    zext.h a1, a1
 ; RV64IZbb-NEXT:    add a0, a0, a1
 ; RV64IZbb-NEXT:    lui a1, 16
@@ -173,7 +173,7 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64I-LABEL: func8:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    andi a0, a0, 255
-; RV64I-NEXT:    mulw a1, a1, a2
+; RV64I-NEXT:    mul a1, a1, a2
 ; RV64I-NEXT:    andi a1, a1, 255
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    li a1, 255
@@ -196,7 +196,7 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64IZbb-LABEL: func8:
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    andi a0, a0, 255
-; RV64IZbb-NEXT:    mulw a1, a1, a2
+; RV64IZbb-NEXT:    mul a1, a1, a2
 ; RV64IZbb-NEXT:    andi a1, a1, 255
 ; RV64IZbb-NEXT:    add a0, a0, a1
 ; RV64IZbb-NEXT:    li a1, 255
@@ -224,7 +224,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64I-LABEL: func4:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    andi a0, a0, 15
-; RV64I-NEXT:    mulw a1, a1, a2
+; RV64I-NEXT:    mul a1, a1, a2
 ; RV64I-NEXT:    andi a1, a1, 15
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    li a1, 15
@@ -247,7 +247,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64IZbb-LABEL: func4:
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    andi a0, a0, 15
-; RV64IZbb-NEXT:    mulw a1, a1, a2
+; RV64IZbb-NEXT:    mul a1, a1, a2
 ; RV64IZbb-NEXT:    andi a1, a1, 15
 ; RV64IZbb-NEXT:    add a0, a0, a1
 ; RV64IZbb-NEXT:    li a1, 15

diff  --git a/llvm/test/CodeGen/RISCV/urem-lkk.ll b/llvm/test/CodeGen/RISCV/urem-lkk.ll
index 73ec7d23f554f..3d181c3a30d09 100644
--- a/llvm/test/CodeGen/RISCV/urem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-lkk.ll
@@ -53,7 +53,7 @@ define i32 @fold_urem_positive_odd(i32 %x) nounwind {
 ; RV64IM-NEXT:    add a1, a2, a1
 ; RV64IM-NEXT:    srli a1, a1, 6
 ; RV64IM-NEXT:    li a2, 95
-; RV64IM-NEXT:    mulw a1, a1, a2
+; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = urem i32 %x, 95
@@ -99,7 +99,7 @@ define i32 @fold_urem_positive_even(i32 %x) nounwind {
 ; RV64IM-NEXT:    mulhu a1, a1, a2
 ; RV64IM-NEXT:    srli a1, a1, 42
 ; RV64IM-NEXT:    li a2, 1060
-; RV64IM-NEXT:    mulw a1, a1, a2
+; RV64IM-NEXT:    mul a1, a1, a2
 ; RV64IM-NEXT:    subw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = urem i32 %x, 1060
@@ -179,7 +179,7 @@ define i32 @combine_urem_udiv(i32 %x) nounwind {
 ; RV64IM-NEXT:    add a1, a2, a1
 ; RV64IM-NEXT:    srli a1, a1, 6
 ; RV64IM-NEXT:    li a2, 95
-; RV64IM-NEXT:    mulw a2, a1, a2
+; RV64IM-NEXT:    mul a2, a1, a2
 ; RV64IM-NEXT:    add a0, a0, a1
 ; RV64IM-NEXT:    subw a0, a0, a2
 ; RV64IM-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
index 1ac6179f62e45..36d064ea3d505 100644
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -49,7 +49,7 @@ define i1 @test_urem_odd(i13 %X) nounwind {
 ; RV64M:       # %bb.0:
 ; RV64M-NEXT:    lui a1, 1
 ; RV64M-NEXT:    addiw a1, a1, -819
-; RV64M-NEXT:    mulw a0, a0, a1
+; RV64M-NEXT:    mul a0, a0, a1
 ; RV64M-NEXT:    slli a0, a0, 51
 ; RV64M-NEXT:    srli a0, a0, 51
 ; RV64M-NEXT:    sltiu a0, a0, 1639
@@ -69,7 +69,7 @@ define i1 @test_urem_odd(i13 %X) nounwind {
 ; RV64MV:       # %bb.0:
 ; RV64MV-NEXT:    lui a1, 1
 ; RV64MV-NEXT:    addiw a1, a1, -819
-; RV64MV-NEXT:    mulw a0, a0, a1
+; RV64MV-NEXT:    mul a0, a0, a1
 ; RV64MV-NEXT:    slli a0, a0, 51
 ; RV64MV-NEXT:    srli a0, a0, 51
 ; RV64MV-NEXT:    sltiu a0, a0, 1639
@@ -140,7 +140,7 @@ define i1 @test_urem_even(i27 %X) nounwind {
 ; RV64M:       # %bb.0:
 ; RV64M-NEXT:    lui a1, 28087
 ; RV64M-NEXT:    addiw a1, a1, -585
-; RV64M-NEXT:    mulw a0, a0, a1
+; RV64M-NEXT:    mul a0, a0, a1
 ; RV64M-NEXT:    slli a1, a0, 26
 ; RV64M-NEXT:    slli a0, a0, 37
 ; RV64M-NEXT:    srli a0, a0, 38
@@ -172,7 +172,7 @@ define i1 @test_urem_even(i27 %X) nounwind {
 ; RV64MV:       # %bb.0:
 ; RV64MV-NEXT:    lui a1, 28087
 ; RV64MV-NEXT:    addiw a1, a1, -585
-; RV64MV-NEXT:    mulw a0, a0, a1
+; RV64MV-NEXT:    mul a0, a0, a1
 ; RV64MV-NEXT:    slli a1, a0, 26
 ; RV64MV-NEXT:    slli a0, a0, 37
 ; RV64MV-NEXT:    srli a0, a0, 38
@@ -292,7 +292,7 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
 ; RV64M-LABEL: test_urem_negative_odd:
 ; RV64M:       # %bb.0:
 ; RV64M-NEXT:    li a1, 307
-; RV64M-NEXT:    mulw a0, a0, a1
+; RV64M-NEXT:    mul a0, a0, a1
 ; RV64M-NEXT:    andi a0, a0, 511
 ; RV64M-NEXT:    sltiu a0, a0, 2
 ; RV64M-NEXT:    xori a0, a0, 1
@@ -310,7 +310,7 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
 ; RV64MV-LABEL: test_urem_negative_odd:
 ; RV64MV:       # %bb.0:
 ; RV64MV-NEXT:    li a1, 307
-; RV64MV-NEXT:    mulw a0, a0, a1
+; RV64MV-NEXT:    mul a0, a0, a1
 ; RV64MV-NEXT:    andi a0, a0, 511
 ; RV64MV-NEXT:    sltiu a0, a0, 2
 ; RV64MV-NEXT:    xori a0, a0, 1
@@ -487,7 +487,7 @@ define void @test_urem_vec(ptr %X) nounwind {
 ; RV64M-NEXT:    srli a3, a1, 11
 ; RV64M-NEXT:    andi a1, a1, 2047
 ; RV64M-NEXT:    li a4, 683
-; RV64M-NEXT:    mulw a1, a1, a4
+; RV64M-NEXT:    mul a1, a1, a4
 ; RV64M-NEXT:    slli a4, a1, 10
 ; RV64M-NEXT:    slli a1, a1, 53
 ; RV64M-NEXT:    srli a1, a1, 54
@@ -495,12 +495,12 @@ define void @test_urem_vec(ptr %X) nounwind {
 ; RV64M-NEXT:    andi a1, a1, 2047
 ; RV64M-NEXT:    sltiu a1, a1, 342
 ; RV64M-NEXT:    li a4, 1463
-; RV64M-NEXT:    mulw a3, a3, a4
+; RV64M-NEXT:    mul a3, a3, a4
 ; RV64M-NEXT:    addiw a3, a3, -1463
 ; RV64M-NEXT:    andi a3, a3, 2047
 ; RV64M-NEXT:    sltiu a3, a3, 293
 ; RV64M-NEXT:    li a4, 819
-; RV64M-NEXT:    mulw a2, a2, a4
+; RV64M-NEXT:    mul a2, a2, a4
 ; RV64M-NEXT:    addiw a2, a2, -1638
 ; RV64M-NEXT:    andi a2, a2, 2047
 ; RV64M-NEXT:    sltiu a2, a2, 2

diff  --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
index b2e50f9fc7c5b..d8f364ec8c00f 100644
--- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
@@ -142,23 +142,23 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    lui a6, %hi(.LCPI0_1)
 ; RV64IM-NEXT:    ld a6, %lo(.LCPI0_1)(a6)
 ; RV64IM-NEXT:    li a7, 95
-; RV64IM-NEXT:    mulw a3, a3, a7
+; RV64IM-NEXT:    mul a3, a3, a7
 ; RV64IM-NEXT:    subw a2, a2, a3
 ; RV64IM-NEXT:    mulhu a3, a1, a6
 ; RV64IM-NEXT:    lui a6, %hi(.LCPI0_2)
 ; RV64IM-NEXT:    ld a6, %lo(.LCPI0_2)(a6)
 ; RV64IM-NEXT:    li a7, 124
-; RV64IM-NEXT:    mulw a3, a3, a7
+; RV64IM-NEXT:    mul a3, a3, a7
 ; RV64IM-NEXT:    subw a1, a1, a3
 ; RV64IM-NEXT:    mulhu a3, a5, a6
 ; RV64IM-NEXT:    lui a6, %hi(.LCPI0_3)
 ; RV64IM-NEXT:    ld a6, %lo(.LCPI0_3)(a6)
 ; RV64IM-NEXT:    li a7, 98
-; RV64IM-NEXT:    mulw a3, a3, a7
+; RV64IM-NEXT:    mul a3, a3, a7
 ; RV64IM-NEXT:    subw a5, a5, a3
 ; RV64IM-NEXT:    mulhu a3, a4, a6
 ; RV64IM-NEXT:    li a6, 1003
-; RV64IM-NEXT:    mulw a3, a3, a6
+; RV64IM-NEXT:    mul a3, a3, a6
 ; RV64IM-NEXT:    subw a4, a4, a3
 ; RV64IM-NEXT:    sh a4, 6(a0)
 ; RV64IM-NEXT:    sh a5, 4(a0)
@@ -291,16 +291,16 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    lhu a1, 8(a1)
 ; RV64IM-NEXT:    mulhu a6, a2, a3
 ; RV64IM-NEXT:    li a7, 95
-; RV64IM-NEXT:    mulw a6, a6, a7
+; RV64IM-NEXT:    mul a6, a6, a7
 ; RV64IM-NEXT:    subw a2, a2, a6
 ; RV64IM-NEXT:    mulhu a6, a1, a3
-; RV64IM-NEXT:    mulw a6, a6, a7
+; RV64IM-NEXT:    mul a6, a6, a7
 ; RV64IM-NEXT:    subw a1, a1, a6
 ; RV64IM-NEXT:    mulhu a6, a5, a3
-; RV64IM-NEXT:    mulw a6, a6, a7
+; RV64IM-NEXT:    mul a6, a6, a7
 ; RV64IM-NEXT:    subw a5, a5, a6
 ; RV64IM-NEXT:    mulhu a3, a4, a3
-; RV64IM-NEXT:    mulw a3, a3, a7
+; RV64IM-NEXT:    mul a3, a3, a7
 ; RV64IM-NEXT:    subw a4, a4, a3
 ; RV64IM-NEXT:    sh a4, 6(a0)
 ; RV64IM-NEXT:    sh a5, 4(a0)
@@ -495,13 +495,13 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    lhu a1, 16(a1)
 ; RV64IM-NEXT:    mulhu a6, a2, a3
 ; RV64IM-NEXT:    li a7, 95
-; RV64IM-NEXT:    mulw t0, a6, a7
+; RV64IM-NEXT:    mul t0, a6, a7
 ; RV64IM-NEXT:    mulhu t1, a1, a3
-; RV64IM-NEXT:    mulw t2, t1, a7
+; RV64IM-NEXT:    mul t2, t1, a7
 ; RV64IM-NEXT:    mulhu t3, a5, a3
-; RV64IM-NEXT:    mulw t4, t3, a7
+; RV64IM-NEXT:    mul t4, t3, a7
 ; RV64IM-NEXT:    mulhu a3, a4, a3
-; RV64IM-NEXT:    mulw a7, a3, a7
+; RV64IM-NEXT:    mul a7, a3, a7
 ; RV64IM-NEXT:    add a3, a4, a3
 ; RV64IM-NEXT:    subw a3, a3, a7
 ; RV64IM-NEXT:    add a5, a5, t3
@@ -616,7 +616,7 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    lhu a1, 0(a1)
 ; RV64IM-NEXT:    mulhu a3, a2, a3
 ; RV64IM-NEXT:    li a6, 95
-; RV64IM-NEXT:    mulw a3, a3, a6
+; RV64IM-NEXT:    mul a3, a3, a6
 ; RV64IM-NEXT:    subw a2, a2, a3
 ; RV64IM-NEXT:    andi a1, a1, 63
 ; RV64IM-NEXT:    andi a5, a5, 31
@@ -745,18 +745,18 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
 ; RV64IM-NEXT:    lui a5, %hi(.LCPI4_1)
 ; RV64IM-NEXT:    ld a5, %lo(.LCPI4_1)(a5)
 ; RV64IM-NEXT:    li a6, 654
-; RV64IM-NEXT:    mulw a3, a3, a6
+; RV64IM-NEXT:    mul a3, a3, a6
 ; RV64IM-NEXT:    subw a2, a2, a3
 ; RV64IM-NEXT:    mulhu a3, a1, a5
 ; RV64IM-NEXT:    lui a5, %hi(.LCPI4_2)
 ; RV64IM-NEXT:    ld a5, %lo(.LCPI4_2)(a5)
 ; RV64IM-NEXT:    li a6, 23
-; RV64IM-NEXT:    mulw a3, a3, a6
+; RV64IM-NEXT:    mul a3, a3, a6
 ; RV64IM-NEXT:    subw a1, a1, a3
 ; RV64IM-NEXT:    mulhu a3, a4, a5
 ; RV64IM-NEXT:    lui a5, 1
 ; RV64IM-NEXT:    addiw a5, a5, 1327
-; RV64IM-NEXT:    mulw a3, a3, a5
+; RV64IM-NEXT:    mul a3, a3, a5
 ; RV64IM-NEXT:    subw a4, a4, a3
 ; RV64IM-NEXT:    sh zero, 0(a0)
 ; RV64IM-NEXT:    sh a4, 6(a0)

diff  --git a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
index 6f868b328b7cc..393466d89e8ca 100644
--- a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
@@ -22,7 +22,7 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ;
 ; RV64I-LABEL: func32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    mulw a1, a1, a2
+; RV64I-NEXT:    mul a1, a1, a2
 ; RV64I-NEXT:    subw a1, a0, a1
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sltu a0, a0, a1
@@ -143,7 +143,7 @@ define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
 ; RV64IZbb-LABEL: func16:
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    zext.h a0, a0
-; RV64IZbb-NEXT:    mulw a1, a1, a2
+; RV64IZbb-NEXT:    mul a1, a1, a2
 ; RV64IZbb-NEXT:    zext.h a1, a1
 ; RV64IZbb-NEXT:    maxu a0, a0, a1
 ; RV64IZbb-NEXT:    sub a0, a0, a1
@@ -168,7 +168,7 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64I-LABEL: func8:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    andi a0, a0, 255
-; RV64I-NEXT:    mulw a1, a1, a2
+; RV64I-NEXT:    mul a1, a1, a2
 ; RV64I-NEXT:    andi a1, a1, 255
 ; RV64I-NEXT:    sub a1, a0, a1
 ; RV64I-NEXT:    sltu a0, a0, a1
@@ -188,7 +188,7 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV64IZbb-LABEL: func8:
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    andi a0, a0, 255
-; RV64IZbb-NEXT:    mulw a1, a1, a2
+; RV64IZbb-NEXT:    mul a1, a1, a2
 ; RV64IZbb-NEXT:    andi a1, a1, 255
 ; RV64IZbb-NEXT:    maxu a0, a0, a1
 ; RV64IZbb-NEXT:    sub a0, a0, a1
@@ -213,7 +213,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64I-LABEL: func4:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    andi a0, a0, 15
-; RV64I-NEXT:    mulw a1, a1, a2
+; RV64I-NEXT:    mul a1, a1, a2
 ; RV64I-NEXT:    andi a1, a1, 15
 ; RV64I-NEXT:    sub a1, a0, a1
 ; RV64I-NEXT:    sltu a0, a0, a1
@@ -233,7 +233,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV64IZbb-LABEL: func4:
 ; RV64IZbb:       # %bb.0:
 ; RV64IZbb-NEXT:    andi a0, a0, 15
-; RV64IZbb-NEXT:    mulw a1, a1, a2
+; RV64IZbb-NEXT:    mul a1, a1, a2
 ; RV64IZbb-NEXT:    andi a1, a1, 15
 ; RV64IZbb-NEXT:    maxu a0, a0, a1
 ; RV64IZbb-NEXT:    sub a0, a0, a1


        


More information about the llvm-commits mailing list