[llvm] [RISCV] Expand constant multiplication for targets without M extension (PR #137195)

Iris Shi via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 25 02:15:31 PDT 2025


https://github.com/el-ev updated https://github.com/llvm/llvm-project/pull/137195

From 8b010e844b3fc5bcefdb155614fc746436be4c2f Mon Sep 17 00:00:00 2001
From: Iris Shi <0.0 at owo.li>
Date: Thu, 24 Apr 2025 23:21:45 +0800
Subject: [PATCH 1/2] pre-commit tests

---
 llvm/test/CodeGen/RISCV/mul-expand.ll | 254 ++++++++++++++++++++++++++
 llvm/test/CodeGen/RISCV/mul.ll        | 171 ++++++++++++++++-
 2 files changed, 421 insertions(+), 4 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/mul-expand.ll

diff --git a/llvm/test/CodeGen/RISCV/mul-expand.ll b/llvm/test/CodeGen/RISCV/mul-expand.ll
new file mode 100644
index 0000000000000..f683a99a42e9e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/mul-expand.ll
@@ -0,0 +1,254 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+
+define i32 @muli32_0x555(i32 %a) nounwind {
+; RV32I-LABEL: muli32_0x555:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 1365
+; RV32I-NEXT:    tail __mulsi3
+;
+; RV64I-LABEL: muli32_0x555:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a1, 1365
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %a1 = mul i32 %a, 1365
+  ret i32 %a1
+}
+
+define i64 @muli64_0x555(i64 %a) nounwind {
+; RV32I-LABEL: muli64_0x555:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 1365
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    call __muldi3
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: muli64_0x555:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 1365
+; RV64I-NEXT:    tail __muldi3
+  %a1 = mul i64 %a, 1365
+  ret i64 %a1
+}
+
+define i32 @muli32_0x33333333(i32 %a) nounwind {
+; RV32I-LABEL: muli32_0x33333333:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a1, 209715
+; RV32I-NEXT:    addi a1, a1, 819
+; RV32I-NEXT:    tail __mulsi3
+;
+; RV64I-LABEL: muli32_0x33333333:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    lui a1, 209715
+; RV64I-NEXT:    addiw a1, a1, 819
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+    %a1 = mul i32 %a, 858993459
+    ret i32 %a1
+}
+
+define i64 @muli64_0x33333333(i64 %a) nounwind {
+; RV32I-LABEL: muli64_0x33333333:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    lui a2, 209715
+; RV32I-NEXT:    addi a2, a2, 819
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    call __muldi3
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: muli64_0x33333333:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, 209715
+; RV64I-NEXT:    addiw a1, a1, 819
+; RV64I-NEXT:    tail __muldi3
+    %a1 = mul i64 %a, 858993459
+    ret i64 %a1
+}
+
+define i32 @muli32_0xaaaaaaaa(i32 %a) nounwind {
+; RV32I-LABEL: muli32_0xaaaaaaaa:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a1, 699051
+; RV32I-NEXT:    addi a1, a1, -1366
+; RV32I-NEXT:    tail __mulsi3
+;
+; RV64I-LABEL: muli32_0xaaaaaaaa:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    lui a1, 699051
+; RV64I-NEXT:    addiw a1, a1, -1366
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %a1 = mul i32 %a, -1431655766
+  ret i32 %a1
+}
+
+define i64 @muli64_0xaaaaaaaa(i64 %a) nounwind {
+; RV32I-LABEL: muli64_0xaaaaaaaa:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    lui a2, 699051
+; RV32I-NEXT:    addi a2, a2, -1366
+; RV32I-NEXT:    li a3, -1
+; RV32I-NEXT:    call __muldi3
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: muli64_0xaaaaaaaa:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, 699051
+; RV64I-NEXT:    addiw a1, a1, -1366
+; RV64I-NEXT:    tail __muldi3
+  %a1 = mul i64 %a, -1431655766
+  ret i64 %a1
+}
+
+define i32 @muli32_0x0fffffff(i32 %a) nounwind {
+; RV32I-LABEL: muli32_0x0fffffff:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 28
+; RV32I-NEXT:    sub a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: muli32_0x0fffffff:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 28
+; RV64I-NEXT:    subw a0, a1, a0
+; RV64I-NEXT:    ret
+  %a1 = mul i32 %a, 268435455
+  ret i32 %a1
+}
+
+define i64 @muli64_0x0fffffff(i64 %a) nounwind {
+; RV32I-LABEL: muli64_0x0fffffff:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a2, a0, 28
+; RV32I-NEXT:    srli a3, a0, 4
+; RV32I-NEXT:    slli a4, a1, 28
+; RV32I-NEXT:    sltu a5, a2, a0
+; RV32I-NEXT:    or a3, a4, a3
+; RV32I-NEXT:    sub a1, a3, a1
+; RV32I-NEXT:    sub a1, a1, a5
+; RV32I-NEXT:    sub a0, a2, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: muli64_0x0fffffff:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 28
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+  %a1 = mul i64 %a, 268435455
+  ret i64 %a1
+}
+
+define i32 @muli32_0xf0f0f0f0f0(i32 %a) nounwind {
+; RV32I-LABEL: muli32_0xf0f0f0f0f0:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a1, 986895
+; RV32I-NEXT:    addi a1, a1, 240
+; RV32I-NEXT:    tail __mulsi3
+;
+; RV64I-LABEL: muli32_0xf0f0f0f0f0:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    lui a1, 986895
+; RV64I-NEXT:    addiw a1, a1, 240
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %a1 = mul i32 %a, -252645136
+  ret i32 %a1
+}
+
+define i64 @muli64_0xf0f0f0f0f0(i64 %a) nounwind {
+; RV32I-LABEL: muli64_0xf0f0f0f0f0:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    lui a2, 986895
+; RV32I-NEXT:    addi a2, a2, 240
+; RV32I-NEXT:    li a3, -1
+; RV32I-NEXT:    call __muldi3
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: muli64_0xf0f0f0f0f0:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, 986895
+; RV64I-NEXT:    addiw a1, a1, 240
+; RV64I-NEXT:    tail __muldi3
+  %a1 = mul i64 %a, -252645136
+  ret i64 %a1
+}
+
+define i32 @muli32_0xf7f7f7f7(i32 %a) nounwind {
+; RV32I-LABEL: muli32_0xf7f7f7f7:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a1, 1015679
+; RV32I-NEXT:    addi a1, a1, 2039
+; RV32I-NEXT:    tail __mulsi3
+;
+; RV64I-LABEL: muli32_0xf7f7f7f7:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    lui a1, 1015679
+; RV64I-NEXT:    addiw a1, a1, 2039
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %a1 = mul i32 %a, -134744073
+  ret i32 %a1
+}
+
+define i64 @muli64_0xf7f7f7f7(i64 %a) nounwind {
+; RV32I-LABEL: muli64_0xf7f7f7f7:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    lui a2, 1015679
+; RV32I-NEXT:    addi a2, a2, 2039
+; RV32I-NEXT:    li a3, -1
+; RV32I-NEXT:    call __muldi3
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: muli64_0xf7f7f7f7:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, 1015679
+; RV64I-NEXT:    addiw a1, a1, 2039
+; RV64I-NEXT:    tail __muldi3
+  %a1 = mul i64 %a, -134744073
+  ret i64 %a1
+}
diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index 548c7e1c6ea8c..8dd691946ae5a 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -464,6 +464,37 @@ define i32 @mulhu_constant(i32 %a) nounwind {
   ret i32 %4
 }
 
+define i32 @muli32_p10(i32 %a) nounwind {
+; RV32I-LABEL: muli32_p10:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 10
+; RV32I-NEXT:    tail __mulsi3
+;
+; RV32IM-LABEL: muli32_p10:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    li a1, 10
+; RV32IM-NEXT:    mul a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: muli32_p10:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a1, 10
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: muli32_p10:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    li a1, 10
+; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, 10
+  ret i32 %1
+}
+
 define i32 @muli32_p14(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p14:
 ; RV32I:       # %bb.0:
@@ -494,6 +525,37 @@ define i32 @muli32_p14(i32 %a) nounwind {
   ret i32 %1
 }
 
+define i32 @muli32_p20(i32 %a) nounwind {
+; RV32I-LABEL: muli32_p20:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 20
+; RV32I-NEXT:    tail __mulsi3
+;
+; RV32IM-LABEL: muli32_p20:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    li a1, 20
+; RV32IM-NEXT:    mul a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: muli32_p20:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    li a1, 20
+; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: muli32_p20:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    li a1, 20
+; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, 20
+  ret i32 %1
+}
+
 define i32 @muli32_p28(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p28:
 ; RV32I:       # %bb.0:
@@ -672,6 +734,34 @@ define i32 @muli32_p65(i32 %a) nounwind {
   ret i32 %1
 }
 
+define i32 @muli32_p66(i32 %a) nounwind {
+; RV32I-LABEL: muli32_p66:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 6
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: muli32_p66:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    slli a1, a0, 6
+; RV32IM-NEXT:    add a0, a1, a0
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: muli32_p66:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    addw a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64IM-LABEL: muli32_p66:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a1, a0, 6
+; RV64IM-NEXT:    addw a0, a1, a0
+; RV64IM-NEXT:    ret
+  %1 = mul i32 %a, 65
+  ret i32 %1
+}
+
 define i32 @muli32_p63(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p63:
 ; RV32I:       # %bb.0:
@@ -778,7 +868,80 @@ define i64 @muli64_p63(i64 %a) nounwind {
   ret i64 %1
 }
 
+define i64 @muli64_p60(i64 %a) nounwind {
+; RV32I-LABEL: muli64_p60:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 60
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    call __muldi3
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: muli64_p60:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    li a2, 60
+; RV32IM-NEXT:    slli a3, a1, 2
+; RV32IM-NEXT:    slli a1, a1, 6
+; RV32IM-NEXT:    sub a1, a1, a3
+; RV32IM-NEXT:    slli a3, a0, 2
+; RV32IM-NEXT:    mulhu a2, a0, a2
+; RV32IM-NEXT:    slli a0, a0, 6
+; RV32IM-NEXT:    add a1, a2, a1
+; RV32IM-NEXT:    sub a0, a0, a3
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: muli64_p60:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 60
+; RV64I-NEXT:    tail __muldi3
+;
+; RV64IM-LABEL: muli64_p60:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slli a1, a0, 2
+; RV64IM-NEXT:    slli a0, a0, 6
+; RV64IM-NEXT:    sub a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i64 %a, 60
+  ret i64 %1
+}
 
+define i64 @muli64_p68(i64 %a) nounwind {
+; RV32I-LABEL: muli64_p68:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    li a2, 68
+; RV32I-NEXT:    li a3, 0
+; RV32I-NEXT:    call __muldi3
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV32IM-LABEL: muli64_p68:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    li a2, 68
+; RV32IM-NEXT:    mul a1, a1, a2
+; RV32IM-NEXT:    mulhu a3, a0, a2
+; RV32IM-NEXT:    add a1, a3, a1
+; RV32IM-NEXT:    mul a0, a0, a2
+; RV32IM-NEXT:    ret
+;
+; RV64I-LABEL: muli64_p68:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 68
+; RV64I-NEXT:    tail __muldi3
+;
+; RV64IM-LABEL: muli64_p68:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    li a1, 68
+; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    ret
+  %1 = mul i64 %a, 68
+  ret i64 %1
+}
 
 define i32 @muli32_m63(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_m63:
@@ -1327,10 +1490,10 @@ define i128 @muli128_m3840(i128 %a) nounwind {
 ; RV32I-NEXT:    sltu a7, a5, a4
 ; RV32I-NEXT:    sub a6, a6, t2
 ; RV32I-NEXT:    mv t1, a7
-; RV32I-NEXT:    beq t0, a3, .LBB36_2
+; RV32I-NEXT:    beq t0, a3, .LBB41_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sltu t1, t0, a3
-; RV32I-NEXT:  .LBB36_2:
+; RV32I-NEXT:  .LBB41_2:
 ; RV32I-NEXT:    sub a2, a2, a1
 ; RV32I-NEXT:    sub a1, t0, a3
 ; RV32I-NEXT:    sub a5, a5, a4
@@ -1441,10 +1604,10 @@ define i128 @muli128_m63(i128 %a) nounwind {
 ; RV32I-NEXT:    sltu a7, a3, a6
 ; RV32I-NEXT:    or t0, t0, a5
 ; RV32I-NEXT:    mv a5, a7
-; RV32I-NEXT:    beq a4, t0, .LBB37_2
+; RV32I-NEXT:    beq a4, t0, .LBB42_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    sltu a5, a4, t0
-; RV32I-NEXT:  .LBB37_2:
+; RV32I-NEXT:  .LBB42_2:
 ; RV32I-NEXT:    srli t1, a4, 26
 ; RV32I-NEXT:    slli t2, a2, 6
 ; RV32I-NEXT:    srli t3, a2, 26

From faac57c637d4cf5339592f160eb0653051dae5f1 Mon Sep 17 00:00:00 2001
From: Iris Shi <0.0 at owo.li>
Date: Thu, 24 Apr 2025 23:23:49 +0800
Subject: [PATCH 2/2] [RISCV] Expand constant multiplication for targets
 without M extension

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 101 ++++-
 llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll    |  58 ++-
 .../CodeGen/RISCV/ctz_zero_return_test.ll     | 360 +++++++++++-------
 llvm/test/CodeGen/RISCV/mul-expand.ll         | 351 ++++++++++++++---
 llvm/test/CodeGen/RISCV/mul.ll                | 249 +++++++-----
 llvm/test/CodeGen/RISCV/rv32xtheadba.ll       |  55 +--
 llvm/test/CodeGen/RISCV/rv32zba.ll            |  55 +--
 llvm/test/CodeGen/RISCV/rv64xtheadba.ll       |  65 ++--
 llvm/test/CodeGen/RISCV/rv64xtheadbb.ll       | 160 +++++---
 llvm/test/CodeGen/RISCV/rv64zba.ll            | 106 +++---
 llvm/test/CodeGen/RISCV/rv64zbb.ll            | 160 +++++---
 .../CodeGen/RISCV/rvv/calling-conv-fastcc.ll  |  99 +++--
 .../fixed-vectors-strided-load-store-asm.ll   | 140 +++----
 .../CodeGen/RISCV/rvv/known-never-zero.ll     |  33 +-
 .../RISCV/rvv/vreductions-fp-sdnode.ll        |  12 +-
 .../CodeGen/RISCV/srem-seteq-illegal-types.ll | 231 +++++++----
 .../CodeGen/RISCV/urem-seteq-illegal-types.ll | 346 +++++++++--------
 .../CodeGen/RISCV/xqccmp-additional-stack.ll  |  46 +--
 .../CodeGen/RISCV/zcmp-additional-stack.ll    |  46 +--
 19 files changed, 1702 insertions(+), 971 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 02451ee716865..809c329e05c5f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20,6 +20,7 @@
 #include "RISCVSelectionDAGInfo.h"
 #include "RISCVSubtarget.h"
 #include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/Analysis/ValueTracking.h"
@@ -15436,6 +15437,73 @@ static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
 }
 
+// Try to expand a constant multiply into a sequence of shifts and adds/subs,
+// for targets without a native multiply instruction.
+static SDValue expandMulToBasicOps(SDNode *N, SelectionDAG &DAG,
+                                   uint64_t MulAmt) {
+  const uint64_t BitWidth = N->getValueType(0).getFixedSizeInBits();
+  SDLoc DL(N);
+
+  if (MulAmt == 0)
+    return DAG.getConstant(0, DL, N->getValueType(0));
+
+  // Find the non-adjacent form (NAF) of the multiplier.
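+  // For example, 11 = 0b1011 has NAF digits (1, 0, -1, 0, -1), i.e. 16 - 4 - 1,
+  // so X * 11 is emitted as -X - (X << 2) + (X << 4).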
+  llvm::SmallVector<std::pair<bool, uint64_t>> Sequence; // {isAdd, shamt}
+  uint64_t E = MulAmt;
+  uint64_t I = 0;
+  while (E > 0) {
+    if (E & 1) {
+      if (I >= BitWidth)
+        break;
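+      // Emit +1 when the low two bits are 01 and -1 when they are 11; the -1
+      // case carries into the next bit, so no two adjacent NAF digits are set.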
+      int8_t Z = ((E & 3) == 1) ? 1 : -1;
+      Sequence.push_back({(Z == 1), I});
+      E -= Z;
+    }
+    E >>= 1;
+    I++;
+  }
+
+  SDValue Result = DAG.getConstant(0, DL, N->getValueType(0));
+  SDValue N0 = N->getOperand(0);
+
+  for (const auto &Op : Sequence) {
+    SDValue ShiftVal;
+    if (Op.second > 0)
+      ShiftVal =
+          DAG.getNode(ISD::SHL, DL, N->getValueType(0), N0,
+                      DAG.getConstant(Op.second, DL, N->getValueType(0)));
+    else
+      ShiftVal = N0;
+
+    ISD::NodeType AddSubOp = Op.first ? ISD::ADD : ISD::SUB;
+    Result = DAG.getNode(AddSubOp, DL, N->getValueType(0), Result, ShiftVal);
+  }
+
+  return Result;
+}
+
+// 2^N +/- 2^M -> (add/sub (shl X, C1), (shl X, C2))
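+// For example, 0x0fffffff = 2^28 - 2^0 is lowered to (sub (shl X, 28), X).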
+static SDValue expandMulToAddOrSubOfShl(SDNode *N, SelectionDAG &DAG,
+                                        uint64_t MulAmt) {
+  uint64_t MulAmtLowBit = MulAmt & (-MulAmt);
+  ISD::NodeType Op;
+  if (isPowerOf2_64(MulAmt + MulAmtLowBit))
+    Op = ISD::SUB;
+  else if (isPowerOf2_64(MulAmt - MulAmtLowBit))
+    Op = ISD::ADD;
+  else
+    return SDValue();
+  uint64_t ShiftAmt1 = MulAmt + MulAmtLowBit;
+  SDLoc DL(N);
+  SDValue Shift1 =
+      DAG.getNode(ISD::SHL, DL, N->getValueType(0), N->getOperand(0),
+                  DAG.getConstant(Log2_64(ShiftAmt1), DL, N->getValueType(0)));
+  SDValue Shift2 = DAG.getNode(
+      ISD::SHL, DL, N->getValueType(0), N->getOperand(0),
+      DAG.getConstant(Log2_64(MulAmtLowBit), DL, N->getValueType(0)));
+  return DAG.getNode(Op, DL, N->getValueType(0), Shift1, Shift2);
+}
+
 // Try to expand a scalar multiply to a faster sequence.
 static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
@@ -15447,20 +15515,23 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
   if (DAG.getMachineFunction().getFunction().hasMinSize())
     return SDValue();
 
-  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
-    return SDValue();
-
   if (VT != Subtarget.getXLenVT())
     return SDValue();
 
-  const bool HasShlAdd =
-      Subtarget.hasStdExtZba() || Subtarget.hasVendorXTHeadBa();
-
   ConstantSDNode *CNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
   if (!CNode)
     return SDValue();
   uint64_t MulAmt = CNode->getZExtValue();
 
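+  // Without M or Zmmul there is no native multiply, so expand the constant
+  // multiply into shifts and adds/subs rather than emitting a libcall.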
+  if (!Subtarget.hasStdExtM() && !Subtarget.hasStdExtZmmul())
+    return expandMulToBasicOps(N, DAG, MulAmt);
+
+  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
+    return SDValue();
+
+  const bool HasShlAdd =
+      Subtarget.hasStdExtZba() || Subtarget.hasVendorXTHeadBa();
+
   // WARNING: The code below is knowingly incorrect with regards to undef semantics.
   // We're adding additional uses of X here, and in principle, we should be freezing
   // X before doing so.  However, adding freeze here causes real regressions, and no
@@ -15569,22 +15640,7 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
         return DAG.getNode(ISD::SUB, DL, VT, Shift1, Mul359);
       }
     }
-  }
-
-  // 2^N - 2^M -> (sub (shl X, C1), (shl X, C2))
-  uint64_t MulAmtLowBit = MulAmt & (-MulAmt);
-  if (isPowerOf2_64(MulAmt + MulAmtLowBit)) {
-    uint64_t ShiftAmt1 = MulAmt + MulAmtLowBit;
-    SDLoc DL(N);
-    SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
-                                 DAG.getConstant(Log2_64(ShiftAmt1), DL, VT));
-    SDValue Shift2 =
-        DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
-                    DAG.getConstant(Log2_64(MulAmtLowBit), DL, VT));
-    return DAG.getNode(ISD::SUB, DL, VT, Shift1, Shift2);
-  }
 
-  if (HasShlAdd) {
     for (uint64_t Divisor : {3, 5, 9}) {
       if (MulAmt % Divisor != 0)
         continue;
@@ -15610,6 +15666,9 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
     }
   }
 
+  if (SDValue V = expandMulToAddOrSubOfShl(N, DAG, MulAmt))
+    return V;
+
   return SDValue();
 }
 
diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
index a46168f114bb9..5b7856b98eb88 100644
--- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -262,20 +262,33 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
 ; RV64I-NEXT:    sext.w a1, a0
 ; RV64I-NEXT:    beqz a1, .LBB2_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    neg a1, a0
+; RV64I-NEXT:    negw a1, a0
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    slli a2, a0, 8
+; RV64I-NEXT:    slli a3, a0, 10
+; RV64I-NEXT:    slli a4, a0, 12
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 16
+; RV64I-NEXT:    subw a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 18
+; RV64I-NEXT:    subw a2, a2, a4
+; RV64I-NEXT:    slli a4, a0, 4
+; RV64I-NEXT:    subw a4, a0, a4
+; RV64I-NEXT:    add a1, a4, a1
+; RV64I-NEXT:    slli a4, a0, 14
+; RV64I-NEXT:    subw a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 23
+; RV64I-NEXT:    subw a2, a2, a4
+; RV64I-NEXT:    slli a0, a0, 27
+; RV64I-NEXT:    add a1, a1, a3
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI2_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI2_0)
 ; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB2_2:
 ; RV64I-NEXT:    li a0, 32
@@ -718,20 +731,33 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
 ;
 ; RV64I-LABEL: test_cttz_i32_zero_undef:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    neg a1, a0
+; RV64I-NEXT:    negw a1, a0
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    slli a2, a0, 8
+; RV64I-NEXT:    slli a3, a0, 10
+; RV64I-NEXT:    slli a4, a0, 12
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 16
+; RV64I-NEXT:    subw a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 18
+; RV64I-NEXT:    subw a2, a2, a4
+; RV64I-NEXT:    slli a4, a0, 4
+; RV64I-NEXT:    subw a4, a0, a4
+; RV64I-NEXT:    add a1, a4, a1
+; RV64I-NEXT:    slli a4, a0, 14
+; RV64I-NEXT:    subw a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 23
+; RV64I-NEXT:    subw a2, a2, a4
+; RV64I-NEXT:    slli a0, a0, 27
+; RV64I-NEXT:    add a1, a1, a3
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI6_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI6_0)
 ; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
 ; RV32M-LABEL: test_cttz_i32_zero_undef:
diff --git a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
index 03a6a6b1c4b7d..33907e10730a7 100644
--- a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
+++ b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
@@ -162,27 +162,38 @@ define i64 @ctz_dereferencing_pointer_zext(ptr %b) nounwind {
 ;
 ; RV64I-LABEL: ctz_dereferencing_pointer_zext:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lw s0, 0(a0)
-; RV64I-NEXT:    neg a0, s0
-; RV64I-NEXT:    and a0, s0, a0
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    srliw a0, a0, 27
-; RV64I-NEXT:    lui a1, %hi(.LCPI1_0)
-; RV64I-NEXT:    addi a1, a1, %lo(.LCPI1_0)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    seqz a1, s0
-; RV64I-NEXT:    addi a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    lw a0, 0(a0)
+; RV64I-NEXT:    negw a1, a0
+; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a2, a1, 6
+; RV64I-NEXT:    slli a3, a1, 8
+; RV64I-NEXT:    slli a4, a1, 10
+; RV64I-NEXT:    slli a5, a1, 12
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    slli a3, a1, 16
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 18
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a5, a1, 4
+; RV64I-NEXT:    subw a5, a1, a5
+; RV64I-NEXT:    add a2, a5, a2
+; RV64I-NEXT:    slli a5, a1, 14
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 23
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a1, a1, 27
+; RV64I-NEXT:    add a2, a2, a4
+; RV64I-NEXT:    add a1, a3, a1
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    srliw a1, a1, 27
+; RV64I-NEXT:    lui a2, %hi(.LCPI1_0)
+; RV64I-NEXT:    addi a2, a2, %lo(.LCPI1_0)
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    lbu a1, 0(a1)
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    andi a0, a0, 31
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 
 
@@ -237,27 +248,37 @@ define signext i32 @ctz1(i32 signext %x) nounwind {
 ;
 ; RV64I-LABEL: ctz1:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    neg a0, a0
-; RV64I-NEXT:    and a0, s0, a0
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    srliw a0, a0, 27
-; RV64I-NEXT:    lui a1, %hi(.LCPI2_0)
-; RV64I-NEXT:    addi a1, a1, %lo(.LCPI2_0)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    seqz a1, s0
-; RV64I-NEXT:    addi a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    negw a1, a0
+; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a2, a1, 6
+; RV64I-NEXT:    slli a3, a1, 8
+; RV64I-NEXT:    slli a4, a1, 10
+; RV64I-NEXT:    slli a5, a1, 12
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    slli a3, a1, 16
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 18
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a5, a1, 4
+; RV64I-NEXT:    subw a5, a1, a5
+; RV64I-NEXT:    add a2, a5, a2
+; RV64I-NEXT:    slli a5, a1, 14
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 23
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a1, a1, 27
+; RV64I-NEXT:    add a2, a2, a4
+; RV64I-NEXT:    add a1, a3, a1
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    srliw a1, a1, 27
+; RV64I-NEXT:    lui a2, %hi(.LCPI2_0)
+; RV64I-NEXT:    addi a2, a2, %lo(.LCPI2_0)
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    lbu a1, 0(a1)
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    andi a0, a0, 31
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 
 
@@ -310,27 +331,37 @@ define signext i32 @ctz1_flipped(i32 signext %x) nounwind {
 ;
 ; RV64I-LABEL: ctz1_flipped:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    neg a0, a0
-; RV64I-NEXT:    and a0, s0, a0
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    srliw a0, a0, 27
-; RV64I-NEXT:    lui a1, %hi(.LCPI3_0)
-; RV64I-NEXT:    addi a1, a1, %lo(.LCPI3_0)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    seqz a1, s0
-; RV64I-NEXT:    addi a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    negw a1, a0
+; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a2, a1, 6
+; RV64I-NEXT:    slli a3, a1, 8
+; RV64I-NEXT:    slli a4, a1, 10
+; RV64I-NEXT:    slli a5, a1, 12
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    slli a3, a1, 16
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 18
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a5, a1, 4
+; RV64I-NEXT:    subw a5, a1, a5
+; RV64I-NEXT:    add a2, a5, a2
+; RV64I-NEXT:    slli a5, a1, 14
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 23
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a1, a1, 27
+; RV64I-NEXT:    add a2, a2, a4
+; RV64I-NEXT:    add a1, a3, a1
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    srliw a1, a1, 27
+; RV64I-NEXT:    lui a2, %hi(.LCPI3_0)
+; RV64I-NEXT:    addi a2, a2, %lo(.LCPI3_0)
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    lbu a1, 0(a1)
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    andi a0, a0, 31
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 
 
@@ -381,20 +412,33 @@ define signext i32 @ctz2(i32 signext %x) nounwind {
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    beqz a0, .LBB4_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    neg a1, a0
+; RV64I-NEXT:    negw a1, a0
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    slli a2, a0, 8
+; RV64I-NEXT:    slli a3, a0, 10
+; RV64I-NEXT:    slli a4, a0, 12
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 16
+; RV64I-NEXT:    subw a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 18
+; RV64I-NEXT:    subw a2, a2, a4
+; RV64I-NEXT:    slli a4, a0, 4
+; RV64I-NEXT:    subw a4, a0, a4
+; RV64I-NEXT:    add a1, a4, a1
+; RV64I-NEXT:    slli a4, a0, 14
+; RV64I-NEXT:    subw a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 23
+; RV64I-NEXT:    subw a2, a2, a4
+; RV64I-NEXT:    slli a0, a0, 27
+; RV64I-NEXT:    add a1, a1, a3
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI4_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI4_0)
 ; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB4_2:
 ; RV64I-NEXT:    li a0, 32
@@ -446,20 +490,33 @@ define signext i32 @ctz3(i32 signext %x) nounwind {
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    beqz a0, .LBB5_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    neg a1, a0
+; RV64I-NEXT:    negw a1, a0
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    slli a2, a0, 8
+; RV64I-NEXT:    slli a3, a0, 10
+; RV64I-NEXT:    slli a4, a0, 12
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 16
+; RV64I-NEXT:    subw a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 18
+; RV64I-NEXT:    subw a2, a2, a4
+; RV64I-NEXT:    slli a4, a0, 4
+; RV64I-NEXT:    subw a4, a0, a4
+; RV64I-NEXT:    add a1, a4, a1
+; RV64I-NEXT:    slli a4, a0, 14
+; RV64I-NEXT:    subw a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 23
+; RV64I-NEXT:    subw a2, a2, a4
+; RV64I-NEXT:    slli a0, a0, 27
+; RV64I-NEXT:    add a1, a1, a3
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI5_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI5_0)
 ; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB5_2:
 ; RV64I-NEXT:    li a0, 32
@@ -767,27 +824,37 @@ define signext i32 @ctz5(i32 signext %x) nounwind {
 ;
 ; RV64I-LABEL: ctz5:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    neg a0, a0
-; RV64I-NEXT:    and a0, s0, a0
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    srliw a0, a0, 27
-; RV64I-NEXT:    lui a1, %hi(.LCPI8_0)
-; RV64I-NEXT:    addi a1, a1, %lo(.LCPI8_0)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    seqz a1, s0
-; RV64I-NEXT:    addi a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    negw a1, a0
+; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a2, a1, 6
+; RV64I-NEXT:    slli a3, a1, 8
+; RV64I-NEXT:    slli a4, a1, 10
+; RV64I-NEXT:    slli a5, a1, 12
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    slli a3, a1, 16
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 18
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a5, a1, 4
+; RV64I-NEXT:    subw a5, a1, a5
+; RV64I-NEXT:    add a2, a5, a2
+; RV64I-NEXT:    slli a5, a1, 14
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 23
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a1, a1, 27
+; RV64I-NEXT:    add a2, a2, a4
+; RV64I-NEXT:    add a1, a3, a1
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    srliw a1, a1, 27
+; RV64I-NEXT:    lui a2, %hi(.LCPI8_0)
+; RV64I-NEXT:    addi a2, a2, %lo(.LCPI8_0)
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    lbu a1, 0(a1)
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    andi a0, a0, 31
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 
 
@@ -840,27 +907,37 @@ define signext i32 @ctz6(i32 signext %x) nounwind {
 ;
 ; RV64I-LABEL: ctz6:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    neg a0, a0
-; RV64I-NEXT:    and a0, s0, a0
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    srliw a0, a0, 27
-; RV64I-NEXT:    lui a1, %hi(.LCPI9_0)
-; RV64I-NEXT:    addi a1, a1, %lo(.LCPI9_0)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    seqz a1, s0
-; RV64I-NEXT:    addi a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    negw a1, a0
+; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a2, a1, 6
+; RV64I-NEXT:    slli a3, a1, 8
+; RV64I-NEXT:    slli a4, a1, 10
+; RV64I-NEXT:    slli a5, a1, 12
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    slli a3, a1, 16
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 18
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a5, a1, 4
+; RV64I-NEXT:    subw a5, a1, a5
+; RV64I-NEXT:    add a2, a5, a2
+; RV64I-NEXT:    slli a5, a1, 14
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 23
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a1, a1, 27
+; RV64I-NEXT:    add a2, a2, a4
+; RV64I-NEXT:    add a1, a3, a1
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    srliw a1, a1, 27
+; RV64I-NEXT:    lui a2, %hi(.LCPI9_0)
+; RV64I-NEXT:    addi a2, a2, %lo(.LCPI9_0)
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    lbu a1, 0(a1)
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    andi a0, a0, 31
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 
 
@@ -918,28 +995,39 @@ define signext i32 @globalVar() nounwind {
 ;
 ; RV64I-LABEL: globalVar:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    lui a0, %hi(global_x)
-; RV64I-NEXT:    lw s0, %lo(global_x)(a0)
-; RV64I-NEXT:    neg a0, s0
-; RV64I-NEXT:    and a0, s0, a0
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    srliw a0, a0, 27
-; RV64I-NEXT:    lui a1, %hi(.LCPI10_0)
-; RV64I-NEXT:    addi a1, a1, %lo(.LCPI10_0)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    seqz a1, s0
-; RV64I-NEXT:    addi a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    lw a0, %lo(global_x)(a0)
+; RV64I-NEXT:    negw a1, a0
+; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a2, a1, 6
+; RV64I-NEXT:    slli a3, a1, 8
+; RV64I-NEXT:    slli a4, a1, 10
+; RV64I-NEXT:    slli a5, a1, 12
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    slli a3, a1, 16
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 18
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a5, a1, 4
+; RV64I-NEXT:    subw a5, a1, a5
+; RV64I-NEXT:    add a2, a5, a2
+; RV64I-NEXT:    slli a5, a1, 14
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 23
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a1, a1, 27
+; RV64I-NEXT:    add a2, a2, a4
+; RV64I-NEXT:    add a1, a3, a1
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    srliw a1, a1, 27
+; RV64I-NEXT:    lui a2, %hi(.LCPI10_0)
+; RV64I-NEXT:    addi a2, a2, %lo(.LCPI10_0)
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    lbu a1, 0(a1)
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    andi a0, a0, 31
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 
 
diff --git a/llvm/test/CodeGen/RISCV/mul-expand.ll b/llvm/test/CodeGen/RISCV/mul-expand.ll
index f683a99a42e9e..bf07fab3f0d74 100644
--- a/llvm/test/CodeGen/RISCV/mul-expand.ll
+++ b/llvm/test/CodeGen/RISCV/mul-expand.ll
@@ -7,17 +7,30 @@
 define i32 @muli32_0x555(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_0x555:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 1365
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 2
+; RV32I-NEXT:    slli a2, a0, 4
+; RV32I-NEXT:    slli a3, a0, 6
+; RV32I-NEXT:    add a2, a2, a3
+; RV32I-NEXT:    slli a3, a0, 8
+; RV32I-NEXT:    add a1, a0, a1
+; RV32I-NEXT:    slli a0, a0, 10
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    add a0, a3, a0
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: muli32_0x555:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    li a1, 1365
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    slli a2, a0, 4
+; RV64I-NEXT:    slli a3, a0, 6
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    slli a3, a0, 8
+; RV64I-NEXT:    add a1, a0, a1
+; RV64I-NEXT:    slli a0, a0, 10
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    add a0, a3, a0
+; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    ret
   %a1 = mul i32 %a, 1365
   ret i32 %a1
@@ -37,8 +50,17 @@ define i64 @muli64_0x555(i64 %a) nounwind {
 ;
 ; RV64I-LABEL: muli64_0x555:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 1365
-; RV64I-NEXT:    tail __muldi3
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    slli a2, a0, 4
+; RV64I-NEXT:    slli a3, a0, 6
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    slli a3, a0, 8
+; RV64I-NEXT:    add a1, a0, a1
+; RV64I-NEXT:    slli a0, a0, 10
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    add a0, a3, a0
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    ret
   %a1 = mul i64 %a, 1365
   ret i64 %a1
 }
@@ -46,19 +68,70 @@ define i64 @muli64_0x555(i64 %a) nounwind {
 define i32 @muli32_0x33333333(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_0x33333333:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 209715
-; RV32I-NEXT:    addi a1, a1, 819
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 4
+; RV32I-NEXT:    slli a2, a0, 6
+; RV32I-NEXT:    slli a3, a0, 8
+; RV32I-NEXT:    slli a4, a0, 10
+; RV32I-NEXT:    slli a5, a0, 14
+; RV32I-NEXT:    sub a1, a1, a2
+; RV32I-NEXT:    slli a2, a0, 16
+; RV32I-NEXT:    sub a3, a3, a4
+; RV32I-NEXT:    slli a4, a0, 22
+; RV32I-NEXT:    sub a5, a5, a2
+; RV32I-NEXT:    slli a2, a0, 24
+; RV32I-NEXT:    sub a4, a4, a2
+; RV32I-NEXT:    slli a2, a0, 2
+; RV32I-NEXT:    sub a2, a2, a0
+; RV32I-NEXT:    sub a2, a2, a1
+; RV32I-NEXT:    slli a1, a0, 12
+; RV32I-NEXT:    add a1, a3, a1
+; RV32I-NEXT:    slli a3, a0, 18
+; RV32I-NEXT:    add a3, a5, a3
+; RV32I-NEXT:    slli a5, a0, 26
+; RV32I-NEXT:    add a4, a4, a5
+; RV32I-NEXT:    sub a2, a2, a1
+; RV32I-NEXT:    slli a1, a0, 20
+; RV32I-NEXT:    sub a3, a3, a1
+; RV32I-NEXT:    slli a1, a0, 28
+; RV32I-NEXT:    sub a4, a4, a1
+; RV32I-NEXT:    slli a0, a0, 30
+; RV32I-NEXT:    add a2, a2, a3
+; RV32I-NEXT:    add a0, a4, a0
+; RV32I-NEXT:    add a0, a2, a0
+; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: muli32_0x33333333:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 209715
-; RV64I-NEXT:    addiw a1, a1, 819
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    slli a1, a0, 4
+; RV64I-NEXT:    slli a2, a0, 6
+; RV64I-NEXT:    slli a3, a0, 8
+; RV64I-NEXT:    slli a4, a0, 10
+; RV64I-NEXT:    slli a5, a0, 14
+; RV64I-NEXT:    sub a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 16
+; RV64I-NEXT:    sub a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 22
+; RV64I-NEXT:    sub a5, a5, a2
+; RV64I-NEXT:    slli a2, a0, 24
+; RV64I-NEXT:    sub a4, a4, a2
+; RV64I-NEXT:    slli a2, a0, 2
+; RV64I-NEXT:    sub a2, a2, a0
+; RV64I-NEXT:    sub a2, a2, a1
+; RV64I-NEXT:    slli a1, a0, 12
+; RV64I-NEXT:    add a1, a3, a1
+; RV64I-NEXT:    slli a3, a0, 18
+; RV64I-NEXT:    add a3, a5, a3
+; RV64I-NEXT:    slli a5, a0, 26
+; RV64I-NEXT:    add a4, a4, a5
+; RV64I-NEXT:    sub a2, a2, a1
+; RV64I-NEXT:    slli a1, a0, 20
+; RV64I-NEXT:    sub a3, a3, a1
+; RV64I-NEXT:    slli a1, a0, 28
+; RV64I-NEXT:    sub a4, a4, a1
+; RV64I-NEXT:    slli a0, a0, 30
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    add a0, a4, a0
+; RV64I-NEXT:    add a0, a2, a0
 ; RV64I-NEXT:    ret
     %a1 = mul i32 %a, 858993459
     ret i32 %a1
@@ -79,9 +152,37 @@ define i64 @muli64_0x33333333(i64 %a) nounwind {
 ;
 ; RV64I-LABEL: muli64_0x33333333:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 209715
-; RV64I-NEXT:    addiw a1, a1, 819
-; RV64I-NEXT:    tail __muldi3
+; RV64I-NEXT:    slli a1, a0, 4
+; RV64I-NEXT:    slli a2, a0, 6
+; RV64I-NEXT:    slli a3, a0, 8
+; RV64I-NEXT:    slli a4, a0, 10
+; RV64I-NEXT:    slli a5, a0, 14
+; RV64I-NEXT:    sub a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 16
+; RV64I-NEXT:    sub a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 22
+; RV64I-NEXT:    sub a5, a5, a2
+; RV64I-NEXT:    slli a2, a0, 24
+; RV64I-NEXT:    sub a4, a4, a2
+; RV64I-NEXT:    slli a2, a0, 2
+; RV64I-NEXT:    sub a2, a2, a0
+; RV64I-NEXT:    sub a2, a2, a1
+; RV64I-NEXT:    slli a1, a0, 12
+; RV64I-NEXT:    add a1, a3, a1
+; RV64I-NEXT:    slli a3, a0, 18
+; RV64I-NEXT:    add a3, a5, a3
+; RV64I-NEXT:    slli a5, a0, 26
+; RV64I-NEXT:    add a4, a4, a5
+; RV64I-NEXT:    sub a2, a2, a1
+; RV64I-NEXT:    slli a1, a0, 20
+; RV64I-NEXT:    sub a3, a3, a1
+; RV64I-NEXT:    slli a1, a0, 28
+; RV64I-NEXT:    sub a4, a4, a1
+; RV64I-NEXT:    slli a0, a0, 30
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    add a0, a4, a0
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    ret
     %a1 = mul i64 %a, 858993459
     ret i64 %a1
 }
@@ -89,19 +190,72 @@ define i64 @muli64_0x33333333(i64 %a) nounwind {
 define i32 @muli32_0xaaaaaaaa(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_0xaaaaaaaa:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 699051
-; RV32I-NEXT:    addi a1, a1, -1366
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 3
+; RV32I-NEXT:    slli a2, a0, 1
+; RV32I-NEXT:    slli a3, a0, 5
+; RV32I-NEXT:    slli a4, a0, 7
+; RV32I-NEXT:    slli a5, a0, 9
+; RV32I-NEXT:    slli a6, a0, 11
+; RV32I-NEXT:    add a1, a2, a1
+; RV32I-NEXT:    slli a2, a0, 15
+; RV32I-NEXT:    add a3, a3, a4
+; RV32I-NEXT:    slli a4, a0, 17
+; RV32I-NEXT:    add a5, a5, a6
+; RV32I-NEXT:    slli a6, a0, 23
+; RV32I-NEXT:    add a2, a2, a4
+; RV32I-NEXT:    slli a4, a0, 25
+; RV32I-NEXT:    add a4, a6, a4
+; RV32I-NEXT:    add a1, a1, a3
+; RV32I-NEXT:    slli a3, a0, 13
+; RV32I-NEXT:    add a3, a5, a3
+; RV32I-NEXT:    slli a5, a0, 19
+; RV32I-NEXT:    add a2, a2, a5
+; RV32I-NEXT:    slli a5, a0, 27
+; RV32I-NEXT:    add a4, a4, a5
+; RV32I-NEXT:    add a1, a1, a3
+; RV32I-NEXT:    slli a3, a0, 21
+; RV32I-NEXT:    add a2, a2, a3
+; RV32I-NEXT:    slli a3, a0, 29
+; RV32I-NEXT:    add a3, a4, a3
+; RV32I-NEXT:    slli a0, a0, 31
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    add a0, a3, a0
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: muli32_0xaaaaaaaa:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 699051
-; RV64I-NEXT:    addiw a1, a1, -1366
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    slli a1, a0, 3
+; RV64I-NEXT:    slli a2, a0, 1
+; RV64I-NEXT:    slli a3, a0, 5
+; RV64I-NEXT:    slli a4, a0, 7
+; RV64I-NEXT:    slli a5, a0, 9
+; RV64I-NEXT:    slli a6, a0, 11
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    slli a2, a0, 15
+; RV64I-NEXT:    add a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 17
+; RV64I-NEXT:    add a5, a5, a6
+; RV64I-NEXT:    slli a6, a0, 23
+; RV64I-NEXT:    add a2, a2, a4
+; RV64I-NEXT:    slli a4, a0, 25
+; RV64I-NEXT:    add a4, a6, a4
+; RV64I-NEXT:    add a1, a1, a3
+; RV64I-NEXT:    slli a3, a0, 13
+; RV64I-NEXT:    add a3, a5, a3
+; RV64I-NEXT:    slli a5, a0, 19
+; RV64I-NEXT:    add a2, a2, a5
+; RV64I-NEXT:    slli a5, a0, 27
+; RV64I-NEXT:    add a4, a4, a5
+; RV64I-NEXT:    add a1, a1, a3
+; RV64I-NEXT:    slli a3, a0, 21
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    slli a3, a0, 29
+; RV64I-NEXT:    add a3, a4, a3
+; RV64I-NEXT:    slli a0, a0, 31
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    sub a0, a3, a0
+; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    ret
   %a1 = mul i32 %a, -1431655766
   ret i32 %a1
@@ -122,9 +276,38 @@ define i64 @muli64_0xaaaaaaaa(i64 %a) nounwind {
 ;
 ; RV64I-LABEL: muli64_0xaaaaaaaa:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 699051
-; RV64I-NEXT:    addiw a1, a1, -1366
-; RV64I-NEXT:    tail __muldi3
+; RV64I-NEXT:    slli a1, a0, 3
+; RV64I-NEXT:    slli a2, a0, 1
+; RV64I-NEXT:    slli a3, a0, 5
+; RV64I-NEXT:    slli a4, a0, 7
+; RV64I-NEXT:    slli a5, a0, 9
+; RV64I-NEXT:    slli a6, a0, 11
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    slli a2, a0, 15
+; RV64I-NEXT:    add a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 17
+; RV64I-NEXT:    add a5, a5, a6
+; RV64I-NEXT:    slli a6, a0, 23
+; RV64I-NEXT:    add a2, a2, a4
+; RV64I-NEXT:    slli a4, a0, 25
+; RV64I-NEXT:    add a4, a6, a4
+; RV64I-NEXT:    add a1, a1, a3
+; RV64I-NEXT:    slli a3, a0, 13
+; RV64I-NEXT:    add a3, a5, a3
+; RV64I-NEXT:    slli a5, a0, 19
+; RV64I-NEXT:    add a2, a2, a5
+; RV64I-NEXT:    slli a5, a0, 27
+; RV64I-NEXT:    add a4, a4, a5
+; RV64I-NEXT:    add a1, a1, a3
+; RV64I-NEXT:    slli a3, a0, 21
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    slli a3, a0, 29
+; RV64I-NEXT:    add a3, a4, a3
+; RV64I-NEXT:    slli a0, a0, 31
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    sub a0, a3, a0
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    ret
   %a1 = mul i64 %a, -1431655766
   ret i64 %a1
 }
@@ -170,19 +353,36 @@ define i64 @muli64_0x0fffffff(i64 %a) nounwind {
 define i32 @muli32_0xf0f0f0f0f0(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_0xf0f0f0f0f0:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 986895
-; RV32I-NEXT:    addi a1, a1, 240
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 4
+; RV32I-NEXT:    slli a2, a0, 8
+; RV32I-NEXT:    slli a3, a0, 12
+; RV32I-NEXT:    slli a4, a0, 16
+; RV32I-NEXT:    sub a2, a2, a1
+; RV32I-NEXT:    slli a1, a0, 20
+; RV32I-NEXT:    sub a3, a3, a4
+; RV32I-NEXT:    slli a4, a0, 24
+; RV32I-NEXT:    sub a1, a1, a4
+; RV32I-NEXT:    slli a0, a0, 28
+; RV32I-NEXT:    sub a2, a2, a3
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    sub a0, a2, a0
+; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: muli32_0xf0f0f0f0f0:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 986895
-; RV64I-NEXT:    addiw a1, a1, 240
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    slli a1, a0, 4
+; RV64I-NEXT:    slli a2, a0, 8
+; RV64I-NEXT:    slli a3, a0, 12
+; RV64I-NEXT:    slli a4, a0, 16
+; RV64I-NEXT:    sub a2, a2, a1
+; RV64I-NEXT:    slli a1, a0, 20
+; RV64I-NEXT:    sub a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 24
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    slli a0, a0, 28
+; RV64I-NEXT:    sub a2, a2, a3
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    sub a0, a2, a0
 ; RV64I-NEXT:    ret
   %a1 = mul i32 %a, -252645136
   ret i32 %a1
@@ -203,9 +403,20 @@ define i64 @muli64_0xf0f0f0f0f0(i64 %a) nounwind {
 ;
 ; RV64I-LABEL: muli64_0xf0f0f0f0f0:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 986895
-; RV64I-NEXT:    addiw a1, a1, 240
-; RV64I-NEXT:    tail __muldi3
+; RV64I-NEXT:    slli a1, a0, 4
+; RV64I-NEXT:    slli a2, a0, 8
+; RV64I-NEXT:    slli a3, a0, 12
+; RV64I-NEXT:    slli a4, a0, 16
+; RV64I-NEXT:    sub a2, a2, a1
+; RV64I-NEXT:    slli a1, a0, 20
+; RV64I-NEXT:    sub a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 24
+; RV64I-NEXT:    sub a1, a1, a4
+; RV64I-NEXT:    slli a0, a0, 28
+; RV64I-NEXT:    sub a2, a2, a3
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    sub a0, a2, a0
+; RV64I-NEXT:    ret
   %a1 = mul i64 %a, -252645136
   ret i64 %a1
 }
@@ -213,19 +424,28 @@ define i64 @muli64_0xf0f0f0f0f0(i64 %a) nounwind {
 define i32 @muli32_0xf7f7f7f7(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_0xf7f7f7f7:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 1015679
-; RV32I-NEXT:    addi a1, a1, 2039
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 3
+; RV32I-NEXT:    slli a2, a0, 11
+; RV32I-NEXT:    slli a3, a0, 19
+; RV32I-NEXT:    add a1, a0, a1
+; RV32I-NEXT:    slli a0, a0, 27
+; RV32I-NEXT:    add a2, a2, a3
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: muli32_0xf7f7f7f7:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 1015679
-; RV64I-NEXT:    addiw a1, a1, 2039
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    slli a1, a0, 3
+; RV64I-NEXT:    slli a2, a0, 11
+; RV64I-NEXT:    slli a3, a0, 19
+; RV64I-NEXT:    add a1, a0, a1
+; RV64I-NEXT:    slli a0, a0, 27
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    sub a0, a0, a1
 ; RV64I-NEXT:    ret
   %a1 = mul i32 %a, -134744073
   ret i32 %a1
@@ -246,9 +466,16 @@ define i64 @muli64_0xf7f7f7f7(i64 %a) nounwind {
 ;
 ; RV64I-LABEL: muli64_0xf7f7f7f7:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 1015679
-; RV64I-NEXT:    addiw a1, a1, 2039
-; RV64I-NEXT:    tail __muldi3
+; RV64I-NEXT:    slli a1, a0, 3
+; RV64I-NEXT:    slli a2, a0, 11
+; RV64I-NEXT:    slli a3, a0, 19
+; RV64I-NEXT:    add a1, a0, a1
+; RV64I-NEXT:    slli a0, a0, 27
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
   %a1 = mul i64 %a, -134744073
   ret i64 %a1
 }
diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index 8dd691946ae5a..cc771531c5f29 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -467,29 +467,30 @@ define i32 @mulhu_constant(i32 %a) nounwind {
 define i32 @muli32_p10(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p10:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 10
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 3
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: muli32_p10:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    li a1, 10
-; RV32IM-NEXT:    mul a0, a0, a1
+; RV32IM-NEXT:    slli a1, a0, 1
+; RV32IM-NEXT:    slli a0, a0, 3
+; RV32IM-NEXT:    add a0, a0, a1
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: muli32_p10:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    li a1, 10
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    slli a1, a0, 3
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: muli32_p10:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    li a1, 10
-; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    slli a1, a0, 1
+; RV64IM-NEXT:    slli a0, a0, 3
+; RV64IM-NEXT:    addw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = mul i32 %a, 10
   ret i32 %1
@@ -498,8 +499,10 @@ define i32 @muli32_p10(i32 %a) nounwind {
 define i32 @muli32_p14(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p14:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 14
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 1
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: muli32_p14:
 ; RV32IM:       # %bb.0:
@@ -528,29 +531,30 @@ define i32 @muli32_p14(i32 %a) nounwind {
 define i32 @muli32_p20(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p20:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 20
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 4
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: muli32_p20:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    li a1, 20
-; RV32IM-NEXT:    mul a0, a0, a1
+; RV32IM-NEXT:    slli a1, a0, 2
+; RV32IM-NEXT:    slli a0, a0, 4
+; RV32IM-NEXT:    add a0, a0, a1
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: muli32_p20:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    li a1, 20
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    slli a1, a0, 4
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: muli32_p20:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    li a1, 20
-; RV64IM-NEXT:    mulw a0, a0, a1
+; RV64IM-NEXT:    slli a1, a0, 2
+; RV64IM-NEXT:    slli a0, a0, 4
+; RV64IM-NEXT:    addw a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = mul i32 %a, 20
   ret i32 %1
@@ -559,8 +563,10 @@ define i32 @muli32_p20(i32 %a) nounwind {
 define i32 @muli32_p28(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p28:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 28
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 2
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: muli32_p28:
 ; RV32IM:       # %bb.0:
@@ -589,8 +595,10 @@ define i32 @muli32_p28(i32 %a) nounwind {
 define i32 @muli32_p30(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p30:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 30
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 1
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: muli32_p30:
 ; RV32IM:       # %bb.0:
@@ -619,8 +627,10 @@ define i32 @muli32_p30(i32 %a) nounwind {
 define i32 @muli32_p56(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p56:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 56
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 3
+; RV32I-NEXT:    slli a0, a0, 6
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: muli32_p56:
 ; RV32IM:       # %bb.0:
@@ -649,8 +659,10 @@ define i32 @muli32_p56(i32 %a) nounwind {
 define i32 @muli32_p60(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p60:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 60
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 2
+; RV32I-NEXT:    slli a0, a0, 6
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: muli32_p60:
 ; RV32IM:       # %bb.0:
@@ -679,8 +691,10 @@ define i32 @muli32_p60(i32 %a) nounwind {
 define i32 @muli32_p62(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p62:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 62
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 1
+; RV32I-NEXT:    slli a0, a0, 6
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: muli32_p62:
 ; RV32IM:       # %bb.0:
@@ -895,8 +909,10 @@ define i64 @muli64_p60(i64 %a) nounwind {
 ;
 ; RV64I-LABEL: muli64_p60:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 60
-; RV64I-NEXT:    tail __muldi3
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    slli a0, a0, 6
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: muli64_p60:
 ; RV64IM:       # %bb.0:
@@ -923,21 +939,28 @@ define i64 @muli64_p68(i64 %a) nounwind {
 ; RV32IM-LABEL: muli64_p68:
 ; RV32IM:       # %bb.0:
 ; RV32IM-NEXT:    li a2, 68
-; RV32IM-NEXT:    mul a1, a1, a2
-; RV32IM-NEXT:    mulhu a3, a0, a2
-; RV32IM-NEXT:    add a1, a3, a1
-; RV32IM-NEXT:    mul a0, a0, a2
+; RV32IM-NEXT:    slli a3, a1, 2
+; RV32IM-NEXT:    slli a1, a1, 6
+; RV32IM-NEXT:    add a1, a1, a3
+; RV32IM-NEXT:    slli a3, a0, 2
+; RV32IM-NEXT:    mulhu a2, a0, a2
+; RV32IM-NEXT:    slli a0, a0, 6
+; RV32IM-NEXT:    add a1, a2, a1
+; RV32IM-NEXT:    add a0, a0, a3
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: muli64_p68:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 68
-; RV64I-NEXT:    tail __muldi3
+; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: muli64_p68:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    li a1, 68
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    slli a1, a0, 2
+; RV64IM-NEXT:    slli a0, a0, 6
+; RV64IM-NEXT:    add a0, a0, a1
 ; RV64IM-NEXT:    ret
   %1 = mul i64 %a, 68
   ret i64 %1
@@ -1093,8 +1116,10 @@ define i64 @muli64_m65(i64 %a) nounwind {
 define i32 @muli32_p384(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p384:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 384
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 7
+; RV32I-NEXT:    slli a0, a0, 9
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: muli32_p384:
 ; RV32IM:       # %bb.0:
@@ -1123,8 +1148,10 @@ define i32 @muli32_p384(i32 %a) nounwind {
 define i32 @muli32_p12288(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_p12288:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 3
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 12
+; RV32I-NEXT:    slli a0, a0, 14
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: muli32_p12288:
 ; RV32IM:       # %bb.0:
@@ -1249,9 +1276,11 @@ define i32 @muli32_m3840(i32 %a) nounwind {
 define i32 @muli32_m4352(i32 %a) nounwind {
 ; RV32I-LABEL: muli32_m4352:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, -17
-; RV32I-NEXT:    slli a1, a1, 8
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 12
+; RV32I-NEXT:    slli a0, a0, 8
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: muli32_m4352:
 ; RV32IM:       # %bb.0:
@@ -1262,13 +1291,10 @@ define i32 @muli32_m4352(i32 %a) nounwind {
 ;
 ; RV64I-LABEL: muli32_m4352:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    li a1, -17
-; RV64I-NEXT:    slli a1, a1, 8
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    slli a1, a0, 12
+; RV64I-NEXT:    slli a0, a0, 8
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    neg a0, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: muli32_m4352:
@@ -1300,12 +1326,16 @@ define i64 @muli64_p4352(i64 %a) nounwind {
 ;
 ; RV32IM-LABEL: muli64_p4352:
 ; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    slli a2, a1, 8
+; RV32IM-NEXT:    slli a1, a1, 12
+; RV32IM-NEXT:    add a1, a1, a2
 ; RV32IM-NEXT:    li a2, 17
 ; RV32IM-NEXT:    slli a2, a2, 8
-; RV32IM-NEXT:    mul a1, a1, a2
-; RV32IM-NEXT:    mulhu a3, a0, a2
-; RV32IM-NEXT:    add a1, a3, a1
-; RV32IM-NEXT:    mul a0, a0, a2
+; RV32IM-NEXT:    mulhu a2, a0, a2
+; RV32IM-NEXT:    add a1, a2, a1
+; RV32IM-NEXT:    slli a2, a0, 8
+; RV32IM-NEXT:    slli a0, a0, 12
+; RV32IM-NEXT:    add a0, a0, a2
 ; RV32IM-NEXT:    ret
 ;
 ; RV64I-LABEL: muli64_p4352:
@@ -1399,9 +1429,11 @@ define i64 @muli64_m4352(i64 %a) nounwind {
 ;
 ; RV64I-LABEL: muli64_m4352:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, -17
-; RV64I-NEXT:    slli a1, a1, 8
-; RV64I-NEXT:    tail __muldi3
+; RV64I-NEXT:    slli a1, a0, 12
+; RV64I-NEXT:    slli a0, a0, 8
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: muli64_m4352:
 ; RV64IM:       # %bb.0:
@@ -2032,8 +2064,10 @@ define i64 @muland_demand(i64 %x) nounwind {
 ; RV64I-NEXT:    li a1, -29
 ; RV64I-NEXT:    srli a1, a1, 2
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    li a1, 12
-; RV64I-NEXT:    tail __muldi3
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: muland_demand:
 ; RV64IM:       # %bb.0:
@@ -2068,9 +2102,10 @@ define i64 @mulzext_demand(i32 signext %x) nounwind {
 ;
 ; RV64I-LABEL: mulzext_demand:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 3
-; RV64I-NEXT:    slli a1, a1, 32
-; RV64I-NEXT:    tail __muldi3
+; RV64I-NEXT:    slli a1, a0, 32
+; RV64I-NEXT:    slli a0, a0, 34
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: mulzext_demand:
 ; RV64IM:       # %bb.0:
@@ -2087,8 +2122,20 @@ define i32 @mulfshl_demand(i32 signext %x) nounwind {
 ; RV32I-LABEL: mulfshl_demand:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srli a0, a0, 11
-; RV32I-NEXT:    lui a1, 92808
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 19
+; RV32I-NEXT:    slli a2, a0, 15
+; RV32I-NEXT:    slli a3, a0, 21
+; RV32I-NEXT:    slli a4, a0, 23
+; RV32I-NEXT:    add a1, a2, a1
+; RV32I-NEXT:    slli a2, a0, 25
+; RV32I-NEXT:    add a3, a3, a4
+; RV32I-NEXT:    slli a4, a0, 27
+; RV32I-NEXT:    add a2, a2, a4
+; RV32I-NEXT:    slli a0, a0, 29
+; RV32I-NEXT:    add a1, a1, a3
+; RV32I-NEXT:    sub a2, a2, a0
+; RV32I-NEXT:    sub a0, a1, a2
+; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: mulfshl_demand:
 ; RV32IM:       # %bb.0:
@@ -2099,13 +2146,20 @@ define i32 @mulfshl_demand(i32 signext %x) nounwind {
 ;
 ; RV64I-LABEL: mulfshl_demand:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    srliw a0, a0, 11
-; RV64I-NEXT:    lui a1, 92808
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    slli a1, a0, 19
+; RV64I-NEXT:    slli a2, a0, 15
+; RV64I-NEXT:    slli a3, a0, 21
+; RV64I-NEXT:    slli a4, a0, 23
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    slli a2, a0, 25
+; RV64I-NEXT:    add a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 27
+; RV64I-NEXT:    add a2, a2, a4
+; RV64I-NEXT:    slli a0, a0, 29
+; RV64I-NEXT:    add a1, a1, a3
+; RV64I-NEXT:    sub a2, a2, a0
+; RV64I-NEXT:    sub a0, a1, a2
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: mulfshl_demand:
@@ -2122,8 +2176,20 @@ define i32 @mulfshl_demand(i32 signext %x) nounwind {
 define i32 @mulor_demand(i32 signext %x, i32 signext %y) nounwind {
 ; RV32I-LABEL: mulor_demand:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 92808
-; RV32I-NEXT:    tail __mulsi3
+; RV32I-NEXT:    slli a1, a0, 19
+; RV32I-NEXT:    slli a2, a0, 15
+; RV32I-NEXT:    slli a3, a0, 21
+; RV32I-NEXT:    slli a4, a0, 23
+; RV32I-NEXT:    add a1, a2, a1
+; RV32I-NEXT:    slli a2, a0, 25
+; RV32I-NEXT:    add a3, a3, a4
+; RV32I-NEXT:    slli a4, a0, 27
+; RV32I-NEXT:    add a2, a2, a4
+; RV32I-NEXT:    slli a0, a0, 29
+; RV32I-NEXT:    add a1, a1, a3
+; RV32I-NEXT:    sub a2, a2, a0
+; RV32I-NEXT:    sub a0, a1, a2
+; RV32I-NEXT:    ret
 ;
 ; RV32IM-LABEL: mulor_demand:
 ; RV32IM:       # %bb.0:
@@ -2133,12 +2199,19 @@ define i32 @mulor_demand(i32 signext %x, i32 signext %y) nounwind {
 ;
 ; RV64I-LABEL: mulor_demand:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 92808
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    slli a1, a0, 19
+; RV64I-NEXT:    slli a2, a0, 15
+; RV64I-NEXT:    slli a3, a0, 21
+; RV64I-NEXT:    slli a4, a0, 23
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    slli a2, a0, 25
+; RV64I-NEXT:    add a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 27
+; RV64I-NEXT:    add a2, a2, a4
+; RV64I-NEXT:    slli a0, a0, 29
+; RV64I-NEXT:    add a1, a1, a3
+; RV64I-NEXT:    sub a2, a2, a0
+; RV64I-NEXT:    sub a0, a1, a2
 ; RV64I-NEXT:    ret
 ;
 ; RV64IM-LABEL: mulor_demand:
diff --git a/llvm/test/CodeGen/RISCV/rv32xtheadba.ll b/llvm/test/CodeGen/RISCV/rv32xtheadba.ll
index 44ab0e1fef6c1..0fc0adbfa83d9 100644
--- a/llvm/test/CodeGen/RISCV/rv32xtheadba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32xtheadba.ll
@@ -116,8 +116,9 @@ define i32 @addmul6(i32 %a, i32 %b) {
 define i32 @addmul10(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul10:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a2, 10
-; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    slli a2, a0, 1
+; RV32I-NEXT:    slli a0, a0, 3
+; RV32I-NEXT:    add a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -153,8 +154,9 @@ define i32 @addmul12(i32 %a, i32 %b) {
 define i32 @addmul18(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul18:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a2, 18
-; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    slli a2, a0, 1
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    add a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -171,8 +173,9 @@ define i32 @addmul18(i32 %a, i32 %b) {
 define i32 @addmul20(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul20:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a2, 20
-; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    slli a2, a0, 2
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    add a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -208,8 +211,9 @@ define i32 @addmul24(i32 %a, i32 %b) {
 define i32 @addmul36(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul36:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a2, 36
-; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    slli a2, a0, 2
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    add a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -226,8 +230,9 @@ define i32 @addmul36(i32 %a, i32 %b) {
 define i32 @addmul40(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul40:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a2, 40
-; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    slli a2, a0, 3
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    add a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -244,8 +249,9 @@ define i32 @addmul40(i32 %a, i32 %b) {
 define i32 @addmul72(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul72:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a2, 72
-; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    slli a2, a0, 3
+; RV32I-NEXT:    slli a0, a0, 6
+; RV32I-NEXT:    add a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -279,8 +285,9 @@ define i32 @mul96(i32 %a) {
 define i32 @mul160(i32 %a) {
 ; RV32I-LABEL: mul160:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 160
-; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 5
+; RV32I-NEXT:    slli a0, a0, 7
+; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32XTHEADBA-LABEL: mul160:
@@ -312,8 +319,9 @@ define i32 @mul200(i32 %a) {
 define i32 @mul288(i32 %a) {
 ; RV32I-LABEL: mul288:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 288
-; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 5
+; RV32I-NEXT:    slli a0, a0, 8
+; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32XTHEADBA-LABEL: mul288:
@@ -328,8 +336,9 @@ define i32 @mul288(i32 %a) {
 define i32 @mul258(i32 %a) {
 ; RV32I-LABEL: mul258:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 258
-; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 1
+; RV32I-NEXT:    slli a0, a0, 8
+; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32XTHEADBA-LABEL: mul258:
@@ -344,8 +353,9 @@ define i32 @mul258(i32 %a) {
 define i32 @mul260(i32 %a) {
 ; RV32I-LABEL: mul260:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 260
-; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 2
+; RV32I-NEXT:    slli a0, a0, 8
+; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32XTHEADBA-LABEL: mul260:
@@ -360,8 +370,9 @@ define i32 @mul260(i32 %a) {
 define i32 @mul264(i32 %a) {
 ; RV32I-LABEL: mul264:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 264
-; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 3
+; RV32I-NEXT:    slli a0, a0, 8
+; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32XTHEADBA-LABEL: mul264:
diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll
index fec156ac2be27..f8ca41782c6e1 100644
--- a/llvm/test/CodeGen/RISCV/rv32zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -82,8 +82,9 @@ define i32 @addmul6(i32 %a, i32 %b) {
 define i32 @addmul10(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul10:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a2, 10
-; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    slli a2, a0, 1
+; RV32I-NEXT:    slli a0, a0, 3
+; RV32I-NEXT:    add a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -119,8 +120,9 @@ define i32 @addmul12(i32 %a, i32 %b) {
 define i32 @addmul18(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul18:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a2, 18
-; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    slli a2, a0, 1
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    add a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -137,8 +139,9 @@ define i32 @addmul18(i32 %a, i32 %b) {
 define i32 @addmul20(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul20:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a2, 20
-; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    slli a2, a0, 2
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    add a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -174,8 +177,9 @@ define i32 @addmul24(i32 %a, i32 %b) {
 define i32 @addmul36(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul36:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a2, 36
-; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    slli a2, a0, 2
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    add a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -192,8 +196,9 @@ define i32 @addmul36(i32 %a, i32 %b) {
 define i32 @addmul40(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul40:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a2, 40
-; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    slli a2, a0, 3
+; RV32I-NEXT:    slli a0, a0, 5
+; RV32I-NEXT:    add a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -210,8 +215,9 @@ define i32 @addmul40(i32 %a, i32 %b) {
 define i32 @addmul72(i32 %a, i32 %b) {
 ; RV32I-LABEL: addmul72:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a2, 72
-; RV32I-NEXT:    mul a0, a0, a2
+; RV32I-NEXT:    slli a2, a0, 3
+; RV32I-NEXT:    slli a0, a0, 6
+; RV32I-NEXT:    add a0, a0, a2
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -245,8 +251,9 @@ define i32 @mul96(i32 %a) {
 define i32 @mul160(i32 %a) {
 ; RV32I-LABEL: mul160:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 160
-; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 5
+; RV32I-NEXT:    slli a0, a0, 7
+; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: mul160:
@@ -261,8 +268,9 @@ define i32 @mul160(i32 %a) {
 define i32 @mul288(i32 %a) {
 ; RV32I-LABEL: mul288:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 288
-; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 5
+; RV32I-NEXT:    slli a0, a0, 8
+; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: mul288:
@@ -277,8 +285,9 @@ define i32 @mul288(i32 %a) {
 define i32 @mul258(i32 %a) {
 ; RV32I-LABEL: mul258:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 258
-; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 1
+; RV32I-NEXT:    slli a0, a0, 8
+; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: mul258:
@@ -293,8 +302,9 @@ define i32 @mul258(i32 %a) {
 define i32 @mul260(i32 %a) {
 ; RV32I-LABEL: mul260:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 260
-; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 2
+; RV32I-NEXT:    slli a0, a0, 8
+; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: mul260:
@@ -309,8 +319,9 @@ define i32 @mul260(i32 %a) {
 define i32 @mul264(i32 %a) {
 ; RV32I-LABEL: mul264:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    li a1, 264
-; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    slli a1, a0, 3
+; RV32I-NEXT:    slli a0, a0, 8
+; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: mul264:
diff --git a/llvm/test/CodeGen/RISCV/rv64xtheadba.ll b/llvm/test/CodeGen/RISCV/rv64xtheadba.ll
index 2272c17bcef03..05396e3355ff6 100644
--- a/llvm/test/CodeGen/RISCV/rv64xtheadba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64xtheadba.ll
@@ -131,8 +131,9 @@ define i64 @disjointormul6(i64 %a, i64 %b) {
 define i64 @addmul10(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul10:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, 10
-; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    slli a2, a0, 1
+; RV64I-NEXT:    slli a0, a0, 3
+; RV64I-NEXT:    add a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -168,8 +169,9 @@ define i64 @addmul12(i64 %a, i64 %b) {
 define i64 @addmul18(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul18:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, 18
-; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    slli a2, a0, 1
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    add a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -186,8 +188,9 @@ define i64 @addmul18(i64 %a, i64 %b) {
 define i64 @addmul20(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul20:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, 20
-; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    slli a2, a0, 2
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    add a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -235,8 +238,9 @@ define i64 @addmul24(i64 %a, i64 %b) {
 define i64 @addmul36(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul36:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, 36
-; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    slli a2, a0, 2
+; RV64I-NEXT:    slli a0, a0, 5
+; RV64I-NEXT:    add a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -253,8 +257,9 @@ define i64 @addmul36(i64 %a, i64 %b) {
 define i64 @addmul40(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul40:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, 40
-; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    slli a2, a0, 3
+; RV64I-NEXT:    slli a0, a0, 5
+; RV64I-NEXT:    add a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -271,8 +276,9 @@ define i64 @addmul40(i64 %a, i64 %b) {
 define i64 @addmul72(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul72:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, 72
-; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    slli a2, a0, 3
+; RV64I-NEXT:    slli a0, a0, 6
+; RV64I-NEXT:    add a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -632,8 +638,9 @@ define i64 @mul137(i64 %a) {
 define i64 @mul160(i64 %a) {
 ; RV64I-LABEL: mul160:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 160
-; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 5
+; RV64I-NEXT:    slli a0, a0, 7
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64XTHEADBA-LABEL: mul160:
@@ -648,8 +655,9 @@ define i64 @mul160(i64 %a) {
 define i64 @mul288(i64 %a) {
 ; RV64I-LABEL: mul288:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 288
-; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 5
+; RV64I-NEXT:    slli a0, a0, 8
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64XTHEADBA-LABEL: mul288:
@@ -697,8 +705,9 @@ define i64 @sh3add_imm(i64 %0) {
 define i64 @mul258(i64 %a) {
 ; RV64I-LABEL: mul258:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 258
-; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 1
+; RV64I-NEXT:    slli a0, a0, 8
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64XTHEADBA-LABEL: mul258:
@@ -713,8 +722,9 @@ define i64 @mul258(i64 %a) {
 define i64 @mul260(i64 %a) {
 ; RV64I-LABEL: mul260:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 260
-; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    slli a0, a0, 8
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64XTHEADBA-LABEL: mul260:
@@ -729,8 +739,9 @@ define i64 @mul260(i64 %a) {
 define i64 @mul264(i64 %a) {
 ; RV64I-LABEL: mul264:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 264
-; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 3
+; RV64I-NEXT:    slli a0, a0, 8
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64XTHEADBA-LABEL: mul264:
@@ -988,8 +999,9 @@ define signext i32 @mulw192(i32 signext %a) {
 define signext i32 @mulw320(i32 signext %a) {
 ; RV64I-LABEL: mulw320:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 320
-; RV64I-NEXT:    mulw a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    slli a0, a0, 8
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64XTHEADBA-LABEL: mulw320:
@@ -1004,8 +1016,9 @@ define signext i32 @mulw320(i32 signext %a) {
 define signext i32 @mulw576(i32 signext %a) {
 ; RV64I-LABEL: mulw576:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 576
-; RV64I-NEXT:    mulw a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    slli a0, a0, 9
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64XTHEADBA-LABEL: mulw576:
diff --git a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
index d9f7d36127293..10ef3357d4783 100644
--- a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
@@ -357,20 +357,33 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    beqz a0, .LBB6_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    neg a1, a0
+; RV64I-NEXT:    negw a1, a0
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    slli a2, a0, 8
+; RV64I-NEXT:    slli a3, a0, 10
+; RV64I-NEXT:    slli a4, a0, 12
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 16
+; RV64I-NEXT:    subw a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 18
+; RV64I-NEXT:    subw a2, a2, a4
+; RV64I-NEXT:    slli a4, a0, 4
+; RV64I-NEXT:    subw a4, a0, a4
+; RV64I-NEXT:    add a1, a4, a1
+; RV64I-NEXT:    slli a4, a0, 14
+; RV64I-NEXT:    subw a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 23
+; RV64I-NEXT:    subw a2, a2, a4
+; RV64I-NEXT:    slli a0, a0, 27
+; RV64I-NEXT:    add a1, a1, a3
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI6_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI6_0)
 ; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB6_2:
 ; RV64I-NEXT:    li a0, 32
@@ -397,20 +410,33 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: cttz_zero_undef_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    neg a1, a0
+; RV64I-NEXT:    negw a1, a0
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    slli a2, a0, 8
+; RV64I-NEXT:    slli a3, a0, 10
+; RV64I-NEXT:    slli a4, a0, 12
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 16
+; RV64I-NEXT:    subw a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 18
+; RV64I-NEXT:    subw a2, a2, a4
+; RV64I-NEXT:    slli a4, a0, 4
+; RV64I-NEXT:    subw a4, a0, a4
+; RV64I-NEXT:    add a1, a4, a1
+; RV64I-NEXT:    slli a4, a0, 14
+; RV64I-NEXT:    subw a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 23
+; RV64I-NEXT:    subw a2, a2, a4
+; RV64I-NEXT:    slli a0, a0, 27
+; RV64I-NEXT:    add a1, a1, a3
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI7_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI7_0)
 ; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
 ; RV64XTHEADBB-LABEL: cttz_zero_undef_i32:
@@ -429,26 +455,36 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: findFirstSet_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    neg a0, a0
-; RV64I-NEXT:    and a0, s0, a0
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    srliw a0, a0, 27
-; RV64I-NEXT:    lui a1, %hi(.LCPI8_0)
-; RV64I-NEXT:    addi a1, a1, %lo(.LCPI8_0)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    snez a1, s0
-; RV64I-NEXT:    addi a1, a1, -1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    negw a1, a0
+; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a2, a1, 6
+; RV64I-NEXT:    slli a3, a1, 8
+; RV64I-NEXT:    slli a4, a1, 10
+; RV64I-NEXT:    slli a5, a1, 12
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    slli a3, a1, 16
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 18
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a5, a1, 4
+; RV64I-NEXT:    subw a5, a1, a5
+; RV64I-NEXT:    add a2, a5, a2
+; RV64I-NEXT:    slli a5, a1, 14
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 23
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a1, a1, 27
+; RV64I-NEXT:    add a2, a2, a4
+; RV64I-NEXT:    add a1, a3, a1
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    srliw a1, a1, 27
+; RV64I-NEXT:    lui a2, %hi(.LCPI8_0)
+; RV64I-NEXT:    addi a2, a2, %lo(.LCPI8_0)
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    lbu a1, 0(a1)
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64XTHEADBB-LABEL: findFirstSet_i32:
@@ -472,27 +508,37 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: ffs_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    neg a0, a0
-; RV64I-NEXT:    and a0, s0, a0
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    srliw a0, a0, 27
-; RV64I-NEXT:    lui a1, %hi(.LCPI9_0)
-; RV64I-NEXT:    addi a1, a1, %lo(.LCPI9_0)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    seqz a1, s0
-; RV64I-NEXT:    addi a0, a0, 1
-; RV64I-NEXT:    addi a1, a1, -1
-; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    negw a1, a0
+; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a2, a1, 6
+; RV64I-NEXT:    slli a3, a1, 8
+; RV64I-NEXT:    slli a4, a1, 10
+; RV64I-NEXT:    slli a5, a1, 12
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    slli a3, a1, 16
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 18
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a5, a1, 4
+; RV64I-NEXT:    subw a5, a1, a5
+; RV64I-NEXT:    add a2, a5, a2
+; RV64I-NEXT:    slli a5, a1, 14
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 23
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    add a2, a2, a4
+; RV64I-NEXT:    lui a4, %hi(.LCPI9_0)
+; RV64I-NEXT:    addi a4, a4, %lo(.LCPI9_0)
+; RV64I-NEXT:    slli a1, a1, 27
+; RV64I-NEXT:    add a1, a3, a1
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    srliw a1, a1, 27
+; RV64I-NEXT:    add a1, a4, a1
+; RV64I-NEXT:    lbu a1, 0(a1)
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a1, a1, 1
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64XTHEADBB-LABEL: ffs_i32:
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index 9760821832b37..e362e5ebd8192 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -414,8 +414,9 @@ define i64 @disjointormul6(i64 %a, i64 %b) {
 define i64 @addmul10(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul10:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, 10
-; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    slli a2, a0, 1
+; RV64I-NEXT:    slli a0, a0, 3
+; RV64I-NEXT:    add a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -451,8 +452,9 @@ define i64 @addmul12(i64 %a, i64 %b) {
 define i64 @addmul18(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul18:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, 18
-; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    slli a2, a0, 1
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    add a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -469,8 +471,9 @@ define i64 @addmul18(i64 %a, i64 %b) {
 define i64 @addmul20(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul20:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, 20
-; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    slli a2, a0, 2
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    add a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -518,8 +521,9 @@ define i64 @addmul24(i64 %a, i64 %b) {
 define i64 @addmul36(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul36:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, 36
-; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    slli a2, a0, 2
+; RV64I-NEXT:    slli a0, a0, 5
+; RV64I-NEXT:    add a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -536,8 +540,9 @@ define i64 @addmul36(i64 %a, i64 %b) {
 define i64 @addmul40(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul40:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, 40
-; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    slli a2, a0, 3
+; RV64I-NEXT:    slli a0, a0, 5
+; RV64I-NEXT:    add a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -554,8 +559,9 @@ define i64 @addmul40(i64 %a, i64 %b) {
 define i64 @addmul72(i64 %a, i64 %b) {
 ; RV64I-LABEL: addmul72:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, 72
-; RV64I-NEXT:    mul a0, a0, a2
+; RV64I-NEXT:    slli a2, a0, 3
+; RV64I-NEXT:    slli a0, a0, 6
+; RV64I-NEXT:    add a0, a0, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -915,8 +921,9 @@ define i64 @mul137(i64 %a) {
 define i64 @mul160(i64 %a) {
 ; RV64I-LABEL: mul160:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 160
-; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 5
+; RV64I-NEXT:    slli a0, a0, 7
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: mul160:
@@ -931,8 +938,9 @@ define i64 @mul160(i64 %a) {
 define i64 @mul288(i64 %a) {
 ; RV64I-LABEL: mul288:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 288
-; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 5
+; RV64I-NEXT:    slli a0, a0, 8
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: mul288:
@@ -947,10 +955,10 @@ define i64 @mul288(i64 %a) {
 define i64 @zext_mul68(i32 signext %a) {
 ; RV64I-LABEL: zext_mul68:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 17
-; RV64I-NEXT:    slli a1, a1, 34
 ; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    mulhu a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 30
+; RV64I-NEXT:    srli a0, a0, 26
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: zext_mul68:
@@ -985,10 +993,10 @@ define i64 @zext_mul96(i32 signext %a) {
 define i64 @zext_mul160(i32 signext %a) {
 ; RV64I-LABEL: zext_mul160:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    slli a1, a1, 37
 ; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    mulhu a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 27
+; RV64I-NEXT:    srli a0, a0, 25
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: zext_mul160:
@@ -1004,10 +1012,10 @@ define i64 @zext_mul160(i32 signext %a) {
 define i64 @zext_mul288(i32 signext %a) {
 ; RV64I-LABEL: zext_mul288:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 9
-; RV64I-NEXT:    slli a1, a1, 37
 ; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    mulhu a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 27
+; RV64I-NEXT:    srli a0, a0, 24
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: zext_mul288:
@@ -1043,9 +1051,9 @@ define i64 @zext_mul12884901888(i32 signext %a) {
 define i64 @zext_mul21474836480(i32 signext %a) {
 ; RV64I-LABEL: zext_mul21474836480:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 5
-; RV64I-NEXT:    slli a1, a1, 32
-; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 32
+; RV64I-NEXT:    slli a0, a0, 34
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: zext_mul21474836480:
@@ -1062,9 +1070,9 @@ define i64 @zext_mul21474836480(i32 signext %a) {
 define i64 @zext_mul38654705664(i32 signext %a) {
 ; RV64I-LABEL: zext_mul38654705664:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 9
-; RV64I-NEXT:    slli a1, a1, 32
-; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 32
+; RV64I-NEXT:    slli a0, a0, 35
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: zext_mul38654705664:
@@ -1188,8 +1196,9 @@ define i64 @adduw_imm(i32 signext %0) nounwind {
 define i64 @mul258(i64 %a) {
 ; RV64I-LABEL: mul258:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 258
-; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 1
+; RV64I-NEXT:    slli a0, a0, 8
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: mul258:
@@ -1204,8 +1213,9 @@ define i64 @mul258(i64 %a) {
 define i64 @mul260(i64 %a) {
 ; RV64I-LABEL: mul260:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 260
-; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    slli a0, a0, 8
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: mul260:
@@ -1220,8 +1230,9 @@ define i64 @mul260(i64 %a) {
 define i64 @mul264(i64 %a) {
 ; RV64I-LABEL: mul264:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 264
-; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 3
+; RV64I-NEXT:    slli a0, a0, 8
+; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: mul264:
@@ -1496,8 +1507,9 @@ define signext i32 @mulw192(i32 signext %a) {
 define signext i32 @mulw320(i32 signext %a) {
 ; RV64I-LABEL: mulw320:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 320
-; RV64I-NEXT:    mulw a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    slli a0, a0, 8
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: mulw320:
@@ -1512,8 +1524,9 @@ define signext i32 @mulw320(i32 signext %a) {
 define signext i32 @mulw576(i32 signext %a) {
 ; RV64I-LABEL: mulw576:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 576
-; RV64I-NEXT:    mulw a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    slli a0, a0, 9
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: mulw576:
@@ -2977,8 +2990,9 @@ define i64 @bext_mul132(i32 %1, i32 %2) {
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    srlw a0, a0, a1
 ; RV64I-NEXT:    andi a0, a0, 1
-; RV64I-NEXT:    li a1, 132
-; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    slli a1, a0, 2
+; RV64I-NEXT:    slli a0, a0, 7
+; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBANOZBB-LABEL: bext_mul132:
@@ -3015,10 +3029,10 @@ define ptr @gep_lshr_i32(ptr %0, i64 %1) {
 ; RV64I-LABEL: gep_lshr_i32:
 ; RV64I:       # %bb.0: # %entry
 ; RV64I-NEXT:    srli a1, a1, 2
-; RV64I-NEXT:    li a2, 5
-; RV64I-NEXT:    slli a2, a2, 36
 ; RV64I-NEXT:    slli a1, a1, 32
-; RV64I-NEXT:    mulhu a1, a1, a2
+; RV64I-NEXT:    srli a2, a1, 28
+; RV64I-NEXT:    srli a1, a1, 26
+; RV64I-NEXT:    add a1, a1, a2
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index 17eb0817d548a..3cd1931b6ae4c 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -347,20 +347,33 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    beqz a0, .LBB6_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    neg a1, a0
+; RV64I-NEXT:    negw a1, a0
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    slli a2, a0, 8
+; RV64I-NEXT:    slli a3, a0, 10
+; RV64I-NEXT:    slli a4, a0, 12
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 16
+; RV64I-NEXT:    subw a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 18
+; RV64I-NEXT:    subw a2, a2, a4
+; RV64I-NEXT:    slli a4, a0, 4
+; RV64I-NEXT:    subw a4, a0, a4
+; RV64I-NEXT:    add a1, a4, a1
+; RV64I-NEXT:    slli a4, a0, 14
+; RV64I-NEXT:    subw a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 23
+; RV64I-NEXT:    subw a2, a2, a4
+; RV64I-NEXT:    slli a0, a0, 27
+; RV64I-NEXT:    add a1, a1, a3
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI6_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI6_0)
 ; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ; RV64I-NEXT:  .LBB6_2:
 ; RV64I-NEXT:    li a0, 32
@@ -377,20 +390,33 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
 define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: cttz_zero_undef_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    neg a1, a0
+; RV64I-NEXT:    negw a1, a0
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
+; RV64I-NEXT:    slli a1, a0, 6
+; RV64I-NEXT:    slli a2, a0, 8
+; RV64I-NEXT:    slli a3, a0, 10
+; RV64I-NEXT:    slli a4, a0, 12
+; RV64I-NEXT:    add a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 16
+; RV64I-NEXT:    subw a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 18
+; RV64I-NEXT:    subw a2, a2, a4
+; RV64I-NEXT:    slli a4, a0, 4
+; RV64I-NEXT:    subw a4, a0, a4
+; RV64I-NEXT:    add a1, a4, a1
+; RV64I-NEXT:    slli a4, a0, 14
+; RV64I-NEXT:    subw a3, a3, a4
+; RV64I-NEXT:    slli a4, a0, 23
+; RV64I-NEXT:    subw a2, a2, a4
+; RV64I-NEXT:    slli a0, a0, 27
+; RV64I-NEXT:    add a1, a1, a3
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    srliw a0, a0, 27
 ; RV64I-NEXT:    lui a1, %hi(.LCPI7_0)
 ; RV64I-NEXT:    addi a1, a1, %lo(.LCPI7_0)
 ; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: cttz_zero_undef_i32:
@@ -404,26 +430,36 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
 define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: findFirstSet_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    neg a0, a0
-; RV64I-NEXT:    and a0, s0, a0
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    srliw a0, a0, 27
-; RV64I-NEXT:    lui a1, %hi(.LCPI8_0)
-; RV64I-NEXT:    addi a1, a1, %lo(.LCPI8_0)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    snez a1, s0
-; RV64I-NEXT:    addi a1, a1, -1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    negw a1, a0
+; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a2, a1, 6
+; RV64I-NEXT:    slli a3, a1, 8
+; RV64I-NEXT:    slli a4, a1, 10
+; RV64I-NEXT:    slli a5, a1, 12
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    slli a3, a1, 16
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 18
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a5, a1, 4
+; RV64I-NEXT:    subw a5, a1, a5
+; RV64I-NEXT:    add a2, a5, a2
+; RV64I-NEXT:    slli a5, a1, 14
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 23
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a1, a1, 27
+; RV64I-NEXT:    add a2, a2, a4
+; RV64I-NEXT:    add a1, a3, a1
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    srliw a1, a1, 27
+; RV64I-NEXT:    lui a2, %hi(.LCPI8_0)
+; RV64I-NEXT:    addi a2, a2, %lo(.LCPI8_0)
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    lbu a1, 0(a1)
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: findFirstSet_i32:
@@ -442,27 +478,37 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
 define signext i32 @ffs_i32(i32 signext %a) nounwind {
 ; RV64I-LABEL: ffs_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    neg a0, a0
-; RV64I-NEXT:    and a0, s0, a0
-; RV64I-NEXT:    lui a1, 30667
-; RV64I-NEXT:    addiw a1, a1, 1329
-; RV64I-NEXT:    call __muldi3
-; RV64I-NEXT:    srliw a0, a0, 27
-; RV64I-NEXT:    lui a1, %hi(.LCPI9_0)
-; RV64I-NEXT:    addi a1, a1, %lo(.LCPI9_0)
-; RV64I-NEXT:    add a0, a1, a0
-; RV64I-NEXT:    lbu a0, 0(a0)
-; RV64I-NEXT:    seqz a1, s0
-; RV64I-NEXT:    addi a0, a0, 1
-; RV64I-NEXT:    addi a1, a1, -1
-; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    negw a1, a0
+; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a2, a1, 6
+; RV64I-NEXT:    slli a3, a1, 8
+; RV64I-NEXT:    slli a4, a1, 10
+; RV64I-NEXT:    slli a5, a1, 12
+; RV64I-NEXT:    add a2, a2, a3
+; RV64I-NEXT:    slli a3, a1, 16
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 18
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    slli a5, a1, 4
+; RV64I-NEXT:    subw a5, a1, a5
+; RV64I-NEXT:    add a2, a5, a2
+; RV64I-NEXT:    slli a5, a1, 14
+; RV64I-NEXT:    subw a4, a4, a5
+; RV64I-NEXT:    slli a5, a1, 23
+; RV64I-NEXT:    subw a3, a3, a5
+; RV64I-NEXT:    add a2, a2, a4
+; RV64I-NEXT:    lui a4, %hi(.LCPI9_0)
+; RV64I-NEXT:    addi a4, a4, %lo(.LCPI9_0)
+; RV64I-NEXT:    slli a1, a1, 27
+; RV64I-NEXT:    add a1, a3, a1
+; RV64I-NEXT:    add a1, a2, a1
+; RV64I-NEXT:    srliw a1, a1, 27
+; RV64I-NEXT:    add a1, a4, a1
+; RV64I-NEXT:    lbu a1, 0(a1)
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a1, a1, 1
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: ffs_i32:
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
index 530f9bf19fce7..bd912193c4fed 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -105,87 +105,86 @@ define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(ptr %x) {
 ; CHECK-NEXT:    sub sp, sp, a2
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    li a3, 40
 ; CHECK-NEXT:    vl8re32.v v8, (a1)
-; CHECK-NEXT:    csrr a4, vlenb
-; CHECK-NEXT:    slli a4, a4, 5
-; CHECK-NEXT:    add a4, sp, a4
-; CHECK-NEXT:    addi a4, a4, 16
-; CHECK-NEXT:    vs8r.v v8, (a4) # vscale x 64-byte Folded Spill
-; CHECK-NEXT:    slli a4, a2, 3
-; CHECK-NEXT:    slli a5, a2, 5
-; CHECK-NEXT:    slli a6, a2, 4
-; CHECK-NEXT:    slli a7, a2, 6
-; CHECK-NEXT:    mul a2, a2, a3
-; CHECK-NEXT:    sub a3, a5, a4
-; CHECK-NEXT:    sub t0, a7, a6
-; CHECK-NEXT:    sub a7, a7, a4
-; CHECK-NEXT:    add t1, a1, a4
-; CHECK-NEXT:    add t2, a1, a6
-; CHECK-NEXT:    add t3, a1, a5
+; CHECK-NEXT:    csrr a3, vlenb
+; CHECK-NEXT:    slli a3, a3, 5
+; CHECK-NEXT:    add a3, sp, a3
+; CHECK-NEXT:    addi a3, a3, 16
+; CHECK-NEXT:    vs8r.v v8, (a3) # vscale x 64-byte Folded Spill
+; CHECK-NEXT:    slli a3, a2, 3
+; CHECK-NEXT:    slli a4, a2, 5
+; CHECK-NEXT:    slli a5, a2, 4
+; CHECK-NEXT:    slli a2, a2, 6
+; CHECK-NEXT:    sub a6, a4, a3
+; CHECK-NEXT:    add a7, a4, a3
+; CHECK-NEXT:    sub t0, a2, a5
+; CHECK-NEXT:    sub a2, a2, a3
+; CHECK-NEXT:    add t1, a1, a3
+; CHECK-NEXT:    add t2, a1, a5
+; CHECK-NEXT:    add t3, a1, a4
 ; CHECK-NEXT:    vl8re32.v v8, (t1)
 ; CHECK-NEXT:    csrr t1, vlenb
-; CHECK-NEXT:    li t4, 24
-; CHECK-NEXT:    mul t1, t1, t4
+; CHECK-NEXT:    slli t1, t1, 4
 ; CHECK-NEXT:    add t1, sp, t1
 ; CHECK-NEXT:    addi t1, t1, 16
 ; CHECK-NEXT:    vs8r.v v8, (t1) # vscale x 64-byte Folded Spill
-; CHECK-NEXT:    add t1, a1, a2
+; CHECK-NEXT:    add t1, a1, a6
 ; CHECK-NEXT:    vl8re32.v v8, (t2)
-; CHECK-NEXT:    csrr t2, vlenb
-; CHECK-NEXT:    slli t2, t2, 3
-; CHECK-NEXT:    add t2, sp, t2
-; CHECK-NEXT:    addi t2, t2, 16
+; CHECK-NEXT:    addi t2, sp, 16
 ; CHECK-NEXT:    vs8r.v v8, (t2) # vscale x 64-byte Folded Spill
-; CHECK-NEXT:    add t2, a1, a3
+; CHECK-NEXT:    add t2, a1, a7
 ; CHECK-NEXT:    vl8re32.v v16, (t3)
 ; CHECK-NEXT:    add t3, a1, t0
-; CHECK-NEXT:    add a1, a1, a7
+; CHECK-NEXT:    add a1, a1, a2
 ; CHECK-NEXT:    vl8re32.v v8, (t1)
-; CHECK-NEXT:    vl8re32.v v24, (t2)
 ; CHECK-NEXT:    csrr t1, vlenb
-; CHECK-NEXT:    slli t1, t1, 4
+; CHECK-NEXT:    li t4, 24
+; CHECK-NEXT:    mul t1, t1, t4
 ; CHECK-NEXT:    add t1, sp, t1
 ; CHECK-NEXT:    addi t1, t1, 16
-; CHECK-NEXT:    vs8r.v v24, (t1) # vscale x 64-byte Folded Spill
+; CHECK-NEXT:    vs8r.v v8, (t1) # vscale x 64-byte Folded Spill
+; CHECK-NEXT:    vl8re32.v v8, (t2)
+; CHECK-NEXT:    csrr t1, vlenb
+; CHECK-NEXT:    slli t1, t1, 3
+; CHECK-NEXT:    add t1, sp, t1
+; CHECK-NEXT:    addi t1, t1, 16
+; CHECK-NEXT:    vs8r.v v8, (t1) # vscale x 64-byte Folded Spill
 ; CHECK-NEXT:    vl8re32.v v24, (t3)
-; CHECK-NEXT:    addi t1, sp, 16
-; CHECK-NEXT:    vs8r.v v24, (t1) # vscale x 64-byte Folded Spill
-; CHECK-NEXT:    vl8re32.v v24, (a1)
+; CHECK-NEXT:    vl8re32.v v8, (a1)
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    slli a1, a1, 5
 ; CHECK-NEXT:    add a1, sp, a1
 ; CHECK-NEXT:    addi a1, a1, 16
 ; CHECK-NEXT:    vl8r.v v0, (a1) # vscale x 64-byte Folded Reload
 ; CHECK-NEXT:    vs8r.v v0, (a0)
-; CHECK-NEXT:    add a2, a0, a2
-; CHECK-NEXT:    vs8r.v v8, (a2)
+; CHECK-NEXT:    add a4, a0, a4
+; CHECK-NEXT:    vs8r.v v16, (a4)
 ; CHECK-NEXT:    add a5, a0, a5
+; CHECK-NEXT:    addi a1, sp, 16
+; CHECK-NEXT:    vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
 ; CHECK-NEXT:    vs8r.v v16, (a5)
-; CHECK-NEXT:    add a6, a0, a6
+; CHECK-NEXT:    add a3, a0, a3
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    slli a1, a1, 4
 ; CHECK-NEXT:    add a1, sp, a1
 ; CHECK-NEXT:    addi a1, a1, 16
-; CHECK-NEXT:    vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
-; CHECK-NEXT:    vs8r.v v8, (a6)
-; CHECK-NEXT:    add a4, a0, a4
+; CHECK-NEXT:    vl8r.v v16, (a1) # vscale x 64-byte Folded Reload
+; CHECK-NEXT:    vs8r.v v16, (a3)
+; CHECK-NEXT:    add a2, a0, a2
+; CHECK-NEXT:    vs8r.v v8, (a2)
+; CHECK-NEXT:    add t0, a0, t0
+; CHECK-NEXT:    vs8r.v v24, (t0)
+; CHECK-NEXT:    add a7, a0, a7
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    li a2, 24
-; CHECK-NEXT:    mul a1, a1, a2
+; CHECK-NEXT:    slli a1, a1, 3
 ; CHECK-NEXT:    add a1, sp, a1
 ; CHECK-NEXT:    addi a1, a1, 16
 ; CHECK-NEXT:    vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
-; CHECK-NEXT:    vs8r.v v8, (a4)
-; CHECK-NEXT:    add a7, a0, a7
-; CHECK-NEXT:    vs8r.v v24, (a7)
-; CHECK-NEXT:    add t0, a0, t0
-; CHECK-NEXT:    addi a1, sp, 16
-; CHECK-NEXT:    vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
-; CHECK-NEXT:    vs8r.v v8, (t0)
-; CHECK-NEXT:    add a0, a0, a3
+; CHECK-NEXT:    vs8r.v v8, (a7)
+; CHECK-NEXT:    add a0, a0, a6
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    li a2, 24
+; CHECK-NEXT:    mul a1, a1, a2
 ; CHECK-NEXT:    add a1, sp, a1
 ; CHECK-NEXT:    addi a1, a1, 16
 ; CHECK-NEXT:    vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
index 29d9a8a9b060c..07aa05f609c40 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll
@@ -653,28 +653,31 @@ define void @gather_of_pointers(ptr noalias nocapture %arg, ptr noalias nocaptur
 ; ZVE32F-LABEL: gather_of_pointers:
 ; ZVE32F:       # %bb.0: # %bb
 ; ZVE32F-NEXT:    li a2, 0
-; ZVE32F-NEXT:    lui a4, 2
-; ZVE32F-NEXT:    li a3, 1
-; ZVE32F-NEXT:    add a4, a0, a4
-; ZVE32F-NEXT:    li a5, 40
+; ZVE32F-NEXT:    lui a3, 2
+; ZVE32F-NEXT:    add a3, a0, a3
+; ZVE32F-NEXT:    li a4, 1
 ; ZVE32F-NEXT:  .LBB12_1: # %bb2
 ; ZVE32F-NEXT:    # =>This Inner Loop Header: Depth=1
-; ZVE32F-NEXT:    mul a6, a3, a5
-; ZVE32F-NEXT:    mul a7, a2, a5
+; ZVE32F-NEXT:    slli a5, a4, 3
+; ZVE32F-NEXT:    slli a6, a4, 5
+; ZVE32F-NEXT:    slli a7, a2, 3
+; ZVE32F-NEXT:    slli t0, a2, 5
 ; ZVE32F-NEXT:    addi a2, a2, 4
-; ZVE32F-NEXT:    add a6, a1, a6
+; ZVE32F-NEXT:    add a5, a6, a5
+; ZVE32F-NEXT:    add a7, t0, a7
+; ZVE32F-NEXT:    add a5, a1, a5
 ; ZVE32F-NEXT:    add a7, a1, a7
-; ZVE32F-NEXT:    ld t0, 0(a7)
-; ZVE32F-NEXT:    ld t1, 0(a6)
+; ZVE32F-NEXT:    ld a6, 0(a7)
+; ZVE32F-NEXT:    ld t0, 0(a5)
 ; ZVE32F-NEXT:    ld a7, 80(a7)
-; ZVE32F-NEXT:    ld a6, 80(a6)
-; ZVE32F-NEXT:    sd t0, 0(a0)
-; ZVE32F-NEXT:    sd t1, 8(a0)
+; ZVE32F-NEXT:    ld a5, 80(a5)
+; ZVE32F-NEXT:    sd a6, 0(a0)
+; ZVE32F-NEXT:    sd t0, 8(a0)
 ; ZVE32F-NEXT:    sd a7, 16(a0)
-; ZVE32F-NEXT:    sd a6, 24(a0)
+; ZVE32F-NEXT:    sd a5, 24(a0)
 ; ZVE32F-NEXT:    addi a0, a0, 32
-; ZVE32F-NEXT:    addi a3, a3, 4
-; ZVE32F-NEXT:    bne a0, a4, .LBB12_1
+; ZVE32F-NEXT:    addi a4, a4, 4
+; ZVE32F-NEXT:    bne a0, a3, .LBB12_1
 ; ZVE32F-NEXT:  # %bb.2: # %bb18
 ; ZVE32F-NEXT:    ret
 ;
@@ -701,28 +704,31 @@ define void @gather_of_pointers(ptr noalias nocapture %arg, ptr noalias nocaptur
 ; OPTV-LABEL: gather_of_pointers:
 ; OPTV:       # %bb.0: # %bb
 ; OPTV-NEXT:    li a2, 0
-; OPTV-NEXT:    lui a4, 2
-; OPTV-NEXT:    li a3, 1
-; OPTV-NEXT:    add a4, a0, a4
-; OPTV-NEXT:    li a5, 40
+; OPTV-NEXT:    lui a3, 2
+; OPTV-NEXT:    add a3, a0, a3
+; OPTV-NEXT:    li a4, 1
 ; OPTV-NEXT:  .LBB12_1: # %bb2
 ; OPTV-NEXT:    # =>This Inner Loop Header: Depth=1
-; OPTV-NEXT:    mul a6, a3, a5
-; OPTV-NEXT:    mul a7, a2, a5
+; OPTV-NEXT:    slli a5, a4, 3
+; OPTV-NEXT:    slli a6, a4, 5
+; OPTV-NEXT:    slli a7, a2, 3
+; OPTV-NEXT:    slli t0, a2, 5
 ; OPTV-NEXT:    addi a2, a2, 4
-; OPTV-NEXT:    add a6, a1, a6
+; OPTV-NEXT:    add a5, a6, a5
+; OPTV-NEXT:    add a7, t0, a7
+; OPTV-NEXT:    add a5, a1, a5
 ; OPTV-NEXT:    add a7, a1, a7
-; OPTV-NEXT:    ld t0, 0(a7)
-; OPTV-NEXT:    ld t1, 0(a6)
+; OPTV-NEXT:    ld a6, 0(a7)
+; OPTV-NEXT:    ld t0, 0(a5)
 ; OPTV-NEXT:    ld a7, 80(a7)
-; OPTV-NEXT:    ld a6, 80(a6)
-; OPTV-NEXT:    sd t0, 0(a0)
-; OPTV-NEXT:    sd t1, 8(a0)
+; OPTV-NEXT:    ld a5, 80(a5)
+; OPTV-NEXT:    sd a6, 0(a0)
+; OPTV-NEXT:    sd t0, 8(a0)
 ; OPTV-NEXT:    sd a7, 16(a0)
-; OPTV-NEXT:    sd a6, 24(a0)
+; OPTV-NEXT:    sd a5, 24(a0)
 ; OPTV-NEXT:    addi a0, a0, 32
-; OPTV-NEXT:    addi a3, a3, 4
-; OPTV-NEXT:    bne a0, a4, .LBB12_1
+; OPTV-NEXT:    addi a4, a4, 4
+; OPTV-NEXT:    bne a0, a3, .LBB12_1
 ; OPTV-NEXT:  # %bb.2: # %bb18
 ; OPTV-NEXT:    ret
 bb:
@@ -778,28 +784,31 @@ define void @scatter_of_pointers(ptr noalias nocapture %arg, ptr noalias nocaptu
 ; ZVE32F-LABEL: scatter_of_pointers:
 ; ZVE32F:       # %bb.0: # %bb
 ; ZVE32F-NEXT:    li a2, 0
-; ZVE32F-NEXT:    lui a4, 2
-; ZVE32F-NEXT:    li a3, 1
-; ZVE32F-NEXT:    add a4, a1, a4
-; ZVE32F-NEXT:    li a5, 40
+; ZVE32F-NEXT:    lui a3, 2
+; ZVE32F-NEXT:    add a3, a1, a3
+; ZVE32F-NEXT:    li a4, 1
 ; ZVE32F-NEXT:  .LBB13_1: # %bb2
 ; ZVE32F-NEXT:    # =>This Inner Loop Header: Depth=1
-; ZVE32F-NEXT:    ld a6, 0(a1)
-; ZVE32F-NEXT:    ld a7, 8(a1)
-; ZVE32F-NEXT:    ld t0, 16(a1)
-; ZVE32F-NEXT:    ld t1, 24(a1)
-; ZVE32F-NEXT:    mul t2, a3, a5
-; ZVE32F-NEXT:    mul t3, a2, a5
+; ZVE32F-NEXT:    ld a5, 0(a1)
+; ZVE32F-NEXT:    ld a6, 8(a1)
+; ZVE32F-NEXT:    ld a7, 16(a1)
+; ZVE32F-NEXT:    ld t0, 24(a1)
+; ZVE32F-NEXT:    slli t1, a4, 3
+; ZVE32F-NEXT:    slli t2, a4, 5
+; ZVE32F-NEXT:    slli t3, a2, 3
+; ZVE32F-NEXT:    add t1, t2, t1
+; ZVE32F-NEXT:    slli t2, a2, 5
 ; ZVE32F-NEXT:    addi a2, a2, 4
 ; ZVE32F-NEXT:    addi a1, a1, 32
+; ZVE32F-NEXT:    add t2, t2, t3
+; ZVE32F-NEXT:    add t1, a0, t1
 ; ZVE32F-NEXT:    add t2, a0, t2
-; ZVE32F-NEXT:    add t3, a0, t3
-; ZVE32F-NEXT:    sd a6, 0(t3)
-; ZVE32F-NEXT:    sd a7, 0(t2)
-; ZVE32F-NEXT:    sd t0, 80(t3)
-; ZVE32F-NEXT:    sd t1, 80(t2)
-; ZVE32F-NEXT:    addi a3, a3, 4
-; ZVE32F-NEXT:    bne a1, a4, .LBB13_1
+; ZVE32F-NEXT:    sd a5, 0(t2)
+; ZVE32F-NEXT:    sd a6, 0(t1)
+; ZVE32F-NEXT:    sd a7, 80(t2)
+; ZVE32F-NEXT:    sd t0, 80(t1)
+; ZVE32F-NEXT:    addi a4, a4, 4
+; ZVE32F-NEXT:    bne a1, a3, .LBB13_1
 ; ZVE32F-NEXT:  # %bb.2: # %bb18
 ; ZVE32F-NEXT:    ret
 ;
@@ -826,28 +835,31 @@ define void @scatter_of_pointers(ptr noalias nocapture %arg, ptr noalias nocaptu
 ; OPTV-LABEL: scatter_of_pointers:
 ; OPTV:       # %bb.0: # %bb
 ; OPTV-NEXT:    li a2, 0
-; OPTV-NEXT:    lui a4, 2
-; OPTV-NEXT:    li a3, 1
-; OPTV-NEXT:    add a4, a1, a4
-; OPTV-NEXT:    li a5, 40
+; OPTV-NEXT:    lui a3, 2
+; OPTV-NEXT:    add a3, a1, a3
+; OPTV-NEXT:    li a4, 1
 ; OPTV-NEXT:  .LBB13_1: # %bb2
 ; OPTV-NEXT:    # =>This Inner Loop Header: Depth=1
-; OPTV-NEXT:    ld a6, 0(a1)
-; OPTV-NEXT:    ld a7, 8(a1)
-; OPTV-NEXT:    ld t0, 16(a1)
-; OPTV-NEXT:    ld t1, 24(a1)
-; OPTV-NEXT:    mul t2, a3, a5
-; OPTV-NEXT:    mul t3, a2, a5
+; OPTV-NEXT:    ld a5, 0(a1)
+; OPTV-NEXT:    ld a6, 8(a1)
+; OPTV-NEXT:    ld a7, 16(a1)
+; OPTV-NEXT:    ld t0, 24(a1)
+; OPTV-NEXT:    slli t1, a4, 3
+; OPTV-NEXT:    slli t2, a4, 5
+; OPTV-NEXT:    slli t3, a2, 3
+; OPTV-NEXT:    add t1, t2, t1
+; OPTV-NEXT:    slli t2, a2, 5
 ; OPTV-NEXT:    addi a2, a2, 4
 ; OPTV-NEXT:    addi a1, a1, 32
+; OPTV-NEXT:    add t2, t2, t3
+; OPTV-NEXT:    add t1, a0, t1
 ; OPTV-NEXT:    add t2, a0, t2
-; OPTV-NEXT:    add t3, a0, t3
-; OPTV-NEXT:    sd a6, 0(t3)
-; OPTV-NEXT:    sd a7, 0(t2)
-; OPTV-NEXT:    sd t0, 80(t3)
-; OPTV-NEXT:    sd t1, 80(t2)
-; OPTV-NEXT:    addi a3, a3, 4
-; OPTV-NEXT:    bne a1, a4, .LBB13_1
+; OPTV-NEXT:    sd a5, 0(t2)
+; OPTV-NEXT:    sd a6, 0(t1)
+; OPTV-NEXT:    sd a7, 80(t2)
+; OPTV-NEXT:    sd t0, 80(t1)
+; OPTV-NEXT:    addi a4, a4, 4
+; OPTV-NEXT:    bne a1, a3, .LBB13_1
 ; OPTV-NEXT:  # %bb.2: # %bb18
 ; OPTV-NEXT:    ret
 bb:
diff --git a/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll b/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll
index 7990c1c1eabc2..4d9a6aeaad2ef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll
@@ -9,26 +9,35 @@
 define i32 @vscale_known_nonzero() {
 ; CHECK-LABEL: vscale_known_nonzero:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_offset ra, -8
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    neg a1, a0
+; CHECK-NEXT:    negw a1, a0
 ; CHECK-NEXT:    and a0, a0, a1
-; CHECK-NEXT:    lui a1, 30667
-; CHECK-NEXT:    addiw a1, a1, 1329
-; CHECK-NEXT:    call __muldi3
+; CHECK-NEXT:    slli a1, a0, 6
+; CHECK-NEXT:    slli a2, a0, 8
+; CHECK-NEXT:    slli a3, a0, 10
+; CHECK-NEXT:    slli a4, a0, 12
+; CHECK-NEXT:    add a1, a1, a2
+; CHECK-NEXT:    slli a2, a0, 16
+; CHECK-NEXT:    subw a3, a3, a4
+; CHECK-NEXT:    slli a4, a0, 18
+; CHECK-NEXT:    subw a2, a2, a4
+; CHECK-NEXT:    slli a4, a0, 4
+; CHECK-NEXT:    subw a4, a0, a4
+; CHECK-NEXT:    add a1, a4, a1
+; CHECK-NEXT:    slli a4, a0, 14
+; CHECK-NEXT:    subw a3, a3, a4
+; CHECK-NEXT:    slli a4, a0, 23
+; CHECK-NEXT:    subw a2, a2, a4
+; CHECK-NEXT:    slli a0, a0, 27
+; CHECK-NEXT:    add a1, a1, a3
+; CHECK-NEXT:    add a0, a2, a0
+; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    srliw a0, a0, 27
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
 ; CHECK-NEXT:    addi a1, a1, %lo(.LCPI0_0)
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    lbu a0, 0(a0)
-; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    .cfi_restore ra
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   %x = call i32 @llvm.vscale()
   %r = call i32 @llvm.cttz.i32(i32 %x, i1 false)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
index a78130e8f102f..3da04eb7e6abe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -925,9 +925,9 @@ define half @vreduce_ord_fadd_nxv10f16(<vscale x 10 x half> %v, half %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_nxv10f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    li a1, 10
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    srli a1, a0, 3
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -1007,9 +1007,9 @@ define half @vreduce_fmin_nxv10f16(<vscale x 10 x half> %v) {
 ; CHECK-NEXT:    addi a1, a1, %lo(.LCPI73_0)
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v12, (a1)
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    li a1, 10
-; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    srli a1, a0, 3
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    add a0, a0, a1
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfredmin.vs v12, v8, v12
 ; CHECK-NEXT:    vfmv.f.s fa0, v12
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index c6503813aeed2..17a09bf7dbe6c 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -9,40 +9,62 @@
 define i1 @test_srem_odd(i29 %X) nounwind {
 ; RV32-LABEL: test_srem_odd:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    lui a1, 128424
-; RV32-NEXT:    addi a1, a1, 331
-; RV32-NEXT:    call __mulsi3
-; RV32-NEXT:    lui a1, 662
-; RV32-NEXT:    addi a1, a1, -83
-; RV32-NEXT:    add a0, a0, a1
-; RV32-NEXT:    lui a1, 1324
-; RV32-NEXT:    slli a0, a0, 3
-; RV32-NEXT:    srli a0, a0, 3
-; RV32-NEXT:    addi a1, a1, -165
-; RV32-NEXT:    sltu a0, a0, a1
-; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    slli a1, a0, 4
+; RV32-NEXT:    slli a2, a0, 6
+; RV32-NEXT:    slli a3, a0, 8
+; RV32-NEXT:    slli a4, a0, 15
+; RV32-NEXT:    add a1, a1, a2
+; RV32-NEXT:    slli a2, a0, 19
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    slli a4, a0, 21
+; RV32-NEXT:    add a2, a2, a4
+; RV32-NEXT:    slli a4, a0, 2
+; RV32-NEXT:    add a4, a0, a4
+; RV32-NEXT:    sub a1, a1, a4
+; RV32-NEXT:    slli a4, a0, 17
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    slli a0, a0, 23
+; RV32-NEXT:    add a0, a2, a0
+; RV32-NEXT:    lui a2, 662
+; RV32-NEXT:    add a1, a1, a3
+; RV32-NEXT:    lui a3, 1324
+; RV32-NEXT:    addi a2, a2, -83
+; RV32-NEXT:    sub a0, a0, a2
+; RV32-NEXT:    sub a1, a1, a0
+; RV32-NEXT:    slli a1, a1, 3
+; RV32-NEXT:    srli a1, a1, 3
+; RV32-NEXT:    addi a0, a3, -165
+; RV32-NEXT:    sltu a0, a1, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: test_srem_odd:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    lui a1, 128424
-; RV64-NEXT:    addiw a1, a1, 331
-; RV64-NEXT:    call __muldi3
-; RV64-NEXT:    lui a1, 662
-; RV64-NEXT:    addi a1, a1, -83
-; RV64-NEXT:    add a0, a0, a1
-; RV64-NEXT:    lui a1, 1324
-; RV64-NEXT:    slli a0, a0, 35
-; RV64-NEXT:    srli a0, a0, 35
-; RV64-NEXT:    addiw a1, a1, -165
-; RV64-NEXT:    sltu a0, a0, a1
-; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    slli a1, a0, 4
+; RV64-NEXT:    slli a2, a0, 6
+; RV64-NEXT:    slli a3, a0, 8
+; RV64-NEXT:    slli a4, a0, 15
+; RV64-NEXT:    add a1, a1, a2
+; RV64-NEXT:    slli a2, a0, 19
+; RV64-NEXT:    add a3, a3, a4
+; RV64-NEXT:    slli a4, a0, 21
+; RV64-NEXT:    add a2, a2, a4
+; RV64-NEXT:    slli a4, a0, 2
+; RV64-NEXT:    add a4, a0, a4
+; RV64-NEXT:    subw a1, a1, a4
+; RV64-NEXT:    slli a4, a0, 17
+; RV64-NEXT:    add a3, a3, a4
+; RV64-NEXT:    slli a0, a0, 23
+; RV64-NEXT:    add a0, a2, a0
+; RV64-NEXT:    lui a2, 662
+; RV64-NEXT:    add a1, a1, a3
+; RV64-NEXT:    lui a3, 1324
+; RV64-NEXT:    addi a2, a2, -83
+; RV64-NEXT:    subw a0, a0, a2
+; RV64-NEXT:    subw a1, a1, a0
+; RV64-NEXT:    slli a1, a1, 35
+; RV64-NEXT:    srli a1, a1, 35
+; RV64-NEXT:    addiw a0, a3, -165
+; RV64-NEXT:    sltu a0, a1, a0
 ; RV64-NEXT:    ret
 ;
 ; RV32M-LABEL: test_srem_odd:
@@ -382,65 +404,122 @@ define void @test_srem_vec(ptr %X) nounwind {
 ; RV64-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s4, 0(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    mv s0, a0
 ; RV64-NEXT:    lbu a0, 12(a0)
-; RV64-NEXT:    ld a1, 0(s0)
-; RV64-NEXT:    lwu a2, 8(s0)
+; RV64-NEXT:    ld s3, 0(s0)
+; RV64-NEXT:    lwu a1, 8(s0)
 ; RV64-NEXT:    slli a0, a0, 32
-; RV64-NEXT:    srli a3, a1, 2
-; RV64-NEXT:    or a0, a2, a0
-; RV64-NEXT:    slli a2, a2, 62
-; RV64-NEXT:    slli a1, a1, 31
-; RV64-NEXT:    or a2, a2, a3
-; RV64-NEXT:    slli s1, a0, 29
-; RV64-NEXT:    srai a0, a2, 31
-; RV64-NEXT:    srai s1, s1, 31
-; RV64-NEXT:    srai s2, a1, 31
+; RV64-NEXT:    srli a2, s3, 2
+; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    slli a1, a1, 62
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a2, s3, 31
+; RV64-NEXT:    slli a3, a0, 29
+; RV64-NEXT:    srai a0, a1, 31
+; RV64-NEXT:    srai s2, a3, 31
+; RV64-NEXT:    srai s4, a2, 31
 ; RV64-NEXT:    li a1, 7
 ; RV64-NEXT:    call __moddi3
-; RV64-NEXT:    mv s3, a0
-; RV64-NEXT:    li a1, -5
-; RV64-NEXT:    mv a0, s1
-; RV64-NEXT:    call __moddi3
 ; RV64-NEXT:    mv s1, a0
-; RV64-NEXT:    lui a0, 699051
-; RV64-NEXT:    addiw a1, a0, -1365
-; RV64-NEXT:    slli a0, a1, 32
-; RV64-NEXT:    add a1, a1, a0
+; RV64-NEXT:    li a1, -5
 ; RV64-NEXT:    mv a0, s2
-; RV64-NEXT:    call __muldi3
-; RV64-NEXT:    lui a1, %hi(.LCPI3_0)
-; RV64-NEXT:    addi s1, s1, -2
-; RV64-NEXT:    addi s3, s3, -1
-; RV64-NEXT:    ld a1, %lo(.LCPI3_0)(a1)
-; RV64-NEXT:    seqz a2, s1
-; RV64-NEXT:    seqz a3, s3
-; RV64-NEXT:    addi a3, a3, -1
-; RV64-NEXT:    addi a2, a2, -1
-; RV64-NEXT:    slli a4, a2, 2
-; RV64-NEXT:    slli a5, a3, 31
-; RV64-NEXT:    srli a5, a5, 62
-; RV64-NEXT:    add a0, a0, a1
-; RV64-NEXT:    or a4, a5, a4
-; RV64-NEXT:    slli a5, a0, 63
-; RV64-NEXT:    srli a0, a0, 1
-; RV64-NEXT:    or a0, a0, a5
-; RV64-NEXT:    slli a2, a2, 29
-; RV64-NEXT:    slli a3, a3, 33
-; RV64-NEXT:    srli a2, a2, 61
-; RV64-NEXT:    sltu a0, a1, a0
-; RV64-NEXT:    neg a0, a0
-; RV64-NEXT:    slli a0, a0, 31
-; RV64-NEXT:    srli a0, a0, 31
-; RV64-NEXT:    or a0, a0, a3
-; RV64-NEXT:    sd a0, 0(s0)
+; RV64-NEXT:    call __moddi3
+; RV64-NEXT:    slli a1, s4, 4
+; RV64-NEXT:    slli a2, s4, 6
+; RV64-NEXT:    slli a3, s4, 8
+; RV64-NEXT:    slli a4, s4, 10
+; RV64-NEXT:    slli a5, s4, 14
+; RV64-NEXT:    slli a6, s4, 16
+; RV64-NEXT:    slli a7, s4, 22
+; RV64-NEXT:    add a1, a1, a2
+; RV64-NEXT:    slli a2, s4, 24
+; RV64-NEXT:    add a3, a3, a4
+; RV64-NEXT:    slli a4, s3, 32
+; RV64-NEXT:    add a5, a5, a6
+; RV64-NEXT:    slli a6, s3, 34
+; RV64-NEXT:    add a2, a7, a2
+; RV64-NEXT:    slli a7, s3, 48
+; RV64-NEXT:    add a4, a4, a6
+; RV64-NEXT:    slli a6, s3, 50
+; RV64-NEXT:    add a6, a7, a6
+; RV64-NEXT:    slli a7, s4, 2
+; RV64-NEXT:    add a7, s4, a7
+; RV64-NEXT:    add a1, a7, a1
+; RV64-NEXT:    slli a7, s4, 12
+; RV64-NEXT:    add a3, a3, a7
+; RV64-NEXT:    slli a7, s4, 18
+; RV64-NEXT:    add a5, a5, a7
+; RV64-NEXT:    slli a7, s4, 26
+; RV64-NEXT:    add a2, a2, a7
+; RV64-NEXT:    slli a7, s3, 36
+; RV64-NEXT:    add a4, a4, a7
+; RV64-NEXT:    slli a7, s3, 52
+; RV64-NEXT:    add a6, a6, a7
+; RV64-NEXT:    add a1, a1, a3
+; RV64-NEXT:    slli a3, s4, 20
+; RV64-NEXT:    add a3, a5, a3
+; RV64-NEXT:    slli a5, s4, 28
+; RV64-NEXT:    add a2, a2, a5
+; RV64-NEXT:    slli a5, s3, 38
+; RV64-NEXT:    add a4, a4, a5
+; RV64-NEXT:    slli a5, s3, 54
+; RV64-NEXT:    add a5, a6, a5
+; RV64-NEXT:    add a1, a1, a3
+; RV64-NEXT:    slli s4, s4, 30
+; RV64-NEXT:    add a2, a2, s4
+; RV64-NEXT:    slli a3, s3, 40
+; RV64-NEXT:    add a3, a4, a3
+; RV64-NEXT:    slli a4, s3, 56
+; RV64-NEXT:    add a4, a5, a4
+; RV64-NEXT:    slli a5, s3, 42
+; RV64-NEXT:    add a1, a1, a2
+; RV64-NEXT:    slli a2, s3, 58
+; RV64-NEXT:    addi a0, a0, -2
+; RV64-NEXT:    addi s1, s1, -1
+; RV64-NEXT:    seqz a0, a0
+; RV64-NEXT:    seqz a6, s1
+; RV64-NEXT:    addi a6, a6, -1
+; RV64-NEXT:    addi a0, a0, -1
+; RV64-NEXT:    add a3, a3, a5
+; RV64-NEXT:    slli a5, a0, 2
+; RV64-NEXT:    add a2, a4, a2
+; RV64-NEXT:    slli a4, a6, 31
+; RV64-NEXT:    srli a4, a4, 62
+; RV64-NEXT:    or a4, a4, a5
+; RV64-NEXT:    slli a5, s3, 44
+; RV64-NEXT:    add a3, a3, a5
+; RV64-NEXT:    slli a5, s3, 60
+; RV64-NEXT:    add a2, a2, a5
+; RV64-NEXT:    slli a5, s3, 46
+; RV64-NEXT:    add a3, a3, a5
+; RV64-NEXT:    slli s3, s3, 62
+; RV64-NEXT:    add a2, a2, s3
+; RV64-NEXT:    lui a5, %hi(.LCPI3_0)
+; RV64-NEXT:    ld a5, %lo(.LCPI3_0)(a5)
+; RV64-NEXT:    slli a0, a0, 29
+; RV64-NEXT:    slli a6, a6, 33
+; RV64-NEXT:    srli a0, a0, 61
+; RV64-NEXT:    add a1, a1, a3
+; RV64-NEXT:    sub a2, a5, a2
+; RV64-NEXT:    sub a2, a2, a1
+; RV64-NEXT:    slli a1, a2, 63
+; RV64-NEXT:    srli a2, a2, 1
+; RV64-NEXT:    or a1, a2, a1
+; RV64-NEXT:    sltu a1, a5, a1
+; RV64-NEXT:    neg a1, a1
+; RV64-NEXT:    slli a1, a1, 31
+; RV64-NEXT:    srli a1, a1, 31
+; RV64-NEXT:    or a1, a1, a6
+; RV64-NEXT:    sd a1, 0(s0)
 ; RV64-NEXT:    sw a4, 8(s0)
-; RV64-NEXT:    sb a2, 12(s0)
+; RV64-NEXT:    sb a0, 12(s0)
 ; RV64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s4, 0(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 48
 ; RV64-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
index c73a18c8869d5..46e250710f9c1 100644
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -9,30 +9,40 @@
 define i1 @test_urem_odd(i13 %X) nounwind {
 ; RV32-LABEL: test_urem_odd:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    lui a1, 1
-; RV32-NEXT:    addi a1, a1, -819
-; RV32-NEXT:    call __mulsi3
+; RV32-NEXT:    slli a1, a0, 4
+; RV32-NEXT:    slli a2, a0, 6
+; RV32-NEXT:    slli a3, a0, 8
+; RV32-NEXT:    sub a1, a1, a2
+; RV32-NEXT:    slli a2, a0, 10
+; RV32-NEXT:    sub a3, a3, a2
+; RV32-NEXT:    slli a2, a0, 2
+; RV32-NEXT:    sub a2, a0, a2
+; RV32-NEXT:    slli a0, a0, 12
+; RV32-NEXT:    add a1, a2, a1
+; RV32-NEXT:    add a0, a3, a0
+; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    slli a0, a0, 19
 ; RV32-NEXT:    srli a0, a0, 19
 ; RV32-NEXT:    sltiu a0, a0, 1639
-; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: test_urem_odd:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    lui a1, 1
-; RV64-NEXT:    addiw a1, a1, -819
-; RV64-NEXT:    call __muldi3
+; RV64-NEXT:    slli a1, a0, 4
+; RV64-NEXT:    slli a2, a0, 6
+; RV64-NEXT:    slli a3, a0, 8
+; RV64-NEXT:    subw a1, a1, a2
+; RV64-NEXT:    slli a2, a0, 10
+; RV64-NEXT:    subw a3, a3, a2
+; RV64-NEXT:    slli a2, a0, 2
+; RV64-NEXT:    subw a2, a0, a2
+; RV64-NEXT:    slli a0, a0, 12
+; RV64-NEXT:    add a1, a2, a1
+; RV64-NEXT:    add a0, a3, a0
+; RV64-NEXT:    add a0, a1, a0
 ; RV64-NEXT:    slli a0, a0, 51
 ; RV64-NEXT:    srli a0, a0, 51
 ; RV64-NEXT:    sltiu a0, a0, 1639
-; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
 ;
 ; RV32M-LABEL: test_urem_odd:
@@ -82,42 +92,64 @@ define i1 @test_urem_odd(i13 %X) nounwind {
 define i1 @test_urem_even(i27 %X) nounwind {
 ; RV32-LABEL: test_urem_even:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    lui a1, 28087
-; RV32-NEXT:    addi a1, a1, -585
-; RV32-NEXT:    call __mulsi3
+; RV32-NEXT:    slli a1, a0, 6
+; RV32-NEXT:    slli a2, a0, 9
+; RV32-NEXT:    slli a3, a0, 12
+; RV32-NEXT:    slli a4, a0, 15
+; RV32-NEXT:    add a1, a1, a2
+; RV32-NEXT:    slli a2, a0, 21
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    slli a4, a0, 24
+; RV32-NEXT:    add a2, a2, a4
+; RV32-NEXT:    slli a4, a0, 3
+; RV32-NEXT:    add a4, a0, a4
+; RV32-NEXT:    add a1, a4, a1
+; RV32-NEXT:    slli a4, a0, 18
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    slli a0, a0, 27
+; RV32-NEXT:    sub a0, a0, a2
+; RV32-NEXT:    lui a2, 2341
+; RV32-NEXT:    add a1, a1, a3
+; RV32-NEXT:    sub a0, a0, a1
 ; RV32-NEXT:    slli a1, a0, 26
 ; RV32-NEXT:    slli a0, a0, 5
 ; RV32-NEXT:    srli a0, a0, 6
 ; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    lui a1, 2341
 ; RV32-NEXT:    slli a0, a0, 5
 ; RV32-NEXT:    srli a0, a0, 5
-; RV32-NEXT:    addi a1, a1, -1755
+; RV32-NEXT:    addi a1, a2, -1755
 ; RV32-NEXT:    sltu a0, a0, a1
-; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: test_urem_even:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    lui a1, 28087
-; RV64-NEXT:    addiw a1, a1, -585
-; RV64-NEXT:    call __muldi3
+; RV64-NEXT:    slli a1, a0, 6
+; RV64-NEXT:    slli a2, a0, 9
+; RV64-NEXT:    slli a3, a0, 12
+; RV64-NEXT:    slli a4, a0, 15
+; RV64-NEXT:    add a1, a1, a2
+; RV64-NEXT:    slli a2, a0, 21
+; RV64-NEXT:    add a3, a3, a4
+; RV64-NEXT:    slli a4, a0, 24
+; RV64-NEXT:    add a2, a2, a4
+; RV64-NEXT:    slli a4, a0, 3
+; RV64-NEXT:    add a4, a0, a4
+; RV64-NEXT:    add a1, a4, a1
+; RV64-NEXT:    slli a4, a0, 18
+; RV64-NEXT:    add a3, a3, a4
+; RV64-NEXT:    slli a0, a0, 27
+; RV64-NEXT:    subw a0, a0, a2
+; RV64-NEXT:    lui a2, 2341
+; RV64-NEXT:    add a1, a1, a3
+; RV64-NEXT:    subw a0, a0, a1
 ; RV64-NEXT:    slli a1, a0, 26
 ; RV64-NEXT:    slli a0, a0, 37
 ; RV64-NEXT:    srli a0, a0, 38
 ; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    lui a1, 2341
 ; RV64-NEXT:    slli a0, a0, 37
 ; RV64-NEXT:    srli a0, a0, 37
-; RV64-NEXT:    addiw a1, a1, -1755
+; RV64-NEXT:    addiw a1, a2, -1755
 ; RV64-NEXT:    sltu a0, a0, a1
-; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
 ;
 ; RV32M-LABEL: test_urem_even:
@@ -256,28 +288,32 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 define i1 @test_urem_negative_odd(i9 %X) nounwind {
 ; RV32-LABEL: test_urem_negative_odd:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    li a1, 307
-; RV32-NEXT:    call __mulsi3
+; RV32-NEXT:    slli a1, a0, 2
+; RV32-NEXT:    slli a2, a0, 4
+; RV32-NEXT:    slli a3, a0, 6
+; RV32-NEXT:    sub a1, a1, a0
+; RV32-NEXT:    sub a2, a2, a3
+; RV32-NEXT:    sub a1, a1, a2
+; RV32-NEXT:    slli a0, a0, 8
+; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    andi a0, a0, 511
 ; RV32-NEXT:    sltiu a0, a0, 2
 ; RV32-NEXT:    xori a0, a0, 1
-; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: test_urem_negative_odd:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -16
-; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    li a1, 307
-; RV64-NEXT:    call __muldi3
+; RV64-NEXT:    slli a1, a0, 2
+; RV64-NEXT:    slli a2, a0, 4
+; RV64-NEXT:    slli a3, a0, 6
+; RV64-NEXT:    subw a1, a1, a0
+; RV64-NEXT:    subw a2, a2, a3
+; RV64-NEXT:    subw a1, a1, a2
+; RV64-NEXT:    slli a0, a0, 8
+; RV64-NEXT:    add a0, a1, a0
 ; RV64-NEXT:    andi a0, a0, 511
 ; RV64-NEXT:    sltiu a0, a0, 2
 ; RV64-NEXT:    xori a0, a0, 1
-; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
 ;
 ; RV32M-LABEL: test_urem_negative_odd:
@@ -323,117 +359,127 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
 define void @test_urem_vec(ptr %X) nounwind {
 ; RV32-LABEL: test_urem_vec:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
-; RV32-NEXT:    mv s0, a0
-; RV32-NEXT:    lbu a0, 4(a0)
-; RV32-NEXT:    lw a1, 0(s0)
-; RV32-NEXT:    slli a0, a0, 10
-; RV32-NEXT:    srli s1, a1, 22
-; RV32-NEXT:    or s1, s1, a0
-; RV32-NEXT:    srli s2, a1, 11
-; RV32-NEXT:    andi a0, a1, 2047
-; RV32-NEXT:    li a1, 683
-; RV32-NEXT:    call __mulsi3
-; RV32-NEXT:    slli a1, a0, 10
-; RV32-NEXT:    slli a0, a0, 21
-; RV32-NEXT:    srli a0, a0, 22
-; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    andi a0, a0, 2047
-; RV32-NEXT:    sltiu s3, a0, 342
-; RV32-NEXT:    li a1, 819
-; RV32-NEXT:    mv a0, s1
-; RV32-NEXT:    call __mulsi3
-; RV32-NEXT:    addi a0, a0, -1638
-; RV32-NEXT:    andi a0, a0, 2047
-; RV32-NEXT:    sltiu s1, a0, 2
-; RV32-NEXT:    xori s4, s1, 1
-; RV32-NEXT:    li a1, 1463
-; RV32-NEXT:    mv a0, s2
-; RV32-NEXT:    call __mulsi3
-; RV32-NEXT:    addi a0, a0, -1463
-; RV32-NEXT:    addi s3, s3, -1
-; RV32-NEXT:    addi s1, s1, -1
-; RV32-NEXT:    andi a0, a0, 2047
-; RV32-NEXT:    andi a1, s3, 2047
-; RV32-NEXT:    slli s1, s1, 22
-; RV32-NEXT:    sltiu a0, a0, 293
-; RV32-NEXT:    addi a0, a0, -1
-; RV32-NEXT:    andi a0, a0, 2047
-; RV32-NEXT:    slli a0, a0, 11
-; RV32-NEXT:    or a0, a0, s1
-; RV32-NEXT:    or a0, a1, a0
-; RV32-NEXT:    sw a0, 0(s0)
-; RV32-NEXT:    sb s4, 4(s0)
-; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    lbu a1, 4(a0)
+; RV32-NEXT:    lw a2, 0(a0)
+; RV32-NEXT:    slli a1, a1, 10
+; RV32-NEXT:    srli a3, a2, 22
+; RV32-NEXT:    srli a4, a2, 11
+; RV32-NEXT:    andi a2, a2, 2047
+; RV32-NEXT:    or a1, a3, a1
+; RV32-NEXT:    slli a3, a2, 2
+; RV32-NEXT:    slli a5, a2, 4
+; RV32-NEXT:    slli a6, a2, 6
+; RV32-NEXT:    slli a7, a2, 8
+; RV32-NEXT:    slli t0, a2, 10
+; RV32-NEXT:    addi a4, a4, -1
+; RV32-NEXT:    addi a1, a1, -2
+; RV32-NEXT:    add a2, a2, a3
+; RV32-NEXT:    add a5, a5, a6
+; RV32-NEXT:    sub a3, t0, a7
+; RV32-NEXT:    slli a6, a4, 3
+; RV32-NEXT:    slli a7, a4, 6
+; RV32-NEXT:    slli t0, a4, 9
+; RV32-NEXT:    add a2, a2, a5
+; RV32-NEXT:    slli a5, a1, 2
+; RV32-NEXT:    add a4, a4, a6
+; RV32-NEXT:    slli a6, a1, 4
+; RV32-NEXT:    add a7, a7, t0
+; RV32-NEXT:    slli t0, a1, 6
+; RV32-NEXT:    sub a6, a6, t0
+; RV32-NEXT:    slli t0, a1, 8
+; RV32-NEXT:    sub a5, a5, a1
+; RV32-NEXT:    slli a1, a1, 10
+; RV32-NEXT:    sub a1, t0, a1
+; RV32-NEXT:    sub a3, a3, a2
+; RV32-NEXT:    add a4, a4, a7
+; RV32-NEXT:    sub a2, a5, a6
+; RV32-NEXT:    slli a5, a3, 10
+; RV32-NEXT:    slli a3, a3, 21
+; RV32-NEXT:    neg a4, a4
+; RV32-NEXT:    sub a2, a2, a1
+; RV32-NEXT:    srli a3, a3, 22
+; RV32-NEXT:    andi a1, a4, 2047
+; RV32-NEXT:    andi a2, a2, 2047
+; RV32-NEXT:    or a3, a3, a5
+; RV32-NEXT:    sltiu a1, a1, 293
+; RV32-NEXT:    sltiu a2, a2, 2
+; RV32-NEXT:    andi a3, a3, 2047
+; RV32-NEXT:    addi a1, a1, -1
+; RV32-NEXT:    xori a4, a2, 1
+; RV32-NEXT:    sltiu a3, a3, 342
+; RV32-NEXT:    addi a2, a2, -1
+; RV32-NEXT:    andi a1, a1, 2047
+; RV32-NEXT:    addi a3, a3, -1
+; RV32-NEXT:    slli a1, a1, 11
+; RV32-NEXT:    slli a2, a2, 22
+; RV32-NEXT:    andi a3, a3, 2047
+; RV32-NEXT:    or a1, a1, a2
+; RV32-NEXT:    or a1, a3, a1
+; RV32-NEXT:    sw a1, 0(a0)
+; RV32-NEXT:    sb a4, 4(a0)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: test_urem_vec:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -48
-; RV64-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    mv s0, a0
-; RV64-NEXT:    lbu a0, 4(a0)
-; RV64-NEXT:    lwu a1, 0(s0)
-; RV64-NEXT:    slli a0, a0, 32
-; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    srli s1, a0, 22
-; RV64-NEXT:    srli s2, a0, 11
-; RV64-NEXT:    andi a0, a0, 2047
-; RV64-NEXT:    li a1, 683
-; RV64-NEXT:    call __muldi3
-; RV64-NEXT:    slli a1, a0, 10
-; RV64-NEXT:    slli a0, a0, 53
-; RV64-NEXT:    srli a0, a0, 54
-; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    andi a0, a0, 2047
-; RV64-NEXT:    sltiu s3, a0, 342
-; RV64-NEXT:    li a1, 1463
-; RV64-NEXT:    mv a0, s2
-; RV64-NEXT:    call __muldi3
-; RV64-NEXT:    addi a0, a0, -1463
-; RV64-NEXT:    andi a0, a0, 2047
-; RV64-NEXT:    sltiu s2, a0, 293
-; RV64-NEXT:    li a1, 819
-; RV64-NEXT:    mv a0, s1
-; RV64-NEXT:    call __muldi3
-; RV64-NEXT:    addi a0, a0, -1638
-; RV64-NEXT:    addi s3, s3, -1
-; RV64-NEXT:    addi s2, s2, -1
-; RV64-NEXT:    andi a0, a0, 2047
-; RV64-NEXT:    andi a1, s3, 2047
-; RV64-NEXT:    andi a2, s2, 2047
-; RV64-NEXT:    sltiu a0, a0, 2
+; RV64-NEXT:    lbu a1, 4(a0)
+; RV64-NEXT:    lwu a2, 0(a0)
+; RV64-NEXT:    slli a1, a1, 32
+; RV64-NEXT:    or a1, a2, a1
+; RV64-NEXT:    srli a2, a1, 22
+; RV64-NEXT:    srli a3, a1, 11
+; RV64-NEXT:    andi a1, a1, 2047
+; RV64-NEXT:    slli a4, a1, 2
+; RV64-NEXT:    slli a5, a1, 4
+; RV64-NEXT:    slli a6, a1, 6
+; RV64-NEXT:    slli a7, a1, 8
+; RV64-NEXT:    slli t0, a1, 10
+; RV64-NEXT:    addi a3, a3, -1
+; RV64-NEXT:    addi a2, a2, -2
+; RV64-NEXT:    add a1, a1, a4
+; RV64-NEXT:    add a5, a5, a6
+; RV64-NEXT:    subw a4, t0, a7
+; RV64-NEXT:    slli a6, a3, 3
+; RV64-NEXT:    slli a7, a3, 6
+; RV64-NEXT:    slli t0, a3, 9
+; RV64-NEXT:    add a1, a1, a5
+; RV64-NEXT:    slli a5, a2, 2
+; RV64-NEXT:    add a3, a3, a6
+; RV64-NEXT:    slli a6, a2, 4
+; RV64-NEXT:    add a7, a7, t0
+; RV64-NEXT:    slli t0, a2, 6
+; RV64-NEXT:    subw a6, a6, t0
+; RV64-NEXT:    slli t0, a2, 8
+; RV64-NEXT:    subw a5, a5, a2
+; RV64-NEXT:    slli a2, a2, 10
+; RV64-NEXT:    subw a2, t0, a2
+; RV64-NEXT:    subw a4, a4, a1
+; RV64-NEXT:    add a3, a3, a7
+; RV64-NEXT:    subw a1, a5, a6
+; RV64-NEXT:    slli a5, a4, 10
+; RV64-NEXT:    slli a4, a4, 53
+; RV64-NEXT:    negw a3, a3
+; RV64-NEXT:    subw a1, a1, a2
+; RV64-NEXT:    srli a4, a4, 54
+; RV64-NEXT:    andi a2, a3, 2047
+; RV64-NEXT:    andi a1, a1, 2047
+; RV64-NEXT:    or a4, a4, a5
+; RV64-NEXT:    sltiu a2, a2, 293
+; RV64-NEXT:    sltiu a1, a1, 2
+; RV64-NEXT:    andi a3, a4, 2047
+; RV64-NEXT:    addi a1, a1, -1
+; RV64-NEXT:    addi a2, a2, -1
+; RV64-NEXT:    sltiu a3, a3, 342
+; RV64-NEXT:    andi a2, a2, 2047
+; RV64-NEXT:    slli a1, a1, 22
+; RV64-NEXT:    addi a3, a3, -1
 ; RV64-NEXT:    slli a2, a2, 11
-; RV64-NEXT:    addi a0, a0, -1
-; RV64-NEXT:    slli a0, a0, 22
-; RV64-NEXT:    or a0, a2, a0
-; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    slli a1, a0, 31
-; RV64-NEXT:    srli a1, a1, 63
-; RV64-NEXT:    sw a0, 0(s0)
-; RV64-NEXT:    sb a1, 4(s0)
-; RV64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 48
+; RV64-NEXT:    andi a3, a3, 2047
+; RV64-NEXT:    or a1, a2, a1
+; RV64-NEXT:    or a1, a3, a1
+; RV64-NEXT:    slli a2, a1, 31
+; RV64-NEXT:    srli a2, a2, 63
+; RV64-NEXT:    sw a1, 0(a0)
+; RV64-NEXT:    sb a2, 4(a0)
 ; RV64-NEXT:    ret
 ;
 ; RV32M-LABEL: test_urem_vec:
diff --git a/llvm/test/CodeGen/RISCV/xqccmp-additional-stack.ll b/llvm/test/CodeGen/RISCV/xqccmp-additional-stack.ll
index 14e6b9bddd0a0..f287c19eb654c 100644
--- a/llvm/test/CodeGen/RISCV/xqccmp-additional-stack.ll
+++ b/llvm/test/CodeGen/RISCV/xqccmp-additional-stack.ll
@@ -4,39 +4,25 @@
 define ptr @func(ptr %s, i32 %_c, ptr %incdec.ptr, i1 %0, i8 %conv14) #0 {
 ; RV32-LABEL: func:
 ; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    qc.cm.push {ra, s0-s1}, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    .cfi_offset ra, -4
-; RV32-NEXT:    .cfi_offset s0, -8
-; RV32-NEXT:    .cfi_offset s1, -12
-; RV32-NEXT:    addi sp, sp, -8
-; RV32-NEXT:    .cfi_def_cfa_offset 24
-; RV32-NEXT:    sw a4, 4(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a2, 0(sp) # 4-byte Folded Spill
-; RV32-NEXT:    mv a2, a1
-; RV32-NEXT:    mv s1, a0
-; RV32-NEXT:    li a0, 1
-; RV32-NEXT:    andi a3, a3, 1
+; RV32-NEXT:    li a5, 1
+; RV32-NEXT:    andi t0, a3, 1
 ; RV32-NEXT:  .LBB0_1: # %while.body
 ; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-NEXT:    mv s0, a0
-; RV32-NEXT:    li a0, 0
-; RV32-NEXT:    bnez a3, .LBB0_1
+; RV32-NEXT:    mv a3, a5
+; RV32-NEXT:    li a5, 0
+; RV32-NEXT:    bnez t0, .LBB0_1
 ; RV32-NEXT:  # %bb.2: # %while.end
-; RV32-NEXT:    lui a0, 4112
-; RV32-NEXT:    addi a1, a0, 257
-; RV32-NEXT:    mv a0, a2
-; RV32-NEXT:    call __mulsi3
-; RV32-NEXT:    sw a0, 0(zero)
-; RV32-NEXT:    andi s0, s0, 1
-; RV32-NEXT:    lw a0, 0(sp) # 4-byte Folded Reload
-; RV32-NEXT:    add s0, s0, a0
-; RV32-NEXT:    lw a0, 4(sp) # 4-byte Folded Reload
-; RV32-NEXT:    sb a0, 0(s0)
-; RV32-NEXT:    mv a0, s1
-; RV32-NEXT:    addi sp, sp, 8
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    qc.cm.popret {ra, s0-s1}, 16
+; RV32-NEXT:    slli a5, a1, 8
+; RV32-NEXT:    slli t0, a1, 16
+; RV32-NEXT:    add a5, a5, a1
+; RV32-NEXT:    slli a1, a1, 24
+; RV32-NEXT:    andi a3, a3, 1
+; RV32-NEXT:    add a1, a1, t0
+; RV32-NEXT:    add a2, a2, a3
+; RV32-NEXT:    add a1, a1, a5
+; RV32-NEXT:    sw a1, 0(zero)
+; RV32-NEXT:    sb a4, 0(a2)
+; RV32-NEXT:    ret
 entry:
   br label %while.body
 
diff --git a/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll b/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
index c98b9b80378fd..c58e722deafe9 100644
--- a/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
+++ b/llvm/test/CodeGen/RISCV/zcmp-additional-stack.ll
@@ -3,39 +3,25 @@
 define ptr @func(ptr %s, i32 %_c, ptr %incdec.ptr, i1 %0, i8 %conv14) #0 {
 ; RV32-LABEL: func:
 ; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    cm.push {ra, s0-s1}, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    .cfi_offset ra, -12
-; RV32-NEXT:    .cfi_offset s0, -8
-; RV32-NEXT:    .cfi_offset s1, -4
-; RV32-NEXT:    addi sp, sp, -8
-; RV32-NEXT:    .cfi_def_cfa_offset 24
-; RV32-NEXT:    sw a4, 4(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a2, 0(sp) # 4-byte Folded Spill
-; RV32-NEXT:    mv a2, a1
-; RV32-NEXT:    mv s1, a0
-; RV32-NEXT:    li a0, 1
-; RV32-NEXT:    andi a3, a3, 1
+; RV32-NEXT:    li a5, 1
+; RV32-NEXT:    andi t0, a3, 1
 ; RV32-NEXT:  .LBB0_1: # %while.body
 ; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-NEXT:    mv s0, a0
-; RV32-NEXT:    li a0, 0
-; RV32-NEXT:    bnez a3, .LBB0_1
+; RV32-NEXT:    mv a3, a5
+; RV32-NEXT:    li a5, 0
+; RV32-NEXT:    bnez t0, .LBB0_1
 ; RV32-NEXT:  # %bb.2: # %while.end
-; RV32-NEXT:    lui a0, 4112
-; RV32-NEXT:    addi a1, a0, 257
-; RV32-NEXT:    mv a0, a2
-; RV32-NEXT:    call __mulsi3
-; RV32-NEXT:    sw a0, 0(zero)
-; RV32-NEXT:    andi s0, s0, 1
-; RV32-NEXT:    lw a0, 0(sp) # 4-byte Folded Reload
-; RV32-NEXT:    add s0, s0, a0
-; RV32-NEXT:    lw a0, 4(sp) # 4-byte Folded Reload
-; RV32-NEXT:    sb a0, 0(s0)
-; RV32-NEXT:    mv a0, s1
-; RV32-NEXT:    addi sp, sp, 8
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    cm.popret {ra, s0-s1}, 16
+; RV32-NEXT:    slli a5, a1, 8
+; RV32-NEXT:    slli t0, a1, 16
+; RV32-NEXT:    add a5, a5, a1
+; RV32-NEXT:    slli a1, a1, 24
+; RV32-NEXT:    andi a3, a3, 1
+; RV32-NEXT:    add a1, a1, t0
+; RV32-NEXT:    add a2, a2, a3
+; RV32-NEXT:    add a1, a1, a5
+; RV32-NEXT:    sw a1, 0(zero)
+; RV32-NEXT:    sb a4, 0(a2)
+; RV32-NEXT:    ret
 entry:
   br label %while.body
 
