[llvm] [RISCV][Zba] Optimize mul with SH*ADD (PR #68144)

Alexandr Lecomcev via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 3 11:30:56 PDT 2023


https://github.com/vacmannnn created https://github.com/llvm/llvm-project/pull/68144

This patch does the optimization of mul with a constant by different patterns of SH*ADD.

Optimized multiplication by constants 23, 29, 35, 39, 43, 47, 49, 51, 53, 55, 57, 59, 61, 69, 75, 77, 83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 117, 121, 123, 125, 135, 137, 145, 147, 149, 153, 155, 157, 161, 163, 165, 169, 171, 173, 181, 185, 189, 201, 203, 205, 217, 219, 225, 243, 293, 297, 301, 305, 325, 329, 333, 361, 365, 369, 405, 585, 593, 649, 657, 729

Example of patterns:
    shXadd a1 a0 a0
    shYadd a0 a1 a1
    shZadd a0 a1 a0
Mathematically, this is the same as multiplying by (2**x + 1)*(2**z) + (2**x + 1)*(2**y + 1). You can go through all X, Y, Z and find those constants that optimization cannot generate.


>From e38c2ea8c4d7f5333bf092607d674aac99a07b1a Mon Sep 17 00:00:00 2001
From: Alexandr <opera9876 at rambler.ru>
Date: Sun, 1 Oct 2023 19:57:01 +0300
Subject: [PATCH] [RISCV][Zba] Optimize mul with SH*ADD

Optimized multiplication by constants 23, 29, 35, 39, 43, 47,
49, 51, 53, 55, 57, 59, 61, 69, 75, 77, 83, 85, 87, 89, 91,
93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 117, 121,
123, 125, 135, 137, 145, 147, 149, 153, 155, 157, 161, 163,
165, 169, 171, 173, 181, 185, 189, 201, 203, 205, 217, 219,
225, 243, 293, 297, 301, 305, 325, 329, 333, 361, 365, 369,
405, 585, 593, 649, 657, 729
---
 llvm/lib/Target/RISCV/RISCVInstrInfoZb.td |  160 +++
 llvm/test/CodeGen/RISCV/rv32zba.ll        | 1292 +++++++++++++++++++++
 llvm/test/CodeGen/RISCV/rv64zba.ll        | 1292 +++++++++++++++++++++
 3 files changed, 2744 insertions(+)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index a21c3d132636bea..e33a73e4c3af890 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -718,12 +718,172 @@ def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 41)),
           (SH3ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r)>;
 def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 73)),
           (SH3ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r)>;
+
 def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 27)),
           (SH1ADD (SH3ADD GPR:$r, GPR:$r), (SH3ADD GPR:$r, GPR:$r))>;
 def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 45)),
           (SH2ADD (SH3ADD GPR:$r, GPR:$r), (SH3ADD GPR:$r, GPR:$r))>;
 def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 81)),
           (SH3ADD (SH3ADD GPR:$r, GPR:$r), (SH3ADD GPR:$r, GPR:$r))>;
+
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 61)),
+          (SH2ADD (SH2ADD (SH1ADD GPR:$r, GPR:$r), (SH1ADD GPR:$r, GPR:$r)), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 121)),
+          (SH3ADD (SH2ADD (SH1ADD GPR:$r, GPR:$r), (SH1ADD GPR:$r, GPR:$r)), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 55)),
+          (SH1ADD (SH3ADD (SH1ADD GPR:$r, GPR:$r), (SH1ADD GPR:$r, GPR:$r)), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 109)),
+          (SH2ADD (SH3ADD (SH1ADD GPR:$r, GPR:$r), (SH1ADD GPR:$r, GPR:$r)), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 217)),
+          (SH3ADD (SH3ADD (SH1ADD GPR:$r, GPR:$r), (SH1ADD GPR:$r, GPR:$r)), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 51)),
+          (SH1ADD (SH2ADD (SH2ADD GPR:$r, GPR:$r), (SH2ADD GPR:$r, GPR:$r)), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 101)),
+          (SH2ADD (SH2ADD (SH2ADD GPR:$r, GPR:$r), (SH2ADD GPR:$r, GPR:$r)), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 201)),
+          (SH3ADD (SH2ADD (SH2ADD GPR:$r, GPR:$r), (SH2ADD GPR:$r, GPR:$r)), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 91)),
+          (SH1ADD (SH3ADD (SH2ADD GPR:$r, GPR:$r), (SH2ADD GPR:$r, GPR:$r)), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 181)),
+          (SH2ADD (SH3ADD (SH2ADD GPR:$r, GPR:$r), (SH2ADD GPR:$r, GPR:$r)), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 361)),
+          (SH3ADD (SH3ADD (SH2ADD GPR:$r, GPR:$r), (SH2ADD GPR:$r, GPR:$r)), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 163)),
+          (SH1ADD (SH3ADD (SH3ADD GPR:$r, GPR:$r), (SH3ADD GPR:$r, GPR:$r)), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 325)),
+          (SH2ADD (SH3ADD (SH3ADD GPR:$r, GPR:$r), (SH3ADD GPR:$r, GPR:$r)), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 649)),
+          (SH3ADD (SH3ADD (SH3ADD GPR:$r, GPR:$r), (SH3ADD GPR:$r, GPR:$r)), GPR:$r)>;
+
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 29)),
+          (SH2ADD (SH1ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 57)),
+          (SH3ADD (SH1ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 53)),
+          (SH2ADD (SH2ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 105)),
+          (SH3ADD (SH2ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 23)),
+          (SH1ADD (SH1ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 89)),
+          (SH3ADD (SH1ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 43)),
+          (SH1ADD (SH2ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 85)),
+          (SH2ADD (SH2ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 169)),
+          (SH3ADD (SH2ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 83)),
+          (SH1ADD (SH3ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 165)),
+          (SH2ADD (SH3ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 329)),
+          (SH3ADD (SH3ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 39)),
+          (SH1ADD (SH1ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 77)),
+          (SH2ADD (SH1ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 153)),
+          (SH3ADD (SH1ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 75)),
+          (SH1ADD (SH2ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 149)),
+          (SH2ADD (SH2ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 297)),
+          (SH3ADD (SH2ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 147)),
+          (SH1ADD (SH3ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r),   GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 293)),
+          (SH2ADD (SH3ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 35)),
+          (SH2ADD (SH1ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r), (SH1ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 117)),
+          (SH3ADD (SH2ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r), (SH2ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 125)),
+          (SH2ADD (SH3ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r), (SH3ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 225)),
+          (SH3ADD (SH3ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r), (SH3ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 99)),
+          (SH3ADD (SH1ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), (SH1ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 189)),
+          (SH3ADD (SH2ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), (SH2ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 123)),
+          (SH1ADD (SH3ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), (SH3ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 205)),
+          (SH2ADD (SH3ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), (SH3ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 369)),
+          (SH3ADD (SH3ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), (SH3ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 95)),
+          (SH2ADD (SH1ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), (SH1ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 171)),
+          (SH3ADD (SH1ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), (SH1ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 111)),
+          (SH1ADD (SH2ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), (SH2ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 185)),
+          (SH2ADD (SH2ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), (SH2ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 333)),
+          (SH3ADD (SH2ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), (SH2ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 219)),
+          (SH1ADD (SH3ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), (SH3ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 365)),
+          (SH2ADD (SH3ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), (SH3ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 657)),
+          (SH3ADD (SH3ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), (SH3ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r))>;
+
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 59)),
+          (SH3ADD (SH1ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r), (SH1ADD GPR:$r, GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 107)),
+          (SH3ADD (SH1ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), (SH2ADD GPR:$r, GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 103)),
+          (SH2ADD (SH1ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), (SH3ADD GPR:$r, GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 203)),
+          (SH3ADD (SH1ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), (SH3ADD GPR:$r, GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 49)),
+          (SH2ADD (SH2ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r), (SH1ADD GPR:$r, GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 93)),
+          (SH3ADD (SH2ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r), (SH1ADD GPR:$r, GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 47)),
+          (SH1ADD (SH2ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), (SH2ADD GPR:$r, GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 173)),
+          (SH3ADD (SH2ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), (SH2ADD GPR:$r, GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 87)),
+          (SH1ADD (SH2ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), (SH3ADD GPR:$r, GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 161)),
+          (SH3ADD (SH3ADD (SH1ADD GPR:$r, GPR:$r), GPR:$r), (SH1ADD GPR:$r, GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 157)),
+          (SH2ADD (SH3ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), (SH2ADD GPR:$r, GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 305)),
+          (SH3ADD (SH3ADD (SH2ADD GPR:$r, GPR:$r), GPR:$r), (SH2ADD GPR:$r, GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 155)),
+          (SH1ADD (SH3ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), (SH3ADD GPR:$r, GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 301)),
+          (SH2ADD (SH3ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), (SH3ADD GPR:$r, GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 593)),
+          (SH3ADD (SH3ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), (SH3ADD GPR:$r, GPR:$r))>;
+
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 97)),
+          (SH3ADD (SH3ADD GPR:$r, (SH1ADD GPR:$r, GPR:$r)), (SH1ADD GPR:$r, GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 113)),
+          (SH3ADD (SH3ADD GPR:$r, (SH2ADD GPR:$r, GPR:$r)), (SH2ADD GPR:$r, GPR:$r))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 145)),
+          (SH3ADD (SH3ADD GPR:$r, (SH3ADD GPR:$r, GPR:$r)), (SH3ADD GPR:$r, GPR:$r))>;
+
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 585)),
+          (SH3ADD (SH3ADD (SH3ADD GPR:$r, GPR:$r), GPR:$r), GPR:$r)>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 69)),
+          (SH3ADD (SH3ADD GPR:$r, (SH2ADD GPR:$r, GPR:$r)), GPR:$r)>;     
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 137)),
+          (SH3ADD (SH3ADD GPR:$r, (SH2ADD GPR:$r, GPR:$r)), GPR:$r)>; 
+
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 135)),
+          (SH3ADD (SH2ADD (SH1ADD GPR:$r, GPR:$r), (SH1ADD GPR:$r, GPR:$r)), (SH2ADD (SH1ADD GPR:$r, GPR:$r), (SH1ADD GPR:$r, GPR:$r)))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 243)),
+          (SH3ADD (SH3ADD (SH1ADD GPR:$r, GPR:$r), (SH1ADD GPR:$r, GPR:$r)), (SH3ADD (SH1ADD GPR:$r, GPR:$r), (SH1ADD GPR:$r, GPR:$r)))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 405)),
+          (SH3ADD (SH3ADD (SH2ADD GPR:$r, GPR:$r), (SH2ADD GPR:$r, GPR:$r)), (SH3ADD (SH2ADD GPR:$r, GPR:$r), (SH2ADD GPR:$r, GPR:$r)))>;
+def : Pat<(mul_const_oneuse GPR:$r, (XLenVT 729)),
+          (SH3ADD (SH3ADD (SH3ADD GPR:$r, GPR:$r), (SH3ADD GPR:$r, GPR:$r)), (SH3ADD (SH3ADD GPR:$r, GPR:$r), (SH3ADD GPR:$r, GPR:$r)))>;
 } // Predicates = [HasStdExtZba]
 
 let Predicates = [HasStdExtZba, IsRV64] in {
diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll
index 0908a393338c501..7d113f754949963 100644
--- a/llvm/test/CodeGen/RISCV/rv32zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -476,6 +476,1298 @@ define i32 @mul81(i32 %a) {
   ret i32 %c
 }
 
+define i32 @mul153(i32 %a) {
+; RV32I-LABEL: mul153:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 153
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul153:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh1add a1, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 153
+  ret i32 %c
+}
+
+define i32 @mul305(i32 %a) {
+; RV32I-LABEL: mul305:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 305
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul305:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 305
+  ret i32 %c
+}
+
+define i32 @mul189(i32 %a) {
+; RV32I-LABEL: mul189:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 189
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul189:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 189
+  ret i32 %c
+}
+
+define i32 @mul51(i32 %a) {
+; RV32I-LABEL: mul51:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 51
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul51:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a1, a1, a1
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 51
+  ret i32 %c
+}
+
+define i32 @mul91(i32 %a) {
+; RV32I-LABEL: mul91:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 91
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul91:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a1, a1
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 91
+  ret i32 %c
+}
+
+define i32 @mul157(i32 %a) {
+; RV32I-LABEL: mul157:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 157
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul157:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 157
+  ret i32 %c
+}
+
+define i32 @mul89(i32 %a) {
+; RV32I-LABEL: mul89:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 89
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul89:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh1add a1, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 89
+  ret i32 %c
+}
+
+define i32 @mul117(i32 %a) {
+; RV32I-LABEL: mul117:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 117
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul117:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 117
+  ret i32 %c
+}
+
+define i32 @mul203(i32 %a) {
+; RV32I-LABEL: mul203:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 203
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul203:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 203
+  ret i32 %c
+}
+
+define i32 @mul293(i32 %a) {
+; RV32I-LABEL: mul293:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 293
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul293:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 293
+  ret i32 %c
+}
+
+define i32 @mul147(i32 %a) {
+; RV32I-LABEL: mul147:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 147
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul147:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a1, a0
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 147
+  ret i32 %c
+}
+
+define i32 @mul83(i32 %a) {
+; RV32I-LABEL: mul83:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 83
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul83:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a1, a0
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 83
+  ret i32 %c
+}
+
+define i32 @mul163(i32 %a) {
+; RV32I-LABEL: mul163:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 163
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul163:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a1, a1
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 163
+  ret i32 %c
+}
+
+define i32 @mul111(i32 %a) {
+; RV32I-LABEL: mul111:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 111
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul111:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    sh1add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 111
+  ret i32 %c
+}
+
+define i32 @mul217(i32 %a) {
+; RV32I-LABEL: mul217:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 217
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul217:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a1, a1
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 217
+  ret i32 %c
+}
+
+define i32 @mul181(i32 %a) {
+; RV32I-LABEL: mul181:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 181
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul181:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a1, a1
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 181
+  ret i32 %c
+}
+
+define i32 @mul53(i32 %a) {
+; RV32I-LABEL: mul53:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 53
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul53:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a1, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 53
+  ret i32 %c
+}
+
+define i32 @mul103(i32 %a) {
+; RV32I-LABEL: mul103:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 103
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul103:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 103
+  ret i32 %c
+}
+
+define i32 @mul57(i32 %a) {
+; RV32I-LABEL: mul57:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 57
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul57:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh1add a1, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 57
+  ret i32 %c
+}
+
+define i32 @mul35(i32 %a) {
+; RV32I-LABEL: mul35:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 35
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul35:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 35
+  ret i32 %c
+}
+
+define i32 @mul329(i32 %a) {
+; RV32I-LABEL: mul329:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 329
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul329:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 329
+  ret i32 %c
+}
+
+define i32 @mul593(i32 %a) {
+; RV32I-LABEL: mul593:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 593
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul593:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 593
+  ret i32 %c
+}
+
+define i32 @mul301(i32 %a) {
+; RV32I-LABEL: mul301:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 301
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul301:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 301
+  ret i32 %c
+}
+
+define i32 @mul405(i32 %a) {
+; RV32I-LABEL: mul405:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 405
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul405:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a0, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 405
+  ret i32 %c
+}
+
+define i32 @mul585(i32 %a) {
+; RV32I-LABEL: mul585:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 585
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul585:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 585
+  ret i32 %c
+}
+
+define i32 @mul23(i32 %a) {
+; RV32I-LABEL: mul23:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 23
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul23:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh1add a1, a1, a0
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 23
+  ret i32 %c
+}
+
+define i32 @mul225(i32 %a) {
+; RV32I-LABEL: mul225:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 225
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul225:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 225
+  ret i32 %c
+}
+
+define i32 @mul69(i32 %a) {
+; RV32I-LABEL: mul69:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 69
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul69:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a0, a1
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 69
+  ret i32 %c
+}
+
+define i32 @mul77(i32 %a) {
+; RV32I-LABEL: mul77:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 77
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul77:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh1add a1, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 77
+  ret i32 %c
+}
+
+define i32 @mul123(i32 %a) {
+; RV32I-LABEL: mul123:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 123
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul123:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    sh1add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 123
+  ret i32 %c
+}
+
+define i32 @mul107(i32 %a) {
+; RV32I-LABEL: mul107:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 107
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul107:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 107
+  ret i32 %c
+}
+
+define i32 @mul361(i32 %a) {
+; RV32I-LABEL: mul361:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 361
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul361:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a1, a1
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 361
+  ret i32 %c
+}
+
+define i32 @mul205(i32 %a) {
+; RV32I-LABEL: mul205:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 205
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul205:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 205
+  ret i32 %c
+}
+
+define i32 @mul185(i32 %a) {
+; RV32I-LABEL: mul185:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 185
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul185:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 185
+  ret i32 %c
+}
+
+define i32 @mul55(i32 %a) {
+; RV32I-LABEL: mul55:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 55
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul55:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a1, a1
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 55
+  ret i32 %c
+}
+
+define i32 @mul99(i32 %a) {
+; RV32I-LABEL: mul99:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 99
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul99:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 99
+  ret i32 %c
+}
+
+define i32 @mul85(i32 %a) {
+; RV32I-LABEL: mul85:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 85
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul85:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a1, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 85
+  ret i32 %c
+}
+
+define i32 @mul155(i32 %a) {
+; RV32I-LABEL: mul155:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 155
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul155:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    sh1add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 155
+  ret i32 %c
+}
+
+define i32 @mul333(i32 %a) {
+; RV32I-LABEL: mul333:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 333
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul333:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 333
+  ret i32 %c
+}
+
+define i32 @mul137(i32 %a) {
+; RV32I-LABEL: mul137:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 137
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul137:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a0, a1
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 137
+  ret i32 %c
+}
+
+define i32 @mul135(i32 %a) {
+; RV32I-LABEL: mul135:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 135
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul135:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a0, a0, a0
+; RV32ZBA-NEXT:    sh2add a0, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 135
+  ret i32 %c
+}
+
+define i32 @mul93(i32 %a) {
+; RV32I-LABEL: mul93:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 93
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul93:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 93
+  ret i32 %c
+}
+
+define i32 @mul109(i32 %a) {
+; RV32I-LABEL: mul109:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 109
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul109:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a1, a1
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 109
+  ret i32 %c
+}
+
+define i32 @mul75(i32 %a) {
+; RV32I-LABEL: mul75:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 75
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul75:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a1, a1, a0
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 75
+  ret i32 %c
+}
+
+define i32 @mul649(i32 %a) {
+; RV32I-LABEL: mul649:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 649
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul649:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a1, a1
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 649
+  ret i32 %c
+}
+
+define i32 @mul113(i32 %a) {
+; RV32I-LABEL: mul113:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 113
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul113:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a1
+; RV32ZBA-NEXT:    sh3add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 113
+  ret i32 %c
+}
+
+define i32 @mul149(i32 %a) {
+; RV32I-LABEL: mul149:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 149
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul149:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a1, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 149
+  ret i32 %c
+}
+
+define i32 @mul87(i32 %a) {
+; RV32I-LABEL: mul87:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 87
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul87:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    sh1add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 87
+  ret i32 %c
+}
+
+define i32 @mul369(i32 %a) {
+; RV32I-LABEL: mul369:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 369
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul369:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 369
+  ret i32 %c
+}
+
+define i32 @mul173(i32 %a) {
+; RV32I-LABEL: mul173:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 173
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul173:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 173
+  ret i32 %c
+}
+
+define i32 @mul97(i32 %a) {
+; RV32I-LABEL: mul97:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 97
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul97:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a1
+; RV32ZBA-NEXT:    sh3add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 97
+  ret i32 %c
+}
+
+define i32 @mul95(i32 %a) {
+; RV32I-LABEL: mul95:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 95
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul95:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 95
+  ret i32 %c
+}
+
+define i32 @mul161(i32 %a) {
+; RV32I-LABEL: mul161:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 161
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul161:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 161
+  ret i32 %c
+}
+
+define i32 @mul121(i32 %a) {
+; RV32I-LABEL: mul121:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 121
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul121:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a1, a1, a1
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 121
+  ret i32 %c
+}
+
+define i32 @mul29(i32 %a) {
+; RV32I-LABEL: mul29:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 29
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul29:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh1add a1, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 29
+  ret i32 %c
+}
+
+define i32 @mul125(i32 %a) {
+; RV32I-LABEL: mul125:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 125
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul125:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 125
+  ret i32 %c
+}
+
+define i32 @mul201(i32 %a) {
+; RV32I-LABEL: mul201:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 201
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul201:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a1, a1, a1
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 201
+  ret i32 %c
+}
+
+define i32 @mul47(i32 %a) {
+; RV32I-LABEL: mul47:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 47
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul47:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    sh1add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 47
+  ret i32 %c
+}
+
+define i32 @mul657(i32 %a) {
+; RV32I-LABEL: mul657:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 657
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul657:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 657
+  ret i32 %c
+}
+
+define i32 @mul49(i32 %a) {
+; RV32I-LABEL: mul49:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 49
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul49:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 49
+  ret i32 %c
+}
+
+define i32 @mul325(i32 %a) {
+; RV32I-LABEL: mul325:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 325
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul325:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a1, a1
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 325
+  ret i32 %c
+}
+
+define i32 @mul43(i32 %a) {
+; RV32I-LABEL: mul43:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 43
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul43:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a1, a1, a0
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 43
+  ret i32 %c
+}
+
+define i32 @mul39(i32 %a) {
+; RV32I-LABEL: mul39:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 39
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul39:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh1add a1, a1, a0
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 39
+  ret i32 %c
+}
+
+define i32 @mul59(i32 %a) {
+; RV32I-LABEL: mul59:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 59
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul59:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 59
+  ret i32 %c
+}
+
+define i32 @mul165(i32 %a) {
+; RV32I-LABEL: mul165:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 165
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul165:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a1, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 165
+  ret i32 %c
+}
+
+define i32 @mul145(i32 %a) {
+; RV32I-LABEL: mul145:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 145
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul145:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a1
+; RV32ZBA-NEXT:    sh3add a0, a0, a1
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 145
+  ret i32 %c
+}
+
+define i32 @mul219(i32 %a) {
+; RV32I-LABEL: mul219:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 219
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul219:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    sh1add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 219
+  ret i32 %c
+}
+
+define i32 @mul729(i32 %a) {
+; RV32I-LABEL: mul729:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 729
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul729:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a0, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 729
+  ret i32 %c
+}
+
+define i32 @mul105(i32 %a) {
+; RV32I-LABEL: mul105:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 105
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul105:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a1, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 105
+  ret i32 %c
+}
+
+define i32 @mul365(i32 %a) {
+; RV32I-LABEL: mul365:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 365
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul365:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    sh2add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 365
+  ret i32 %c
+}
+
+define i32 @mul243(i32 %a) {
+; RV32I-LABEL: mul243:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 243
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul243:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a0, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 243
+  ret i32 %c
+}
+
+define i32 @mul101(i32 %a) {
+; RV32I-LABEL: mul101:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 101
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul101:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a1, a1, a1
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 101
+  ret i32 %c
+}
+
+define i32 @mul297(i32 %a) {
+; RV32I-LABEL: mul297:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 297
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul297:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a1, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 297
+  ret i32 %c
+}
+
+define i32 @mul171(i32 %a) {
+; RV32I-LABEL: mul171:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 171
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul171:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh3add a1, a0, a0
+; RV32ZBA-NEXT:    sh1add a0, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a0, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 171
+  ret i32 %c
+}
+
+define i32 @mul169(i32 %a) {
+; RV32I-LABEL: mul169:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 169
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul169:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh2add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a1, a1, a0
+; RV32ZBA-NEXT:    sh3add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 169
+  ret i32 %c
+}
+
+define i32 @mul61(i32 %a) {
+; RV32I-LABEL: mul61:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 61
+; RV32I-NEXT:    mul a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBA-LABEL: mul61:
+; RV32ZBA:       # %bb.0:
+; RV32ZBA-NEXT:    sh1add a1, a0, a0
+; RV32ZBA-NEXT:    sh2add a1, a1, a1
+; RV32ZBA-NEXT:    sh2add a0, a1, a0
+; RV32ZBA-NEXT:    ret
+  %c = mul i32 %a, 61
+  ret i32 %c
+}
+
 define i32 @mul4098(i32 %a) {
 ; RV32I-LABEL: mul4098:
 ; RV32I:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index 9b472523875e792..7a78c012665a6e8 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -983,6 +983,1298 @@ define i64 @mul81(i64 %a) {
   ret i64 %c
 }
 
+define i64 @mul153(i64 %a) {
+; RV64I-LABEL: mul153:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 153
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul153:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh1add a1, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 153
+  ret i64 %c
+}
+
+define i64 @mul305(i64 %a) {
+; RV64I-LABEL: mul305:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 305
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul305:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 305
+  ret i64 %c
+}
+
+define i64 @mul189(i64 %a) {
+; RV64I-LABEL: mul189:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 189
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul189:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 189
+  ret i64 %c
+}
+
+define i64 @mul51(i64 %a) {
+; RV64I-LABEL: mul51:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 51
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul51:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a1, a1, a1
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 51
+  ret i64 %c
+}
+
+define i64 @mul91(i64 %a) {
+; RV64I-LABEL: mul91:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 91
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul91:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a1, a1
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 91
+  ret i64 %c
+}
+
+define i64 @mul157(i64 %a) {
+; RV64I-LABEL: mul157:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 157
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul157:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 157
+  ret i64 %c
+}
+
+define i64 @mul89(i64 %a) {
+; RV64I-LABEL: mul89:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 89
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul89:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh1add a1, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 89
+  ret i64 %c
+}
+
+define i64 @mul117(i64 %a) {
+; RV64I-LABEL: mul117:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 117
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul117:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 117
+  ret i64 %c
+}
+
+define i64 @mul203(i64 %a) {
+; RV64I-LABEL: mul203:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 203
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul203:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 203
+  ret i64 %c
+}
+
+define i64 @mul293(i64 %a) {
+; RV64I-LABEL: mul293:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 293
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul293:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 293
+  ret i64 %c
+}
+
+define i64 @mul147(i64 %a) {
+; RV64I-LABEL: mul147:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 147
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul147:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a1, a0
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 147
+  ret i64 %c
+}
+
+define i64 @mul83(i64 %a) {
+; RV64I-LABEL: mul83:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 83
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul83:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a1, a0
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 83
+  ret i64 %c
+}
+
+define i64 @mul163(i64 %a) {
+; RV64I-LABEL: mul163:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 163
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul163:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a1, a1
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 163
+  ret i64 %c
+}
+
+define i64 @mul111(i64 %a) {
+; RV64I-LABEL: mul111:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 111
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul111:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    sh1add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 111
+  ret i64 %c
+}
+
+define i64 @mul217(i64 %a) {
+; RV64I-LABEL: mul217:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 217
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul217:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a1, a1
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 217
+  ret i64 %c
+}
+
+define i64 @mul181(i64 %a) {
+; RV64I-LABEL: mul181:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 181
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul181:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a1, a1
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 181
+  ret i64 %c
+}
+
+define i64 @mul53(i64 %a) {
+; RV64I-LABEL: mul53:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 53
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul53:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a1, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 53
+  ret i64 %c
+}
+
+define i64 @mul103(i64 %a) {
+; RV64I-LABEL: mul103:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 103
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul103:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 103
+  ret i64 %c
+}
+
+define i64 @mul57(i64 %a) {
+; RV64I-LABEL: mul57:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 57
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul57:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh1add a1, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 57
+  ret i64 %c
+}
+
+define i64 @mul35(i64 %a) {
+; RV64I-LABEL: mul35:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 35
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul35:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 35
+  ret i64 %c
+}
+
+define i64 @mul329(i64 %a) {
+; RV64I-LABEL: mul329:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 329
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul329:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 329
+  ret i64 %c
+}
+
+define i64 @mul593(i64 %a) {
+; RV64I-LABEL: mul593:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 593
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul593:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 593
+  ret i64 %c
+}
+
+define i64 @mul301(i64 %a) {
+; RV64I-LABEL: mul301:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 301
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul301:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 301
+  ret i64 %c
+}
+
+define i64 @mul405(i64 %a) {
+; RV64I-LABEL: mul405:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 405
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul405:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a0, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 405
+  ret i64 %c
+}
+
+define i64 @mul585(i64 %a) {
+; RV64I-LABEL: mul585:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 585
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul585:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 585
+  ret i64 %c
+}
+
+define i64 @mul23(i64 %a) {
+; RV64I-LABEL: mul23:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 23
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul23:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh1add a1, a1, a0
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 23
+  ret i64 %c
+}
+
+define i64 @mul225(i64 %a) {
+; RV64I-LABEL: mul225:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 225
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul225:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 225
+  ret i64 %c
+}
+
+define i64 @mul69(i64 %a) {
+; RV64I-LABEL: mul69:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 69
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul69:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a0, a1
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 69
+  ret i64 %c
+}
+
+define i64 @mul77(i64 %a) {
+; RV64I-LABEL: mul77:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 77
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul77:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh1add a1, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 77
+  ret i64 %c
+}
+
+define i64 @mul123(i64 %a) {
+; RV64I-LABEL: mul123:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 123
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul123:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    sh1add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 123
+  ret i64 %c
+}
+
+define i64 @mul107(i64 %a) {
+; RV64I-LABEL: mul107:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 107
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul107:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 107
+  ret i64 %c
+}
+
+define i64 @mul361(i64 %a) {
+; RV64I-LABEL: mul361:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 361
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul361:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a1, a1
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 361
+  ret i64 %c
+}
+
+define i64 @mul205(i64 %a) {
+; RV64I-LABEL: mul205:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 205
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul205:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 205
+  ret i64 %c
+}
+
+define i64 @mul185(i64 %a) {
+; RV64I-LABEL: mul185:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 185
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul185:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 185
+  ret i64 %c
+}
+
+define i64 @mul55(i64 %a) {
+; RV64I-LABEL: mul55:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 55
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul55:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a1, a1
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 55
+  ret i64 %c
+}
+
+define i64 @mul99(i64 %a) {
+; RV64I-LABEL: mul99:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 99
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul99:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 99
+  ret i64 %c
+}
+
+define i64 @mul85(i64 %a) {
+; RV64I-LABEL: mul85:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 85
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul85:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a1, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 85
+  ret i64 %c
+}
+
+define i64 @mul155(i64 %a) {
+; RV64I-LABEL: mul155:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 155
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul155:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    sh1add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 155
+  ret i64 %c
+}
+
+define i64 @mul333(i64 %a) {
+; RV64I-LABEL: mul333:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 333
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul333:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 333
+  ret i64 %c
+}
+
+define i64 @mul137(i64 %a) {
+; RV64I-LABEL: mul137:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 137
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul137:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a0, a1
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 137
+  ret i64 %c
+}
+
+define i64 @mul135(i64 %a) {
+; RV64I-LABEL: mul135:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 135
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul135:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a0, a0, a0
+; RV64ZBA-NEXT:    sh2add a0, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 135
+  ret i64 %c
+}
+
+define i64 @mul93(i64 %a) {
+; RV64I-LABEL: mul93:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 93
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul93:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 93
+  ret i64 %c
+}
+
+define i64 @mul109(i64 %a) {
+; RV64I-LABEL: mul109:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 109
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul109:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a1, a1
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 109
+  ret i64 %c
+}
+
+define i64 @mul75(i64 %a) {
+; RV64I-LABEL: mul75:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 75
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul75:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a1, a1, a0
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 75
+  ret i64 %c
+}
+
+define i64 @mul649(i64 %a) {
+; RV64I-LABEL: mul649:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 649
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul649:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a1, a1
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 649
+  ret i64 %c
+}
+
+define i64 @mul113(i64 %a) {
+; RV64I-LABEL: mul113:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 113
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul113:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a1
+; RV64ZBA-NEXT:    sh3add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 113
+  ret i64 %c
+}
+
+define i64 @mul149(i64 %a) {
+; RV64I-LABEL: mul149:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 149
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul149:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a1, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 149
+  ret i64 %c
+}
+
+define i64 @mul87(i64 %a) {
+; RV64I-LABEL: mul87:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 87
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul87:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    sh1add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 87
+  ret i64 %c
+}
+
+define i64 @mul369(i64 %a) {
+; RV64I-LABEL: mul369:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 369
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul369:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 369
+  ret i64 %c
+}
+
+define i64 @mul173(i64 %a) {
+; RV64I-LABEL: mul173:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 173
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul173:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 173
+  ret i64 %c
+}
+
+define i64 @mul97(i64 %a) {
+; RV64I-LABEL: mul97:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 97
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul97:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a1
+; RV64ZBA-NEXT:    sh3add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 97
+  ret i64 %c
+}
+
+define i64 @mul95(i64 %a) {
+; RV64I-LABEL: mul95:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 95
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul95:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 95
+  ret i64 %c
+}
+
+define i64 @mul161(i64 %a) {
+; RV64I-LABEL: mul161:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 161
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul161:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 161
+  ret i64 %c
+}
+
+define i64 @mul121(i64 %a) {
+; RV64I-LABEL: mul121:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 121
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul121:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a1, a1, a1
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 121
+  ret i64 %c
+}
+
+define i64 @mul29(i64 %a) {
+; RV64I-LABEL: mul29:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 29
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul29:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh1add a1, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 29
+  ret i64 %c
+}
+
+define i64 @mul125(i64 %a) {
+; RV64I-LABEL: mul125:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 125
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul125:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 125
+  ret i64 %c
+}
+
+define i64 @mul201(i64 %a) {
+; RV64I-LABEL: mul201:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 201
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul201:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a1, a1, a1
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 201
+  ret i64 %c
+}
+
+define i64 @mul47(i64 %a) {
+; RV64I-LABEL: mul47:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 47
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul47:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    sh1add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 47
+  ret i64 %c
+}
+
+define i64 @mul657(i64 %a) {
+; RV64I-LABEL: mul657:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 657
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul657:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 657
+  ret i64 %c
+}
+
+define i64 @mul49(i64 %a) {
+; RV64I-LABEL: mul49:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 49
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul49:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 49
+  ret i64 %c
+}
+
+define i64 @mul325(i64 %a) {
+; RV64I-LABEL: mul325:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 325
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul325:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a1, a1
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 325
+  ret i64 %c
+}
+
+define i64 @mul43(i64 %a) {
+; RV64I-LABEL: mul43:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 43
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul43:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a1, a1, a0
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 43
+  ret i64 %c
+}
+
+define i64 @mul39(i64 %a) {
+; RV64I-LABEL: mul39:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 39
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul39:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh1add a1, a1, a0
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 39
+  ret i64 %c
+}
+
+define i64 @mul59(i64 %a) {
+; RV64I-LABEL: mul59:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 59
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul59:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 59
+  ret i64 %c
+}
+
+define i64 @mul165(i64 %a) {
+; RV64I-LABEL: mul165:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 165
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul165:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a1, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 165
+  ret i64 %c
+}
+
+define i64 @mul145(i64 %a) {
+; RV64I-LABEL: mul145:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 145
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul145:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a1
+; RV64ZBA-NEXT:    sh3add a0, a0, a1
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 145
+  ret i64 %c
+}
+
+define i64 @mul219(i64 %a) {
+; RV64I-LABEL: mul219:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 219
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul219:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    sh1add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 219
+  ret i64 %c
+}
+
+define i64 @mul729(i64 %a) {
+; RV64I-LABEL: mul729:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 729
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul729:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a0, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 729
+  ret i64 %c
+}
+
+define i64 @mul105(i64 %a) {
+; RV64I-LABEL: mul105:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 105
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul105:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a1, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 105
+  ret i64 %c
+}
+
+define i64 @mul365(i64 %a) {
+; RV64I-LABEL: mul365:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 365
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul365:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    sh2add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 365
+  ret i64 %c
+}
+
+define i64 @mul243(i64 %a) {
+; RV64I-LABEL: mul243:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 243
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul243:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a0, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 243
+  ret i64 %c
+}
+
+define i64 @mul101(i64 %a) {
+; RV64I-LABEL: mul101:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 101
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul101:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a1, a1, a1
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 101
+  ret i64 %c
+}
+
+define i64 @mul297(i64 %a) {
+; RV64I-LABEL: mul297:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 297
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul297:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a1, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 297
+  ret i64 %c
+}
+
+define i64 @mul171(i64 %a) {
+; RV64I-LABEL: mul171:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 171
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul171:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh3add a1, a0, a0
+; RV64ZBA-NEXT:    sh1add a0, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a0, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 171
+  ret i64 %c
+}
+
+define i64 @mul169(i64 %a) {
+; RV64I-LABEL: mul169:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 169
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul169:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh2add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a1, a1, a0
+; RV64ZBA-NEXT:    sh3add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 169
+  ret i64 %c
+}
+
+define i64 @mul61(i64 %a) {
+; RV64I-LABEL: mul61:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a1, 61
+; RV64I-NEXT:    mul a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBA-LABEL: mul61:
+; RV64ZBA:       # %bb.0:
+; RV64ZBA-NEXT:    sh1add a1, a0, a0
+; RV64ZBA-NEXT:    sh2add a1, a1, a1
+; RV64ZBA-NEXT:    sh2add a0, a1, a0
+; RV64ZBA-NEXT:    ret
+  %c = mul i64 %a, 61
+  ret i64 %c
+}
+
 define i64 @mul4098(i64 %a) {
 ; RV64I-LABEL: mul4098:
 ; RV64I:       # %bb.0:



More information about the llvm-commits mailing list