[llvm] [RISCV] Strength reduce mul by 2^N - 2^M (PR #88983)
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Thu May 9 14:38:33 PDT 2024
https://github.com/preames updated https://github.com/llvm/llvm-project/pull/88983
From ed2a96ec0ed14f3ef95f0451eb890185c0da56c3 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Tue, 16 Apr 2024 11:53:50 -0700
Subject: [PATCH 1/2] [RISCV] Strength reduce mul by 2^N - 2^M
This is a three-instruction expansion that does not depend on zba,
so most of the test changes are in the base RV32I/RV64I configurations.
With zba, this catches immediates such as 14, 28, 30, 56, 60, and 62,
which aren't covered by our other expansions.
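For example, 14 = 2^4 - 2^1, so x * 14 becomes (x << 4) - (x << 1),
i.e. slli/slli/sub. As an illustration only (not part of the patch;
mulViaShifts and isPow2 are made-up names), a minimal standalone C++
sketch of the same expansion:

  #include <cassert>
  #include <cstdint>

  // Editorial sketch, not LLVM code: expand X * MulAmt as
  // (X << N) - (X << M) when MulAmt == 2^N - 2^M.
  static bool isPow2(uint64_t V) { return V && (V & (V - 1)) == 0; }

  uint64_t mulViaShifts(uint64_t X, uint64_t MulAmt) {
    uint64_t LowBit = MulAmt & -MulAmt;            // 2^M: lowest set bit
    assert(isPow2(MulAmt + LowBit) && "MulAmt must be 2^N - 2^M");
    unsigned N = __builtin_ctzll(MulAmt + LowBit); // log2(2^N)
    unsigned M = __builtin_ctzll(LowBit);          // log2(2^M)
    return (X << N) - (X << M);                    // == X * MulAmt
  }

e.g. mulViaShifts(x, 14) == x * 14 for any x, since 14 = 16 - 2.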
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 63 ++++---
llvm/test/CodeGen/RISCV/mul.ll | 164 +++++++++---------
llvm/test/CodeGen/RISCV/rv32xtheadba.ll | 20 ++-
llvm/test/CodeGen/RISCV/rv32zba.ll | 20 ++-
.../CodeGen/RISCV/rv64-legal-i32/rv64zba.ll | 34 ++--
llvm/test/CodeGen/RISCV/rv64xtheadba.ll | 20 ++-
llvm/test/CodeGen/RISCV/rv64zba.ll | 50 +++---
.../CodeGen/RISCV/rvv/calling-conv-fastcc.ll | 98 +++++------
.../CodeGen/RISCV/rvv/extract-subvector.ll | 12 +-
.../CodeGen/RISCV/rvv/mscatter-combine.ll | 8 +-
llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll | 18 +-
llvm/test/CodeGen/RISCV/rvv/stepvector.ll | 13 +-
.../CodeGen/RISCV/srem-seteq-illegal-types.ll | 79 +++++----
llvm/test/CodeGen/RISCV/urem-vector-lkk.ll | 46 ++---
14 files changed, 341 insertions(+), 304 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 7b4bec2f65b74..6a2ca5699c8a4 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -13408,8 +13408,8 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
if (VT != Subtarget.getXLenVT())
return SDValue();
- if (!Subtarget.hasStdExtZba() && !Subtarget.hasVendorXTHeadBa())
- return SDValue();
+ const bool HasShlAdd =
+ Subtarget.hasStdExtZba() || Subtarget.hasVendorXTHeadBa();
ConstantSDNode *CNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (!CNode)
@@ -13418,14 +13418,15 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
// 3/5/9 * 2^N -> shXadd (sll X, C), (sll X, C)
// Matched in tablegen, avoid perturbing patterns.
- for (uint64_t Divisor : {3, 5, 9})
- if (MulAmt % Divisor == 0 && isPowerOf2_64(MulAmt / Divisor))
- return SDValue();
+ if (HasShlAdd)
+ for (uint64_t Divisor : {3, 5, 9})
+ if (MulAmt % Divisor == 0 && isPowerOf2_64(MulAmt / Divisor))
+ return SDValue();
// If this is a power of 2 + 2/4/8, we can use a shift followed by a single
// shXadd. First check if this is a sum of two powers of 2 because that's
// easy. Then count how many zeros are up to the first bit.
- if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
+ if (HasShlAdd && isPowerOf2_64(MulAmt & (MulAmt - 1))) {
unsigned ScaleShift = llvm::countr_zero(MulAmt);
if (ScaleShift >= 1 && ScaleShift < 4) {
unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
@@ -13440,26 +13441,27 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
// 2^(1,2,3) * 3,5,9 + 1 -> (shXadd (shYadd x, x), x)
// Matched in tablegen, avoid perturbing patterns.
- switch (MulAmt) {
- case 11:
- case 13:
- case 19:
- case 21:
- case 25:
- case 27:
- case 29:
- case 37:
- case 41:
- case 45:
- case 73:
- case 91:
- return SDValue();
- default:
- break;
- }
+ if (HasShlAdd)
+ switch (MulAmt) {
+ case 11:
+ case 13:
+ case 19:
+ case 21:
+ case 25:
+ case 27:
+ case 29:
+ case 37:
+ case 41:
+ case 45:
+ case 73:
+ case 91:
+ return SDValue();
+ default:
+ break;
+ }
// 2^n + 2/4/8 + 1 -> (add (shl X, C1), (shXadd X, X))
- if (MulAmt > 2 && isPowerOf2_64((MulAmt - 1) & (MulAmt - 2))) {
+ if (HasShlAdd && MulAmt > 2 && isPowerOf2_64((MulAmt - 1) & (MulAmt - 2))) {
unsigned ScaleShift = llvm::countr_zero(MulAmt - 1);
if (ScaleShift >= 1 && ScaleShift < 4) {
unsigned ShiftAmt = Log2_64(((MulAmt - 1) & (MulAmt - 2)));
@@ -13474,6 +13476,19 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
}
}
+ // 2^N - 2^M -> (sub (shl X, C1), (shl X, C2))
+ uint64_t MulAmtLowBit = MulAmt & (-MulAmt);
+ if (isPowerOf2_64(MulAmt + MulAmtLowBit)) {
+ uint64_t ShiftAmt1 = MulAmt + MulAmtLowBit;
+ SDLoc DL(N);
+ SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
+ DAG.getConstant(Log2_64(ShiftAmt1), DL, VT));
+ SDValue Shift2 =
+ DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
+ DAG.getConstant(Log2_64(MulAmtLowBit), DL, VT));
+ return DAG.getNode(ISD::SUB, DL, VT, Shift1, Shift2);
+ }
+
return SDValue();
}
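A side note on the guard above: a value of the form 2^N - 2^M is a
contiguous run of set bits from bit M up to bit N-1, so adding its lowest
set bit 2^M back carries all the way up to the single bit 2^N, which is
what isPowerOf2_64(MulAmt + MulAmtLowBit) detects. A minimal standalone
C++ sketch (not part of the patch; the enumeration range is arbitrary)
that lists the multipliers matching this form:

  #include <cstdint>
  #include <cstdio>

  int main() {
    // Print every MulAmt in [3, 64] of the form 2^N - 2^M. Plain powers
    // of two trivially match (2^N == 2^(N+1) - 2^N) but are already
    // lowered to a single shift before expandMul ever sees them.
    for (uint64_t MulAmt = 3; MulAmt <= 64; ++MulAmt) {
      uint64_t LowBit = MulAmt & -MulAmt;
      uint64_t Sum = MulAmt + LowBit;
      if (Sum && (Sum & (Sum - 1)) == 0) // isPowerOf2_64(Sum)
        printf("%llu = 2^%d - 2^%d\n", (unsigned long long)MulAmt,
               __builtin_ctzll(Sum), __builtin_ctzll(LowBit));
    }
    return 0;
  }

Among others, this lists 6, 12, 14, 24, 28, 30, 56, 60, and 62 -- the
immediates exercised by the test diffs below.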
diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index 364e8c7b38dac..395d6024e9203 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -473,24 +473,23 @@ define i32 @muli32_p14(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p14:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: li a1, 14
-; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: slli a1, a0, 1
+; RV32IM-NEXT: slli a0, a0, 4
+; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p14:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: li a1, 14
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 1
+; RV64I-NEXT: slli a0, a0, 4
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p14:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 14
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 1
+; RV64IM-NEXT: slli a0, a0, 4
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 14
ret i32 %1
@@ -504,24 +503,23 @@ define i32 @muli32_p28(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p28:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: li a1, 28
-; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: slli a1, a0, 2
+; RV32IM-NEXT: slli a0, a0, 5
+; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p28:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: li a1, 28
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 2
+; RV64I-NEXT: slli a0, a0, 5
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p28:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 28
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 2
+; RV64IM-NEXT: slli a0, a0, 5
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 28
ret i32 %1
@@ -535,24 +533,23 @@ define i32 @muli32_p30(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p30:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: li a1, 30
-; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: slli a1, a0, 1
+; RV32IM-NEXT: slli a0, a0, 5
+; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p30:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: li a1, 30
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 1
+; RV64I-NEXT: slli a0, a0, 5
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p30:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 30
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 1
+; RV64IM-NEXT: slli a0, a0, 5
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 30
ret i32 %1
@@ -566,24 +563,23 @@ define i32 @muli32_p56(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p56:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: li a1, 56
-; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: slli a1, a0, 3
+; RV32IM-NEXT: slli a0, a0, 6
+; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p56:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: li a1, 56
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 3
+; RV64I-NEXT: slli a0, a0, 6
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p56:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 56
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 3
+; RV64IM-NEXT: slli a0, a0, 6
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 56
ret i32 %1
@@ -597,24 +593,23 @@ define i32 @muli32_p60(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p60:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: li a1, 60
-; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: slli a1, a0, 2
+; RV32IM-NEXT: slli a0, a0, 6
+; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p60:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: li a1, 60
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 2
+; RV64I-NEXT: slli a0, a0, 6
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p60:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 60
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 2
+; RV64IM-NEXT: slli a0, a0, 6
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 60
ret i32 %1
@@ -628,24 +623,23 @@ define i32 @muli32_p62(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p62:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: li a1, 62
-; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: slli a1, a0, 1
+; RV32IM-NEXT: slli a0, a0, 6
+; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p62:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: li a1, 62
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 1
+; RV64I-NEXT: slli a0, a0, 6
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p62:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 62
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 1
+; RV64IM-NEXT: slli a0, a0, 6
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 62
ret i32 %1
@@ -943,24 +937,23 @@ define i32 @muli32_p384(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p384:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: li a1, 384
-; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: slli a1, a0, 7
+; RV32IM-NEXT: slli a0, a0, 9
+; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p384:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: li a1, 384
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 7
+; RV64I-NEXT: slli a0, a0, 9
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p384:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 384
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 7
+; RV64IM-NEXT: slli a0, a0, 9
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 384
ret i32 %1
@@ -974,24 +967,23 @@ define i32 @muli32_p12288(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p12288:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: lui a1, 3
-; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: slli a1, a0, 12
+; RV32IM-NEXT: slli a0, a0, 14
+; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p12288:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lui a1, 3
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 12
+; RV64I-NEXT: slli a0, a0, 14
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p12288:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: lui a1, 3
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 12
+; RV64IM-NEXT: slli a0, a0, 14
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 12288
ret i32 %1
@@ -1191,12 +1183,16 @@ define i64 @muli64_p3840(i64 %a) nounwind {
;
; RV32IM-LABEL: muli64_p3840:
; RV32IM: # %bb.0:
+; RV32IM-NEXT: slli a2, a1, 8
+; RV32IM-NEXT: slli a1, a1, 12
+; RV32IM-NEXT: sub a1, a1, a2
; RV32IM-NEXT: li a2, 15
; RV32IM-NEXT: slli a2, a2, 8
-; RV32IM-NEXT: mul a1, a1, a2
-; RV32IM-NEXT: mulhu a3, a0, a2
-; RV32IM-NEXT: add a1, a3, a1
-; RV32IM-NEXT: mul a0, a0, a2
+; RV32IM-NEXT: mulhu a2, a0, a2
+; RV32IM-NEXT: add a1, a2, a1
+; RV32IM-NEXT: slli a2, a0, 8
+; RV32IM-NEXT: slli a0, a0, 12
+; RV32IM-NEXT: sub a0, a0, a2
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli64_p3840:
diff --git a/llvm/test/CodeGen/RISCV/rv32xtheadba.ll b/llvm/test/CodeGen/RISCV/rv32xtheadba.ll
index 3bf7704dd1836..332e49771bedf 100644
--- a/llvm/test/CodeGen/RISCV/rv32xtheadba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32xtheadba.ll
@@ -97,8 +97,9 @@ define i64 @th_addsl_2_extra_sext(i32 %x, i32 %y, i32 %z) {
define i32 @addmul6(i32 %a, i32 %b) {
; RV32I-LABEL: addmul6:
; RV32I: # %bb.0:
-; RV32I-NEXT: li a2, 6
-; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 1
+; RV32I-NEXT: slli a0, a0, 3
+; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: ret
;
@@ -133,8 +134,9 @@ define i32 @addmul10(i32 %a, i32 %b) {
define i32 @addmul12(i32 %a, i32 %b) {
; RV32I-LABEL: addmul12:
; RV32I: # %bb.0:
-; RV32I-NEXT: li a2, 12
-; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 2
+; RV32I-NEXT: slli a0, a0, 4
+; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: ret
;
@@ -187,8 +189,9 @@ define i32 @addmul20(i32 %a, i32 %b) {
define i32 @addmul24(i32 %a, i32 %b) {
; RV32I-LABEL: addmul24:
; RV32I: # %bb.0:
-; RV32I-NEXT: li a2, 24
-; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 3
+; RV32I-NEXT: slli a0, a0, 5
+; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: ret
;
@@ -259,8 +262,9 @@ define i32 @addmul72(i32 %a, i32 %b) {
define i32 @mul96(i32 %a) {
; RV32I-LABEL: mul96:
; RV32I: # %bb.0:
-; RV32I-NEXT: li a1, 96
-; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 5
+; RV32I-NEXT: slli a0, a0, 7
+; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV32XTHEADBA-LABEL: mul96:
diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll
index cc632a09c8054..9c720223dc06e 100644
--- a/llvm/test/CodeGen/RISCV/rv32zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -63,8 +63,9 @@ define i64 @sh3add(i64 %0, ptr %1) {
define i32 @addmul6(i32 %a, i32 %b) {
; RV32I-LABEL: addmul6:
; RV32I: # %bb.0:
-; RV32I-NEXT: li a2, 6
-; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 1
+; RV32I-NEXT: slli a0, a0, 3
+; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: ret
;
@@ -99,8 +100,9 @@ define i32 @addmul10(i32 %a, i32 %b) {
define i32 @addmul12(i32 %a, i32 %b) {
; RV32I-LABEL: addmul12:
; RV32I: # %bb.0:
-; RV32I-NEXT: li a2, 12
-; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 2
+; RV32I-NEXT: slli a0, a0, 4
+; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: ret
;
@@ -153,8 +155,9 @@ define i32 @addmul20(i32 %a, i32 %b) {
define i32 @addmul24(i32 %a, i32 %b) {
; RV32I-LABEL: addmul24:
; RV32I: # %bb.0:
-; RV32I-NEXT: li a2, 24
-; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 3
+; RV32I-NEXT: slli a0, a0, 5
+; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: ret
;
@@ -225,8 +228,9 @@ define i32 @addmul72(i32 %a, i32 %b) {
define i32 @mul96(i32 %a) {
; RV32I-LABEL: mul96:
; RV32I: # %bb.0:
-; RV32I-NEXT: li a1, 96
-; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 5
+; RV32I-NEXT: slli a0, a0, 7
+; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV32ZBA-LABEL: mul96:
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll
index ee9b73ca82f21..5c1001a063243 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll
@@ -369,8 +369,9 @@ define i64 @sh2add_extra_sext(i32 %x, i32 %y, i32 %z) {
define i64 @addmul6(i64 %a, i64 %b) {
; RV64I-LABEL: addmul6:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 6
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 1
+; RV64I-NEXT: slli a0, a0, 3
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -405,8 +406,9 @@ define i64 @addmul10(i64 %a, i64 %b) {
define i64 @addmul12(i64 %a, i64 %b) {
; RV64I-LABEL: addmul12:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 12
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 2
+; RV64I-NEXT: slli a0, a0, 4
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -459,8 +461,9 @@ define i64 @addmul20(i64 %a, i64 %b) {
define i64 @addmul24(i64 %a, i64 %b) {
; RV64I-LABEL: addmul24:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 24
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 3
+; RV64I-NEXT: slli a0, a0, 5
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -531,8 +534,9 @@ define i64 @addmul72(i64 %a, i64 %b) {
define i64 @mul96(i64 %a) {
; RV64I-LABEL: mul96:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 96
-; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 5
+; RV64I-NEXT: slli a0, a0, 7
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: mul96:
@@ -579,10 +583,10 @@ define i64 @mul288(i64 %a) {
define i64 @zext_mul96(i32 signext %a) {
; RV64I-LABEL: zext_mul96:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 3
-; RV64I-NEXT: slli a1, a1, 37
; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: mulhu a0, a0, a1
+; RV64I-NEXT: srli a1, a0, 27
+; RV64I-NEXT: srli a0, a0, 25
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: zext_mul96:
@@ -638,11 +642,9 @@ define i64 @zext_mul288(i32 signext %a) {
define i64 @zext_mul12884901888(i32 signext %a) {
; RV64I-LABEL: zext_mul12884901888:
; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: li a1, 3
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: slli a0, a0, 34
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: zext_mul12884901888:
diff --git a/llvm/test/CodeGen/RISCV/rv64xtheadba.ll b/llvm/test/CodeGen/RISCV/rv64xtheadba.ll
index 1450c86c76d05..939211573cac7 100644
--- a/llvm/test/CodeGen/RISCV/rv64xtheadba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64xtheadba.ll
@@ -93,8 +93,9 @@ define i64 @th_addsl_2_extra_sext(i32 %x, i32 %y, i32 %z) {
define i64 @addmul6(i64 %a, i64 %b) {
; RV64I-LABEL: addmul6:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 6
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 1
+; RV64I-NEXT: slli a0, a0, 3
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -129,8 +130,9 @@ define i64 @addmul10(i64 %a, i64 %b) {
define i64 @addmul12(i64 %a, i64 %b) {
; RV64I-LABEL: addmul12:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 12
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 2
+; RV64I-NEXT: slli a0, a0, 4
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -183,8 +185,9 @@ define i64 @addmul20(i64 %a, i64 %b) {
define i64 @addmul24(i64 %a, i64 %b) {
; RV64I-LABEL: addmul24:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 24
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 3
+; RV64I-NEXT: slli a0, a0, 5
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -255,8 +258,9 @@ define i64 @addmul72(i64 %a, i64 %b) {
define i64 @mul96(i64 %a) {
; RV64I-LABEL: mul96:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 96
-; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 5
+; RV64I-NEXT: slli a0, a0, 7
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul96:
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index b4c80b60e0bad..ba0380755cdab 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -374,8 +374,9 @@ define i64 @sh2add_extra_sext(i32 %x, i32 %y, i32 %z) {
define i64 @addmul6(i64 %a, i64 %b) {
; RV64I-LABEL: addmul6:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 6
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 1
+; RV64I-NEXT: slli a0, a0, 3
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -392,8 +393,9 @@ define i64 @addmul6(i64 %a, i64 %b) {
define i64 @disjointormul6(i64 %a, i64 %b) {
; RV64I-LABEL: disjointormul6:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 6
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 1
+; RV64I-NEXT: slli a0, a0, 3
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
@@ -428,8 +430,9 @@ define i64 @addmul10(i64 %a, i64 %b) {
define i64 @addmul12(i64 %a, i64 %b) {
; RV64I-LABEL: addmul12:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 12
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 2
+; RV64I-NEXT: slli a0, a0, 4
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -482,8 +485,9 @@ define i64 @addmul20(i64 %a, i64 %b) {
define i64 @addmul24(i64 %a, i64 %b) {
; RV64I-LABEL: addmul24:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 24
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 3
+; RV64I-NEXT: slli a0, a0, 5
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -554,8 +558,9 @@ define i64 @addmul72(i64 %a, i64 %b) {
define i64 @mul96(i64 %a) {
; RV64I-LABEL: mul96:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 96
-; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 5
+; RV64I-NEXT: slli a0, a0, 7
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: mul96:
@@ -683,10 +688,10 @@ define i64 @mul288(i64 %a) {
define i64 @zext_mul96(i32 signext %a) {
; RV64I-LABEL: zext_mul96:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 3
-; RV64I-NEXT: slli a1, a1, 37
; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: mulhu a0, a0, a1
+; RV64I-NEXT: srli a1, a0, 27
+; RV64I-NEXT: srli a0, a0, 25
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: zext_mul96:
@@ -742,11 +747,9 @@ define i64 @zext_mul288(i32 signext %a) {
define i64 @zext_mul12884901888(i32 signext %a) {
; RV64I-LABEL: zext_mul12884901888:
; RV64I: # %bb.0:
-; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: srli a0, a0, 32
-; RV64I-NEXT: li a1, 3
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: slli a0, a0, 34
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: zext_mul12884901888:
@@ -1208,8 +1211,9 @@ define i64 @mul4104(i64 %a) {
define signext i32 @mulw192(i32 signext %a) {
; RV64I-LABEL: mulw192:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 192
-; RV64I-NEXT: mulw a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 6
+; RV64I-NEXT: slli a0, a0, 8
+; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: mulw192:
@@ -2517,9 +2521,9 @@ define i64 @regression(i32 signext %x, i32 signext %y) {
; RV64I: # %bb.0:
; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: li a1, 3
-; RV64I-NEXT: slli a1, a1, 35
-; RV64I-NEXT: mulhu a0, a0, a1
+; RV64I-NEXT: srli a1, a0, 29
+; RV64I-NEXT: srli a0, a0, 27
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: regression:
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
index 187f758b78020..31e86c1e68098 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -71,12 +71,12 @@ define fastcc <vscale x 64 x i32> @ret_split_nxv64i32(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a3, a2, 3
-; CHECK-NEXT: add a4, a1, a3
-; CHECK-NEXT: vl8re32.v v8, (a4)
-; CHECK-NEXT: slli a4, a2, 4
-; CHECK-NEXT: li a5, 24
-; CHECK-NEXT: mul a2, a2, a5
+; CHECK-NEXT: slli a4, a2, 5
+; CHECK-NEXT: sub a4, a4, a3
; CHECK-NEXT: add a5, a1, a4
+; CHECK-NEXT: vl8re32.v v8, (a5)
+; CHECK-NEXT: add a5, a1, a3
+; CHECK-NEXT: slli a2, a2, 4
; CHECK-NEXT: vl8re32.v v16, (a1)
; CHECK-NEXT: add a1, a1, a2
; CHECK-NEXT: vl8re32.v v24, (a1)
@@ -84,9 +84,9 @@ define fastcc <vscale x 64 x i32> @ret_split_nxv64i32(ptr %x) {
; CHECK-NEXT: vs8r.v v16, (a0)
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vs8r.v v24, (a2)
-; CHECK-NEXT: add a4, a0, a4
-; CHECK-NEXT: vs8r.v v0, (a4)
-; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vs8r.v v0, (a3)
+; CHECK-NEXT: add a0, a0, a4
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: ret
%v = load <vscale x 64 x i32>, ptr %x
@@ -105,75 +105,73 @@ define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(ptr %x) {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a3, a2, 3
-; CHECK-NEXT: add a4, a1, a3
-; CHECK-NEXT: vl8re32.v v8, (a4)
-; CHECK-NEXT: csrr a4, vlenb
-; CHECK-NEXT: li a5, 24
-; CHECK-NEXT: mul a4, a4, a5
-; CHECK-NEXT: add a4, sp, a4
-; CHECK-NEXT: addi a4, a4, 16
-; CHECK-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
-; CHECK-NEXT: slli a4, a2, 4
-; CHECK-NEXT: add a5, a1, a4
-; CHECK-NEXT: vl8re32.v v8, (a5)
-; CHECK-NEXT: csrr a5, vlenb
-; CHECK-NEXT: slli a5, a5, 4
-; CHECK-NEXT: add a5, sp, a5
-; CHECK-NEXT: addi a5, a5, 16
-; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
-; CHECK-NEXT: li a5, 24
-; CHECK-NEXT: mul a5, a2, a5
+; CHECK-NEXT: slli a4, a2, 5
+; CHECK-NEXT: sub a5, a4, a3
; CHECK-NEXT: add a6, a1, a5
; CHECK-NEXT: vl8re32.v v8, (a6)
; CHECK-NEXT: csrr a6, vlenb
-; CHECK-NEXT: slli a6, a6, 3
+; CHECK-NEXT: li a7, 24
+; CHECK-NEXT: mul a6, a6, a7
; CHECK-NEXT: add a6, sp, a6
; CHECK-NEXT: addi a6, a6, 16
; CHECK-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill
-; CHECK-NEXT: slli a6, a2, 5
-; CHECK-NEXT: add a7, a1, a6
-; CHECK-NEXT: vl8re32.v v8, (a7)
-; CHECK-NEXT: addi a7, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a7) # Unknown-size Folded Spill
-; CHECK-NEXT: li a7, 40
-; CHECK-NEXT: mul a7, a2, a7
-; CHECK-NEXT: add t0, a1, a7
-; CHECK-NEXT: li t1, 48
-; CHECK-NEXT: mul t1, a2, t1
-; CHECK-NEXT: add t2, a1, t1
-; CHECK-NEXT: li t3, 56
+; CHECK-NEXT: slli a6, a2, 4
+; CHECK-NEXT: slli a7, a2, 6
+; CHECK-NEXT: sub t0, a7, a6
+; CHECK-NEXT: add t1, a1, t0
+; CHECK-NEXT: vl8re32.v v8, (t1)
+; CHECK-NEXT: csrr t1, vlenb
+; CHECK-NEXT: slli t1, t1, 4
+; CHECK-NEXT: add t1, sp, t1
+; CHECK-NEXT: addi t1, t1, 16
+; CHECK-NEXT: vs8r.v v8, (t1) # Unknown-size Folded Spill
+; CHECK-NEXT: sub a7, a7, a3
+; CHECK-NEXT: add t1, a1, a7
+; CHECK-NEXT: vl8re32.v v8, (t1)
+; CHECK-NEXT: csrr t1, vlenb
+; CHECK-NEXT: slli t1, t1, 3
+; CHECK-NEXT: add t1, sp, t1
+; CHECK-NEXT: addi t1, t1, 16
+; CHECK-NEXT: vs8r.v v8, (t1) # Unknown-size Folded Spill
+; CHECK-NEXT: add t1, a1, a3
+; CHECK-NEXT: vl8re32.v v8, (t1)
+; CHECK-NEXT: addi t1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (t1) # Unknown-size Folded Spill
+; CHECK-NEXT: add t1, a1, a6
+; CHECK-NEXT: add t2, a1, a4
+; CHECK-NEXT: li t3, 40
; CHECK-NEXT: mul a2, a2, t3
; CHECK-NEXT: add t3, a1, a2
; CHECK-NEXT: vl8re32.v v8, (a1)
-; CHECK-NEXT: vl8re32.v v0, (t0)
+; CHECK-NEXT: vl8re32.v v0, (t1)
; CHECK-NEXT: vl8re32.v v16, (t3)
; CHECK-NEXT: vl8re32.v v24, (t2)
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vs8r.v v16, (a2)
-; CHECK-NEXT: add t1, a0, t1
-; CHECK-NEXT: vs8r.v v24, (t1)
-; CHECK-NEXT: add a7, a0, a7
-; CHECK-NEXT: vs8r.v v0, (a7)
+; CHECK-NEXT: add a4, a0, a4
+; CHECK-NEXT: vs8r.v v24, (a4)
; CHECK-NEXT: add a6, a0, a6
+; CHECK-NEXT: vs8r.v v0, (a6)
+; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: vs8r.v v8, (a6)
-; CHECK-NEXT: add a5, a0, a5
+; CHECK-NEXT: vs8r.v v8, (a3)
+; CHECK-NEXT: add a7, a0, a7
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: vs8r.v v8, (a5)
-; CHECK-NEXT: add a4, a0, a4
+; CHECK-NEXT: vs8r.v v8, (a7)
+; CHECK-NEXT: add t0, a0, t0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: vs8r.v v8, (a4)
-; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: vs8r.v v8, (t0)
+; CHECK-NEXT: add a0, a0, a5
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a2, 24
; CHECK-NEXT: mul a1, a1, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
index e15e6452163b1..e81a8a7e2dcb0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
@@ -274,9 +274,9 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_6(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_6:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: li a1, 6
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: srli a1, a0, 3
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a0, a0, a1
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: ret
@@ -297,9 +297,9 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_22(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_22:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: li a1, 6
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: srli a1, a0, 3
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a0, a0, a1
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll
index c26532d355957..1c3b429202adf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll
@@ -80,13 +80,15 @@ define void @strided_store_offset_start(i64 %n, ptr %p) {
;
; RV64-LABEL: strided_store_offset_start:
; RV64: # %bb.0:
-; RV64-NEXT: li a2, 56
-; RV64-NEXT: mul a0, a0, a2
+; RV64-NEXT: slli a2, a0, 3
+; RV64-NEXT: slli a0, a0, 6
+; RV64-NEXT: sub a0, a0, a2
; RV64-NEXT: add a0, a1, a0
; RV64-NEXT: addi a0, a0, 36
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
-; RV64-NEXT: vsse64.v v8, (a0), a2
+; RV64-NEXT: li a1, 56
+; RV64-NEXT: vsse64.v v8, (a0), a1
; RV64-NEXT: ret
%step = tail call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
%.splatinsert = insertelement <vscale x 1 x i64> poison, i64 %n, i64 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
index 897bfdea69f1f..17d9591e3a015 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -3511,17 +3511,17 @@ define <vscale x 32 x i1> @fcmp_oeq_vv_nxv32f64(<vscale x 32 x double> %va, <vsc
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul t2, a3, a1
-; CHECK-NEXT: slli t1, a3, 3
+; CHECK-NEXT: slli t0, a3, 3
+; CHECK-NEXT: slli a1, a3, 5
+; CHECK-NEXT: sub t1, a1, t0
; CHECK-NEXT: srli a4, a3, 2
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v7, v0, a4
; CHECK-NEXT: srli a1, a3, 3
; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
-; CHECK-NEXT: add a5, a2, t1
+; CHECK-NEXT: add a5, a2, t0
; CHECK-NEXT: vl8re64.v v8, (a5)
-; CHECK-NEXT: slli t0, a3, 4
+; CHECK-NEXT: slli t3, a3, 4
; CHECK-NEXT: slli a5, a3, 1
; CHECK-NEXT: vslidedown.vx v0, v0, a1
; CHECK-NEXT: mv a7, a6
@@ -3529,9 +3529,9 @@ define <vscale x 32 x i1> @fcmp_oeq_vv_nxv32f64(<vscale x 32 x double> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a7, a5
; CHECK-NEXT: .LBB171_2:
-; CHECK-NEXT: add t2, a2, t2
-; CHECK-NEXT: add t1, a0, t1
-; CHECK-NEXT: add t0, a2, t0
+; CHECK-NEXT: add t2, a0, t0
+; CHECK-NEXT: add t1, a2, t1
+; CHECK-NEXT: add t0, a2, t3
; CHECK-NEXT: vl8re64.v v16, (a2)
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 4
@@ -3633,7 +3633,7 @@ define <vscale x 32 x i1> @fcmp_oeq_vv_nxv32f64(<vscale x 32 x double> %va, <vsc
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v16, v8, v24, v0.t
+; CHECK-NEXT: vmfeq.vv v16, v24, v8, v0.t
; CHECK-NEXT: slli a0, a1, 1
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: add a1, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
index eff8c26d4d061..b26ebeae63011 100644
--- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
@@ -640,12 +640,13 @@ define <vscale x 16 x i64> @mul_bigimm_stepvector_nxv16i64() {
; RV32-NEXT: mul a1, a0, a1
; RV32-NEXT: sw a1, 0(sp)
; RV32-NEXT: srli a0, a0, 3
-; RV32-NEXT: li a1, 62
-; RV32-NEXT: mul a1, a0, a1
-; RV32-NEXT: lui a2, 92455
-; RV32-NEXT: addi a2, a2, -1368
-; RV32-NEXT: mulhu a0, a0, a2
-; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: lui a1, 92455
+; RV32-NEXT: addi a1, a1, -1368
+; RV32-NEXT: mulhu a1, a0, a1
+; RV32-NEXT: slli a2, a0, 1
+; RV32-NEXT: slli a0, a0, 6
+; RV32-NEXT: sub a0, a0, a2
+; RV32-NEXT: add a0, a1, a0
; RV32-NEXT: sw a0, 4(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 9ecfa50178316..9c5b29311182b 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -148,9 +148,10 @@ define i1 @test_srem_even(i4 %X) nounwind {
; RV32M-NEXT: slli a1, a1, 24
; RV32M-NEXT: srli a1, a1, 31
; RV32M-NEXT: add a1, a2, a1
-; RV32M-NEXT: li a2, 6
-; RV32M-NEXT: mul a1, a1, a2
-; RV32M-NEXT: sub a0, a0, a1
+; RV32M-NEXT: slli a2, a1, 3
+; RV32M-NEXT: slli a1, a1, 1
+; RV32M-NEXT: sub a1, a1, a2
+; RV32M-NEXT: add a0, a0, a1
; RV32M-NEXT: andi a0, a0, 15
; RV32M-NEXT: addi a0, a0, -1
; RV32M-NEXT: seqz a0, a0
@@ -166,9 +167,10 @@ define i1 @test_srem_even(i4 %X) nounwind {
; RV64M-NEXT: slli a1, a1, 56
; RV64M-NEXT: srli a1, a1, 63
; RV64M-NEXT: add a1, a2, a1
-; RV64M-NEXT: li a2, 6
-; RV64M-NEXT: mul a1, a1, a2
-; RV64M-NEXT: subw a0, a0, a1
+; RV64M-NEXT: slli a2, a1, 3
+; RV64M-NEXT: slli a1, a1, 1
+; RV64M-NEXT: subw a1, a1, a2
+; RV64M-NEXT: add a0, a0, a1
; RV64M-NEXT: andi a0, a0, 15
; RV64M-NEXT: addi a0, a0, -1
; RV64M-NEXT: seqz a0, a0
@@ -184,9 +186,10 @@ define i1 @test_srem_even(i4 %X) nounwind {
; RV32MV-NEXT: slli a1, a1, 24
; RV32MV-NEXT: srli a1, a1, 31
; RV32MV-NEXT: add a1, a2, a1
-; RV32MV-NEXT: li a2, 6
-; RV32MV-NEXT: mul a1, a1, a2
-; RV32MV-NEXT: sub a0, a0, a1
+; RV32MV-NEXT: slli a2, a1, 3
+; RV32MV-NEXT: slli a1, a1, 1
+; RV32MV-NEXT: sub a1, a1, a2
+; RV32MV-NEXT: add a0, a0, a1
; RV32MV-NEXT: andi a0, a0, 15
; RV32MV-NEXT: addi a0, a0, -1
; RV32MV-NEXT: seqz a0, a0
@@ -202,9 +205,10 @@ define i1 @test_srem_even(i4 %X) nounwind {
; RV64MV-NEXT: slli a1, a1, 56
; RV64MV-NEXT: srli a1, a1, 63
; RV64MV-NEXT: add a1, a2, a1
-; RV64MV-NEXT: li a2, 6
-; RV64MV-NEXT: mul a1, a1, a2
-; RV64MV-NEXT: subw a0, a0, a1
+; RV64MV-NEXT: slli a2, a1, 3
+; RV64MV-NEXT: slli a1, a1, 1
+; RV64MV-NEXT: subw a1, a1, a2
+; RV64MV-NEXT: add a0, a0, a1
; RV64MV-NEXT: andi a0, a0, 15
; RV64MV-NEXT: addi a0, a0, -1
; RV64MV-NEXT: seqz a0, a0
@@ -725,21 +729,21 @@ define void @test_srem_vec(ptr %X) nounwind {
;
; RV64MV-LABEL: test_srem_vec:
; RV64MV: # %bb.0:
-; RV64MV-NEXT: ld a1, 0(a0)
+; RV64MV-NEXT: lbu a1, 12(a0)
; RV64MV-NEXT: lwu a2, 8(a0)
-; RV64MV-NEXT: srli a3, a1, 2
-; RV64MV-NEXT: lbu a4, 12(a0)
-; RV64MV-NEXT: slli a5, a2, 62
-; RV64MV-NEXT: or a3, a5, a3
-; RV64MV-NEXT: srai a3, a3, 31
-; RV64MV-NEXT: slli a4, a4, 32
+; RV64MV-NEXT: slli a1, a1, 32
+; RV64MV-NEXT: ld a3, 0(a0)
+; RV64MV-NEXT: or a1, a2, a1
+; RV64MV-NEXT: slli a1, a1, 29
+; RV64MV-NEXT: srai a1, a1, 31
+; RV64MV-NEXT: srli a4, a3, 2
+; RV64MV-NEXT: slli a2, a2, 62
; RV64MV-NEXT: or a2, a2, a4
-; RV64MV-NEXT: slli a2, a2, 29
; RV64MV-NEXT: lui a4, %hi(.LCPI3_0)
; RV64MV-NEXT: ld a4, %lo(.LCPI3_0)(a4)
; RV64MV-NEXT: srai a2, a2, 31
-; RV64MV-NEXT: slli a1, a1, 31
-; RV64MV-NEXT: srai a1, a1, 31
+; RV64MV-NEXT: slli a3, a3, 31
+; RV64MV-NEXT: srai a3, a3, 31
; RV64MV-NEXT: mulh a4, a2, a4
; RV64MV-NEXT: srli a5, a4, 63
; RV64MV-NEXT: srai a4, a4, 1
@@ -747,27 +751,28 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV64MV-NEXT: lui a5, %hi(.LCPI3_1)
; RV64MV-NEXT: ld a5, %lo(.LCPI3_1)(a5)
; RV64MV-NEXT: add a2, a2, a4
-; RV64MV-NEXT: slli a4, a4, 2
-; RV64MV-NEXT: add a2, a2, a4
+; RV64MV-NEXT: slli a4, a4, 3
+; RV64MV-NEXT: sub a2, a2, a4
; RV64MV-NEXT: mulh a4, a3, a5
; RV64MV-NEXT: srli a5, a4, 63
-; RV64MV-NEXT: srai a4, a4, 1
; RV64MV-NEXT: add a4, a4, a5
-; RV64MV-NEXT: lui a5, %hi(.LCPI3_2)
-; RV64MV-NEXT: ld a5, %lo(.LCPI3_2)(a5)
+; RV64MV-NEXT: slli a5, a4, 3
+; RV64MV-NEXT: slli a4, a4, 1
+; RV64MV-NEXT: sub a4, a4, a5
; RV64MV-NEXT: add a3, a3, a4
-; RV64MV-NEXT: slli a4, a4, 3
-; RV64MV-NEXT: sub a3, a3, a4
-; RV64MV-NEXT: mulh a4, a1, a5
-; RV64MV-NEXT: srli a5, a4, 63
-; RV64MV-NEXT: add a4, a4, a5
-; RV64MV-NEXT: li a5, 6
-; RV64MV-NEXT: mul a4, a4, a5
-; RV64MV-NEXT: sub a1, a1, a4
+; RV64MV-NEXT: lui a4, %hi(.LCPI3_2)
+; RV64MV-NEXT: ld a4, %lo(.LCPI3_2)(a4)
; RV64MV-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64MV-NEXT: vmv.v.x v8, a1
-; RV64MV-NEXT: vslide1down.vx v8, v8, a3
+; RV64MV-NEXT: vmv.v.x v8, a3
; RV64MV-NEXT: vslide1down.vx v8, v8, a2
+; RV64MV-NEXT: mulh a2, a1, a4
+; RV64MV-NEXT: srli a3, a2, 63
+; RV64MV-NEXT: srai a2, a2, 1
+; RV64MV-NEXT: add a2, a2, a3
+; RV64MV-NEXT: slli a3, a2, 2
+; RV64MV-NEXT: add a1, a1, a2
+; RV64MV-NEXT: add a1, a1, a3
+; RV64MV-NEXT: vslide1down.vx v8, v8, a1
; RV64MV-NEXT: vslidedown.vi v8, v8, 1
; RV64MV-NEXT: li a1, -1
; RV64MV-NEXT: srli a1, a1, 31
diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
index 540883fdc517a..c057c656e0fb7 100644
--- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
@@ -56,18 +56,19 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
; RV32IM: # %bb.0:
; RV32IM-NEXT: lhu a2, 12(a1)
; RV32IM-NEXT: lhu a3, 8(a1)
-; RV32IM-NEXT: lhu a4, 0(a1)
-; RV32IM-NEXT: lhu a1, 4(a1)
-; RV32IM-NEXT: lui a5, 11038
-; RV32IM-NEXT: addi a5, a5, -1465
-; RV32IM-NEXT: mulhu a5, a4, a5
-; RV32IM-NEXT: li a6, 95
-; RV32IM-NEXT: mul a5, a5, a6
-; RV32IM-NEXT: sub a4, a4, a5
+; RV32IM-NEXT: lhu a4, 4(a1)
+; RV32IM-NEXT: lhu a1, 0(a1)
; RV32IM-NEXT: lui a5, 8456
; RV32IM-NEXT: addi a5, a5, 1058
+; RV32IM-NEXT: mulhu a5, a4, a5
+; RV32IM-NEXT: slli a6, a5, 7
+; RV32IM-NEXT: slli a5, a5, 2
+; RV32IM-NEXT: sub a5, a5, a6
+; RV32IM-NEXT: add a4, a4, a5
+; RV32IM-NEXT: lui a5, 11038
+; RV32IM-NEXT: addi a5, a5, -1465
; RV32IM-NEXT: mulhu a5, a1, a5
-; RV32IM-NEXT: li a6, 124
+; RV32IM-NEXT: li a6, 95
; RV32IM-NEXT: mul a5, a5, a6
; RV32IM-NEXT: sub a1, a1, a5
; RV32IM-NEXT: lui a5, 10700
@@ -84,8 +85,8 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
; RV32IM-NEXT: sub a2, a2, a5
; RV32IM-NEXT: sh a2, 6(a0)
; RV32IM-NEXT: sh a3, 4(a0)
-; RV32IM-NEXT: sh a1, 2(a0)
-; RV32IM-NEXT: sh a4, 0(a0)
+; RV32IM-NEXT: sh a1, 0(a0)
+; RV32IM-NEXT: sh a4, 2(a0)
; RV32IM-NEXT: ret
;
; RV64I-LABEL: fold_urem_vec_1:
@@ -132,22 +133,23 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
;
; RV64IM-LABEL: fold_urem_vec_1:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: lhu a2, 0(a1)
+; RV64IM-NEXT: lhu a2, 8(a1)
; RV64IM-NEXT: lui a3, %hi(.LCPI0_0)
; RV64IM-NEXT: ld a3, %lo(.LCPI0_0)(a3)
; RV64IM-NEXT: lhu a4, 24(a1)
; RV64IM-NEXT: lhu a5, 16(a1)
-; RV64IM-NEXT: lhu a1, 8(a1)
+; RV64IM-NEXT: lhu a1, 0(a1)
; RV64IM-NEXT: mulhu a3, a2, a3
-; RV64IM-NEXT: lui a6, %hi(.LCPI0_1)
-; RV64IM-NEXT: ld a6, %lo(.LCPI0_1)(a6)
-; RV64IM-NEXT: li a7, 95
-; RV64IM-NEXT: mul a3, a3, a7
-; RV64IM-NEXT: subw a2, a2, a3
-; RV64IM-NEXT: mulhu a3, a1, a6
+; RV64IM-NEXT: slli a6, a3, 7
+; RV64IM-NEXT: lui a7, %hi(.LCPI0_1)
+; RV64IM-NEXT: ld a7, %lo(.LCPI0_1)(a7)
+; RV64IM-NEXT: slli a3, a3, 2
+; RV64IM-NEXT: subw a3, a3, a6
+; RV64IM-NEXT: add a2, a2, a3
+; RV64IM-NEXT: mulhu a3, a1, a7
; RV64IM-NEXT: lui a6, %hi(.LCPI0_2)
; RV64IM-NEXT: ld a6, %lo(.LCPI0_2)(a6)
-; RV64IM-NEXT: li a7, 124
+; RV64IM-NEXT: li a7, 95
; RV64IM-NEXT: mul a3, a3, a7
; RV64IM-NEXT: subw a1, a1, a3
; RV64IM-NEXT: mulhu a3, a5, a6
@@ -162,8 +164,8 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
; RV64IM-NEXT: subw a4, a4, a3
; RV64IM-NEXT: sh a4, 6(a0)
; RV64IM-NEXT: sh a5, 4(a0)
-; RV64IM-NEXT: sh a1, 2(a0)
-; RV64IM-NEXT: sh a2, 0(a0)
+; RV64IM-NEXT: sh a1, 0(a0)
+; RV64IM-NEXT: sh a2, 2(a0)
; RV64IM-NEXT: ret
%1 = urem <4 x i16> %x, <i16 95, i16 124, i16 98, i16 1003>
ret <4 x i16> %1
From 91c1b53ac2bf201f250e9c8920a6941d076f9366 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Thu, 9 May 2024 14:38:16 -0700
Subject: [PATCH 2/2] Test update after merge
---
llvm/test/CodeGen/RISCV/rv64zba.ll | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index 3b588966c1efd..fd91fa71f953e 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -2745,8 +2745,9 @@ define i64 @bext_mul12(i32 %1, i32 %2) {
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: srlw a0, a0, a1
; RV64I-NEXT: andi a0, a0, 1
-; RV64I-NEXT: li a1, 12
-; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 2
+; RV64I-NEXT: slli a0, a0, 4
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBANOZBB-LABEL: bext_mul12: