[llvm] 3e55ac9 - [RISCV] Strength reduce mul by 2^N - 2^M (#88983)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 20 07:36:51 PDT 2024
Author: Philip Reames
Date: 2024-06-20T07:36:48-07:00
New Revision: 3e55ac94c7502d69c71bda0948a8353a6622da6e
URL: https://github.com/llvm/llvm-project/commit/3e55ac94c7502d69c71bda0948a8353a6622da6e
DIFF: https://github.com/llvm/llvm-project/commit/3e55ac94c7502d69c71bda0948a8353a6622da6e.diff
LOG: [RISCV] Strength reduce mul by 2^N - 2^M (#88983)
This is a three-instruction expansion and does not depend on Zba, so
most of the test changes are in base RV32/64I configurations.
With Zba, this picks up immediates such as 14, 28, 30, 56, 60, and 62,
which aren't covered by our other expansions.
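To make the transform concrete (this worked example and the standalone helper
below are illustrative only, not part of the commit), a multiply by a constant
of the form 2^N - 2^M becomes two shifts and a subtract:
x * 14 = x * (16 - 2) = (x << 4) - (x << 1). A minimal C++20 sketch of the
constant match, mirroring the check added to expandMul in the diff below; the
helper name and ShiftPair type are assumptions:

#include <bit>
#include <cstdint>
#include <optional>

struct ShiftPair { unsigned N, M; }; // x * C == (x << N) - (x << M)

// Return {N, M} if C has the form 2^N - 2^M, otherwise nullopt.
std::optional<ShiftPair> matchSubOfShifts(uint64_t C) {
  if (C == 0)
    return std::nullopt;
  uint64_t LowBit = C & -C;   // 2^M: the lowest set bit of C
  uint64_t Sum = C + LowBit;  // equals 2^N exactly when C == 2^N - 2^M
  if (!std::has_single_bit(Sum))
    return std::nullopt;
  return ShiftPair{unsigned(std::countr_zero(Sum)),
                   unsigned(std::countr_zero(LowBit))};
}

// Example: C = 14 -> LowBit = 2, Sum = 16 -> {N = 4, M = 1}, so x * 14
// lowers to (x << 4) - (x << 1): slli, slli, sub (three instructions).

The commit itself performs the same check inline using MulAmt & (-MulAmt) and
isPowerOf2_64, then builds two ISD::SHL nodes and an ISD::SUB.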
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/mul.ll
llvm/test/CodeGen/RISCV/rv32xtheadba.ll
llvm/test/CodeGen/RISCV/rv32zba.ll
llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll
llvm/test/CodeGen/RISCV/rv64xtheadba.ll
llvm/test/CodeGen/RISCV/rv64zba.ll
llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll
llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
llvm/test/CodeGen/RISCV/rvv/stepvector.ll
llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e9c7215504756..57817832c9b42 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -13706,8 +13706,8 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
if (VT != Subtarget.getXLenVT())
return SDValue();
- if (!Subtarget.hasStdExtZba() && !Subtarget.hasVendorXTHeadBa())
- return SDValue();
+ const bool HasShlAdd =
+ Subtarget.hasStdExtZba() || Subtarget.hasVendorXTHeadBa();
ConstantSDNode *CNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (!CNode)
@@ -13720,107 +13720,123 @@ static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
// other target properly freezes X in these cases either.
SDValue X = N->getOperand(0);
- for (uint64_t Divisor : {3, 5, 9}) {
- if (MulAmt % Divisor != 0)
- continue;
- uint64_t MulAmt2 = MulAmt / Divisor;
- // 3/5/9 * 2^N -> shl (shXadd X, X), N
- if (isPowerOf2_64(MulAmt2)) {
- SDLoc DL(N);
- SDValue X = N->getOperand(0);
- // Put the shift first if we can fold a zext into the
- // shift forming a slli.uw.
- if (X.getOpcode() == ISD::AND && isa<ConstantSDNode>(X.getOperand(1)) &&
- X.getConstantOperandVal(1) == UINT64_C(0xffffffff)) {
- SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, X,
- DAG.getConstant(Log2_64(MulAmt2), DL, VT));
- return DAG.getNode(RISCVISD::SHL_ADD, DL, VT, Shl,
- DAG.getConstant(Log2_64(Divisor - 1), DL, VT), Shl);
+ if (HasShlAdd) {
+ for (uint64_t Divisor : {3, 5, 9}) {
+ if (MulAmt % Divisor != 0)
+ continue;
+ uint64_t MulAmt2 = MulAmt / Divisor;
+ // 3/5/9 * 2^N -> shl (shXadd X, X), N
+ if (isPowerOf2_64(MulAmt2)) {
+ SDLoc DL(N);
+ SDValue X = N->getOperand(0);
+ // Put the shift first if we can fold a zext into the
+ // shift forming a slli.uw.
+ if (X.getOpcode() == ISD::AND && isa<ConstantSDNode>(X.getOperand(1)) &&
+ X.getConstantOperandVal(1) == UINT64_C(0xffffffff)) {
+ SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, X,
+ DAG.getConstant(Log2_64(MulAmt2), DL, VT));
+ return DAG.getNode(RISCVISD::SHL_ADD, DL, VT, Shl,
+ DAG.getConstant(Log2_64(Divisor - 1), DL, VT),
+ Shl);
+ }
+ // Otherwise, put rhe shl second so that it can fold with following
+ // instructions (e.g. sext or add).
+ SDValue Mul359 =
+ DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
+ DAG.getConstant(Log2_64(Divisor - 1), DL, VT), X);
+ return DAG.getNode(ISD::SHL, DL, VT, Mul359,
+ DAG.getConstant(Log2_64(MulAmt2), DL, VT));
+ }
+
+ // 3/5/9 * 3/5/9 -> shXadd (shYadd X, X), (shYadd X, X)
+ if (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9) {
+ SDLoc DL(N);
+ SDValue Mul359 =
+ DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
+ DAG.getConstant(Log2_64(Divisor - 1), DL, VT), X);
+ return DAG.getNode(RISCVISD::SHL_ADD, DL, VT, Mul359,
+ DAG.getConstant(Log2_64(MulAmt2 - 1), DL, VT),
+ Mul359);
}
- // Otherwise, put rhe shl second so that it can fold with following
- // instructions (e.g. sext or add).
- SDValue Mul359 =
- DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
- DAG.getConstant(Log2_64(Divisor - 1), DL, VT), X);
- return DAG.getNode(ISD::SHL, DL, VT, Mul359,
- DAG.getConstant(Log2_64(MulAmt2), DL, VT));
}
- // 3/5/9 * 3/5/9 -> shXadd (shYadd X, X), (shYadd X, X)
- if (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9) {
- SDLoc DL(N);
- SDValue Mul359 =
- DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
- DAG.getConstant(Log2_64(Divisor - 1), DL, VT), X);
- return DAG.getNode(RISCVISD::SHL_ADD, DL, VT, Mul359,
- DAG.getConstant(Log2_64(MulAmt2 - 1), DL, VT),
- Mul359);
- }
- }
-
- // If this is a power 2 + 2/4/8, we can use a shift followed by a single
- // shXadd. First check if this a sum of two power of 2s because that's
- // easy. Then count how many zeros are up to the first bit.
- if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
- unsigned ScaleShift = llvm::countr_zero(MulAmt);
- if (ScaleShift >= 1 && ScaleShift < 4) {
- unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
- SDLoc DL(N);
- SDValue Shift1 =
- DAG.getNode(ISD::SHL, DL, VT, X, DAG.getConstant(ShiftAmt, DL, VT));
- return DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
- DAG.getConstant(ScaleShift, DL, VT), Shift1);
+ // If this is a power 2 + 2/4/8, we can use a shift followed by a single
+ // shXadd. First check if this a sum of two power of 2s because that's
+ // easy. Then count how many zeros are up to the first bit.
+ if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
+ unsigned ScaleShift = llvm::countr_zero(MulAmt);
+ if (ScaleShift >= 1 && ScaleShift < 4) {
+ unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
+ SDLoc DL(N);
+ SDValue Shift1 =
+ DAG.getNode(ISD::SHL, DL, VT, X, DAG.getConstant(ShiftAmt, DL, VT));
+ return DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
+ DAG.getConstant(ScaleShift, DL, VT), Shift1);
+ }
}
- }
- // 2^(1,2,3) * 3,5,9 + 1 -> (shXadd (shYadd x, x), x)
- // This is the two instruction form, there are also three instruction
- // variants we could implement. e.g.
- // (2^(1,2,3) * 3,5,9 + 1) << C2
- // 2^(C1>3) * 3,5,9 +/- 1
- for (uint64_t Divisor : {3, 5, 9}) {
- uint64_t C = MulAmt - 1;
- if (C <= Divisor)
- continue;
- unsigned TZ = llvm::countr_zero(C);
- if ((C >> TZ) == Divisor && (TZ == 1 || TZ == 2 || TZ == 3)) {
- SDLoc DL(N);
- SDValue Mul359 =
- DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
- DAG.getConstant(Log2_64(Divisor - 1), DL, VT), X);
- return DAG.getNode(RISCVISD::SHL_ADD, DL, VT, Mul359,
- DAG.getConstant(TZ, DL, VT), X);
+ // 2^(1,2,3) * 3,5,9 + 1 -> (shXadd (shYadd x, x), x)
+ // This is the two instruction form, there are also three instruction
+ // variants we could implement. e.g.
+ // (2^(1,2,3) * 3,5,9 + 1) << C2
+ // 2^(C1>3) * 3,5,9 +/- 1
+ for (uint64_t Divisor : {3, 5, 9}) {
+ uint64_t C = MulAmt - 1;
+ if (C <= Divisor)
+ continue;
+ unsigned TZ = llvm::countr_zero(C);
+ if ((C >> TZ) == Divisor && (TZ == 1 || TZ == 2 || TZ == 3)) {
+ SDLoc DL(N);
+ SDValue Mul359 =
+ DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
+ DAG.getConstant(Log2_64(Divisor - 1), DL, VT), X);
+ return DAG.getNode(RISCVISD::SHL_ADD, DL, VT, Mul359,
+ DAG.getConstant(TZ, DL, VT), X);
+ }
}
- }
- // 2^n + 2/4/8 + 1 -> (add (shl X, C1), (shXadd X, X))
- if (MulAmt > 2 && isPowerOf2_64((MulAmt - 1) & (MulAmt - 2))) {
- unsigned ScaleShift = llvm::countr_zero(MulAmt - 1);
- if (ScaleShift >= 1 && ScaleShift < 4) {
- unsigned ShiftAmt = Log2_64(((MulAmt - 1) & (MulAmt - 2)));
- SDLoc DL(N);
- SDValue Shift1 =
- DAG.getNode(ISD::SHL, DL, VT, X, DAG.getConstant(ShiftAmt, DL, VT));
- return DAG.getNode(ISD::ADD, DL, VT, Shift1,
- DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
- DAG.getConstant(ScaleShift, DL, VT), X));
+ // 2^n + 2/4/8 + 1 -> (add (shl X, C1), (shXadd X, X))
+ if (MulAmt > 2 && isPowerOf2_64((MulAmt - 1) & (MulAmt - 2))) {
+ unsigned ScaleShift = llvm::countr_zero(MulAmt - 1);
+ if (ScaleShift >= 1 && ScaleShift < 4) {
+ unsigned ShiftAmt = Log2_64(((MulAmt - 1) & (MulAmt - 2)));
+ SDLoc DL(N);
+ SDValue Shift1 =
+ DAG.getNode(ISD::SHL, DL, VT, X, DAG.getConstant(ShiftAmt, DL, VT));
+ return DAG.getNode(ISD::ADD, DL, VT, Shift1,
+ DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
+ DAG.getConstant(ScaleShift, DL, VT), X));
+ }
}
- }
- // 2^N - 3/5/9 --> (sub (shl X, C1), (shXadd X, x))
- for (uint64_t Offset : {3, 5, 9}) {
- if (isPowerOf2_64(MulAmt + Offset)) {
- SDLoc DL(N);
- SDValue Shift1 =
- DAG.getNode(ISD::SHL, DL, VT, X,
- DAG.getConstant(Log2_64(MulAmt + Offset), DL, VT));
- SDValue Mul359 = DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
- DAG.getConstant(Log2_64(Offset - 1), DL, VT),
- X);
- return DAG.getNode(ISD::SUB, DL, VT, Shift1, Mul359);
+ // 2^N - 3/5/9 --> (sub (shl X, C1), (shXadd X, x))
+ for (uint64_t Offset : {3, 5, 9}) {
+ if (isPowerOf2_64(MulAmt + Offset)) {
+ SDLoc DL(N);
+ SDValue Shift1 =
+ DAG.getNode(ISD::SHL, DL, VT, X,
+ DAG.getConstant(Log2_64(MulAmt + Offset), DL, VT));
+ SDValue Mul359 =
+ DAG.getNode(RISCVISD::SHL_ADD, DL, VT, X,
+ DAG.getConstant(Log2_64(Offset - 1), DL, VT), X);
+ return DAG.getNode(ISD::SUB, DL, VT, Shift1, Mul359);
+ }
}
}
+ // 2^N - 2^M -> (sub (shl X, C1), (shl X, C2))
+ uint64_t MulAmtLowBit = MulAmt & (-MulAmt);
+ if (isPowerOf2_64(MulAmt + MulAmtLowBit)) {
+ uint64_t ShiftAmt1 = MulAmt + MulAmtLowBit;
+ SDLoc DL(N);
+ SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
+ DAG.getConstant(Log2_64(ShiftAmt1), DL, VT));
+ SDValue Shift2 =
+ DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
+ DAG.getConstant(Log2_64(MulAmtLowBit), DL, VT));
+ return DAG.getNode(ISD::SUB, DL, VT, Shift1, Shift2);
+ }
+
return SDValue();
}
diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index 42ea425f99c0a..14f2777fdd06d 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -473,24 +473,23 @@ define i32 @muli32_p14(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p14:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: li a1, 14
-; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: slli a1, a0, 1
+; RV32IM-NEXT: slli a0, a0, 4
+; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p14:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: li a1, 14
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 1
+; RV64I-NEXT: slli a0, a0, 4
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p14:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 14
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 1
+; RV64IM-NEXT: slli a0, a0, 4
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 14
ret i32 %1
@@ -504,24 +503,23 @@ define i32 @muli32_p28(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p28:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: li a1, 28
-; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: slli a1, a0, 2
+; RV32IM-NEXT: slli a0, a0, 5
+; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p28:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: li a1, 28
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 2
+; RV64I-NEXT: slli a0, a0, 5
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p28:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 28
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 2
+; RV64IM-NEXT: slli a0, a0, 5
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 28
ret i32 %1
@@ -535,24 +533,23 @@ define i32 @muli32_p30(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p30:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: li a1, 30
-; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: slli a1, a0, 1
+; RV32IM-NEXT: slli a0, a0, 5
+; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p30:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: li a1, 30
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 1
+; RV64I-NEXT: slli a0, a0, 5
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p30:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 30
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 1
+; RV64IM-NEXT: slli a0, a0, 5
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 30
ret i32 %1
@@ -566,24 +563,23 @@ define i32 @muli32_p56(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p56:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: li a1, 56
-; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: slli a1, a0, 3
+; RV32IM-NEXT: slli a0, a0, 6
+; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p56:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: li a1, 56
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 3
+; RV64I-NEXT: slli a0, a0, 6
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p56:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 56
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 3
+; RV64IM-NEXT: slli a0, a0, 6
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 56
ret i32 %1
@@ -597,24 +593,23 @@ define i32 @muli32_p60(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p60:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: li a1, 60
-; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: slli a1, a0, 2
+; RV32IM-NEXT: slli a0, a0, 6
+; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p60:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: li a1, 60
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 2
+; RV64I-NEXT: slli a0, a0, 6
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p60:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 60
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 2
+; RV64IM-NEXT: slli a0, a0, 6
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 60
ret i32 %1
@@ -628,24 +623,23 @@ define i32 @muli32_p62(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p62:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: li a1, 62
-; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: slli a1, a0, 1
+; RV32IM-NEXT: slli a0, a0, 6
+; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p62:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: li a1, 62
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 1
+; RV64I-NEXT: slli a0, a0, 6
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p62:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 62
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 1
+; RV64IM-NEXT: slli a0, a0, 6
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 62
ret i32 %1
@@ -943,24 +937,23 @@ define i32 @muli32_p384(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p384:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: li a1, 384
-; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: slli a1, a0, 7
+; RV32IM-NEXT: slli a0, a0, 9
+; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p384:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: li a1, 384
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 7
+; RV64I-NEXT: slli a0, a0, 9
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p384:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 384
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 7
+; RV64IM-NEXT: slli a0, a0, 9
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 384
ret i32 %1
@@ -974,24 +967,23 @@ define i32 @muli32_p12288(i32 %a) nounwind {
;
; RV32IM-LABEL: muli32_p12288:
; RV32IM: # %bb.0:
-; RV32IM-NEXT: lui a1, 3
-; RV32IM-NEXT: mul a0, a0, a1
+; RV32IM-NEXT: slli a1, a0, 12
+; RV32IM-NEXT: slli a0, a0, 14
+; RV32IM-NEXT: sub a0, a0, a1
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli32_p12288:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: lui a1, 3
-; RV64I-NEXT: call __muldi3
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: slli a1, a0, 12
+; RV64I-NEXT: slli a0, a0, 14
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IM-LABEL: muli32_p12288:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: lui a1, 3
-; RV64IM-NEXT: mulw a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 12
+; RV64IM-NEXT: slli a0, a0, 14
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
%1 = mul i32 %a, 12288
ret i32 %1
@@ -1191,12 +1183,16 @@ define i64 @muli64_p3840(i64 %a) nounwind {
;
; RV32IM-LABEL: muli64_p3840:
; RV32IM: # %bb.0:
+; RV32IM-NEXT: slli a2, a1, 8
+; RV32IM-NEXT: slli a1, a1, 12
+; RV32IM-NEXT: sub a1, a1, a2
; RV32IM-NEXT: li a2, 15
; RV32IM-NEXT: slli a2, a2, 8
-; RV32IM-NEXT: mul a1, a1, a2
-; RV32IM-NEXT: mulhu a3, a0, a2
-; RV32IM-NEXT: add a1, a3, a1
-; RV32IM-NEXT: mul a0, a0, a2
+; RV32IM-NEXT: mulhu a2, a0, a2
+; RV32IM-NEXT: add a1, a2, a1
+; RV32IM-NEXT: slli a2, a0, 8
+; RV32IM-NEXT: slli a0, a0, 12
+; RV32IM-NEXT: sub a0, a0, a2
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muli64_p3840:
@@ -1862,11 +1858,15 @@ define i64 @muland_demand(i64 %x) nounwind {
; RV32IM-LABEL: muland_demand:
; RV32IM: # %bb.0:
; RV32IM-NEXT: andi a0, a0, -8
+; RV32IM-NEXT: slli a2, a1, 2
+; RV32IM-NEXT: slli a1, a1, 4
+; RV32IM-NEXT: sub a1, a1, a2
; RV32IM-NEXT: li a2, 12
-; RV32IM-NEXT: mul a1, a1, a2
-; RV32IM-NEXT: mulhu a3, a0, a2
-; RV32IM-NEXT: add a1, a3, a1
-; RV32IM-NEXT: mul a0, a0, a2
+; RV32IM-NEXT: mulhu a2, a0, a2
+; RV32IM-NEXT: add a1, a2, a1
+; RV32IM-NEXT: slli a2, a0, 2
+; RV32IM-NEXT: slli a0, a0, 4
+; RV32IM-NEXT: sub a0, a0, a2
; RV32IM-NEXT: ret
;
; RV64I-LABEL: muland_demand:
@@ -1880,8 +1880,9 @@ define i64 @muland_demand(i64 %x) nounwind {
; RV64IM-LABEL: muland_demand:
; RV64IM: # %bb.0:
; RV64IM-NEXT: andi a0, a0, -8
-; RV64IM-NEXT: li a1, 12
-; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 2
+; RV64IM-NEXT: slli a0, a0, 4
+; RV64IM-NEXT: sub a0, a0, a1
; RV64IM-NEXT: ret
%and = and i64 %x, 4611686018427387896
%mul = mul i64 %and, 12
@@ -1915,9 +1916,9 @@ define i64 @mulzext_demand(i32 signext %x) nounwind {
;
; RV64IM-LABEL: mulzext_demand:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: li a1, 3
-; RV64IM-NEXT: slli a1, a1, 32
-; RV64IM-NEXT: mul a0, a0, a1
+; RV64IM-NEXT: slli a1, a0, 32
+; RV64IM-NEXT: slli a0, a0, 34
+; RV64IM-NEXT: sub a0, a0, a1
; RV64IM-NEXT: ret
%ext = zext i32 %x to i64
%mul = mul i64 %ext, 12884901888
diff --git a/llvm/test/CodeGen/RISCV/rv32xtheadba.ll b/llvm/test/CodeGen/RISCV/rv32xtheadba.ll
index 3bf7704dd1836..332e49771bedf 100644
--- a/llvm/test/CodeGen/RISCV/rv32xtheadba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32xtheadba.ll
@@ -97,8 +97,9 @@ define i64 @th_addsl_2_extra_sext(i32 %x, i32 %y, i32 %z) {
define i32 @addmul6(i32 %a, i32 %b) {
; RV32I-LABEL: addmul6:
; RV32I: # %bb.0:
-; RV32I-NEXT: li a2, 6
-; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 1
+; RV32I-NEXT: slli a0, a0, 3
+; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: ret
;
@@ -133,8 +134,9 @@ define i32 @addmul10(i32 %a, i32 %b) {
define i32 @addmul12(i32 %a, i32 %b) {
; RV32I-LABEL: addmul12:
; RV32I: # %bb.0:
-; RV32I-NEXT: li a2, 12
-; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 2
+; RV32I-NEXT: slli a0, a0, 4
+; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: ret
;
@@ -187,8 +189,9 @@ define i32 @addmul20(i32 %a, i32 %b) {
define i32 @addmul24(i32 %a, i32 %b) {
; RV32I-LABEL: addmul24:
; RV32I: # %bb.0:
-; RV32I-NEXT: li a2, 24
-; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 3
+; RV32I-NEXT: slli a0, a0, 5
+; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: ret
;
@@ -259,8 +262,9 @@ define i32 @addmul72(i32 %a, i32 %b) {
define i32 @mul96(i32 %a) {
; RV32I-LABEL: mul96:
; RV32I: # %bb.0:
-; RV32I-NEXT: li a1, 96
-; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 5
+; RV32I-NEXT: slli a0, a0, 7
+; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV32XTHEADBA-LABEL: mul96:
diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll
index 2a72c1288f65c..89273ef0e50b5 100644
--- a/llvm/test/CodeGen/RISCV/rv32zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -63,8 +63,9 @@ define i64 @sh3add(i64 %0, ptr %1) {
define i32 @addmul6(i32 %a, i32 %b) {
; RV32I-LABEL: addmul6:
; RV32I: # %bb.0:
-; RV32I-NEXT: li a2, 6
-; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 1
+; RV32I-NEXT: slli a0, a0, 3
+; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: ret
;
@@ -99,8 +100,9 @@ define i32 @addmul10(i32 %a, i32 %b) {
define i32 @addmul12(i32 %a, i32 %b) {
; RV32I-LABEL: addmul12:
; RV32I: # %bb.0:
-; RV32I-NEXT: li a2, 12
-; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 2
+; RV32I-NEXT: slli a0, a0, 4
+; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: ret
;
@@ -153,8 +155,9 @@ define i32 @addmul20(i32 %a, i32 %b) {
define i32 @addmul24(i32 %a, i32 %b) {
; RV32I-LABEL: addmul24:
; RV32I: # %bb.0:
-; RV32I-NEXT: li a2, 24
-; RV32I-NEXT: mul a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 3
+; RV32I-NEXT: slli a0, a0, 5
+; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: ret
;
@@ -225,8 +228,9 @@ define i32 @addmul72(i32 %a, i32 %b) {
define i32 @mul96(i32 %a) {
; RV32I-LABEL: mul96:
; RV32I: # %bb.0:
-; RV32I-NEXT: li a1, 96
-; RV32I-NEXT: mul a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 5
+; RV32I-NEXT: slli a0, a0, 7
+; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV32ZBA-LABEL: mul96:
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll
index cf7be57ccc901..7e2e57d317681 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll
@@ -369,8 +369,9 @@ define i64 @sh2add_extra_sext(i32 %x, i32 %y, i32 %z) {
define i64 @addmul6(i64 %a, i64 %b) {
; RV64I-LABEL: addmul6:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 6
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 1
+; RV64I-NEXT: slli a0, a0, 3
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -405,8 +406,9 @@ define i64 @addmul10(i64 %a, i64 %b) {
define i64 @addmul12(i64 %a, i64 %b) {
; RV64I-LABEL: addmul12:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 12
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 2
+; RV64I-NEXT: slli a0, a0, 4
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -459,8 +461,9 @@ define i64 @addmul20(i64 %a, i64 %b) {
define i64 @addmul24(i64 %a, i64 %b) {
; RV64I-LABEL: addmul24:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 24
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 3
+; RV64I-NEXT: slli a0, a0, 5
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -531,8 +534,9 @@ define i64 @addmul72(i64 %a, i64 %b) {
define i64 @mul96(i64 %a) {
; RV64I-LABEL: mul96:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 96
-; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 5
+; RV64I-NEXT: slli a0, a0, 7
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: mul96:
@@ -579,10 +583,10 @@ define i64 @mul288(i64 %a) {
define i64 @zext_mul96(i32 signext %a) {
; RV64I-LABEL: zext_mul96:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 3
-; RV64I-NEXT: slli a1, a1, 37
; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: mulhu a0, a0, a1
+; RV64I-NEXT: srli a1, a0, 27
+; RV64I-NEXT: srli a0, a0, 25
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: zext_mul96:
@@ -637,9 +641,9 @@ define i64 @zext_mul288(i32 signext %a) {
define i64 @zext_mul12884901888(i32 signext %a) {
; RV64I-LABEL: zext_mul12884901888:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 3
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: slli a0, a0, 34
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: zext_mul12884901888:
diff --git a/llvm/test/CodeGen/RISCV/rv64xtheadba.ll b/llvm/test/CodeGen/RISCV/rv64xtheadba.ll
index 7754128370d66..2d44ffbf63749 100644
--- a/llvm/test/CodeGen/RISCV/rv64xtheadba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64xtheadba.ll
@@ -93,8 +93,9 @@ define i64 @th_addsl_2_extra_sext(i32 %x, i32 %y, i32 %z) {
define i64 @addmul6(i64 %a, i64 %b) {
; RV64I-LABEL: addmul6:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 6
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 1
+; RV64I-NEXT: slli a0, a0, 3
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -129,8 +130,9 @@ define i64 @addmul10(i64 %a, i64 %b) {
define i64 @addmul12(i64 %a, i64 %b) {
; RV64I-LABEL: addmul12:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 12
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 2
+; RV64I-NEXT: slli a0, a0, 4
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -183,8 +185,9 @@ define i64 @addmul20(i64 %a, i64 %b) {
define i64 @addmul24(i64 %a, i64 %b) {
; RV64I-LABEL: addmul24:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 24
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 3
+; RV64I-NEXT: slli a0, a0, 5
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -432,8 +435,9 @@ define i64 @mul81(i64 %a) {
define i64 @mul96(i64 %a) {
; RV64I-LABEL: mul96:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 96
-; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 5
+; RV64I-NEXT: slli a0, a0, 7
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64XTHEADBA-LABEL: mul96:
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index 4a568fb2b25c8..7cb2452e1a148 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -376,8 +376,9 @@ define i64 @sh2add_extra_sext(i32 %x, i32 %y, i32 %z) {
define i64 @addmul6(i64 %a, i64 %b) {
; RV64I-LABEL: addmul6:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 6
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 1
+; RV64I-NEXT: slli a0, a0, 3
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -394,8 +395,9 @@ define i64 @addmul6(i64 %a, i64 %b) {
define i64 @disjointormul6(i64 %a, i64 %b) {
; RV64I-LABEL: disjointormul6:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 6
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 1
+; RV64I-NEXT: slli a0, a0, 3
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
@@ -430,8 +432,9 @@ define i64 @addmul10(i64 %a, i64 %b) {
define i64 @addmul12(i64 %a, i64 %b) {
; RV64I-LABEL: addmul12:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 12
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 2
+; RV64I-NEXT: slli a0, a0, 4
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -496,8 +499,9 @@ define i64 @addmul22(i64 %a, i64 %b) {
define i64 @addmul24(i64 %a, i64 %b) {
; RV64I-LABEL: addmul24:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 24
-; RV64I-NEXT: mul a0, a0, a2
+; RV64I-NEXT: slli a2, a0, 3
+; RV64I-NEXT: slli a0, a0, 5
+; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ret
;
@@ -629,8 +633,9 @@ define i64 @addmul4230(i64 %a, i64 %b) {
define i64 @mul96(i64 %a) {
; RV64I-LABEL: mul96:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 96
-; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 5
+; RV64I-NEXT: slli a0, a0, 7
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: mul96:
@@ -798,10 +803,10 @@ define i64 @zext_mul68(i32 signext %a) {
define i64 @zext_mul96(i32 signext %a) {
; RV64I-LABEL: zext_mul96:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 3
-; RV64I-NEXT: slli a1, a1, 37
; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: mulhu a0, a0, a1
+; RV64I-NEXT: srli a1, a0, 27
+; RV64I-NEXT: srli a0, a0, 25
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: zext_mul96:
@@ -856,9 +861,9 @@ define i64 @zext_mul288(i32 signext %a) {
define i64 @zext_mul12884901888(i32 signext %a) {
; RV64I-LABEL: zext_mul12884901888:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 3
-; RV64I-NEXT: slli a1, a1, 32
-; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 32
+; RV64I-NEXT: slli a0, a0, 34
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: zext_mul12884901888:
@@ -1311,8 +1316,9 @@ define i64 @mul4104(i64 %a) {
define signext i32 @mulw192(i32 signext %a) {
; RV64I-LABEL: mulw192:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a1, 192
-; RV64I-NEXT: mulw a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 6
+; RV64I-NEXT: slli a0, a0, 8
+; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: mulw192:
@@ -2620,9 +2626,9 @@ define i64 @regression(i32 signext %x, i32 signext %y) {
; RV64I: # %bb.0:
; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: slli a0, a0, 32
-; RV64I-NEXT: li a1, 3
-; RV64I-NEXT: slli a1, a1, 35
-; RV64I-NEXT: mulhu a0, a0, a1
+; RV64I-NEXT: srli a1, a0, 29
+; RV64I-NEXT: srli a0, a0, 27
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBA-LABEL: regression:
@@ -2735,8 +2741,9 @@ define i64 @bext_mul12(i32 %1, i32 %2) {
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: srlw a0, a0, a1
; RV64I-NEXT: andi a0, a0, 1
-; RV64I-NEXT: li a1, 12
-; RV64I-NEXT: mul a0, a0, a1
+; RV64I-NEXT: slli a1, a0, 2
+; RV64I-NEXT: slli a0, a0, 4
+; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBANOZBB-LABEL: bext_mul12:
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
index 2f0d5bb6e19c4..ee9f96a45d23e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -71,12 +71,12 @@ define fastcc <vscale x 64 x i32> @ret_split_nxv64i32(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a3, a2, 3
-; CHECK-NEXT: add a4, a1, a3
-; CHECK-NEXT: vl8re32.v v8, (a4)
-; CHECK-NEXT: slli a4, a2, 4
-; CHECK-NEXT: li a5, 24
-; CHECK-NEXT: mul a2, a2, a5
+; CHECK-NEXT: slli a4, a2, 5
+; CHECK-NEXT: sub a4, a4, a3
; CHECK-NEXT: add a5, a1, a4
+; CHECK-NEXT: vl8re32.v v8, (a5)
+; CHECK-NEXT: add a5, a1, a3
+; CHECK-NEXT: slli a2, a2, 4
; CHECK-NEXT: vl8re32.v v16, (a1)
; CHECK-NEXT: add a1, a1, a2
; CHECK-NEXT: vl8re32.v v24, (a1)
@@ -84,9 +84,9 @@ define fastcc <vscale x 64 x i32> @ret_split_nxv64i32(ptr %x) {
; CHECK-NEXT: vs8r.v v16, (a0)
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vs8r.v v24, (a2)
-; CHECK-NEXT: add a4, a0, a4
-; CHECK-NEXT: vs8r.v v0, (a4)
-; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vs8r.v v0, (a3)
+; CHECK-NEXT: add a0, a0, a4
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: ret
%v = load <vscale x 64 x i32>, ptr %x
@@ -105,75 +105,73 @@ define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(ptr %x) {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a3, a2, 3
-; CHECK-NEXT: add a4, a1, a3
-; CHECK-NEXT: vl8re32.v v8, (a4)
-; CHECK-NEXT: csrr a4, vlenb
-; CHECK-NEXT: li a5, 24
-; CHECK-NEXT: mul a4, a4, a5
-; CHECK-NEXT: add a4, sp, a4
-; CHECK-NEXT: addi a4, a4, 16
-; CHECK-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
-; CHECK-NEXT: slli a4, a2, 4
-; CHECK-NEXT: add a5, a1, a4
-; CHECK-NEXT: vl8re32.v v8, (a5)
-; CHECK-NEXT: csrr a5, vlenb
-; CHECK-NEXT: slli a5, a5, 4
-; CHECK-NEXT: add a5, sp, a5
-; CHECK-NEXT: addi a5, a5, 16
-; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
-; CHECK-NEXT: li a5, 24
-; CHECK-NEXT: mul a5, a2, a5
+; CHECK-NEXT: slli a4, a2, 5
+; CHECK-NEXT: sub a5, a4, a3
; CHECK-NEXT: add a6, a1, a5
; CHECK-NEXT: vl8re32.v v8, (a6)
; CHECK-NEXT: csrr a6, vlenb
-; CHECK-NEXT: slli a6, a6, 3
+; CHECK-NEXT: li a7, 24
+; CHECK-NEXT: mul a6, a6, a7
; CHECK-NEXT: add a6, sp, a6
; CHECK-NEXT: addi a6, a6, 16
; CHECK-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill
-; CHECK-NEXT: slli a6, a2, 5
-; CHECK-NEXT: add a7, a1, a6
-; CHECK-NEXT: vl8re32.v v8, (a7)
-; CHECK-NEXT: addi a7, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a7) # Unknown-size Folded Spill
-; CHECK-NEXT: li a7, 40
-; CHECK-NEXT: mul a7, a2, a7
-; CHECK-NEXT: add t0, a1, a7
-; CHECK-NEXT: li t1, 48
-; CHECK-NEXT: mul t1, a2, t1
-; CHECK-NEXT: add t2, a1, t1
-; CHECK-NEXT: li t3, 56
+; CHECK-NEXT: slli a6, a2, 4
+; CHECK-NEXT: slli a7, a2, 6
+; CHECK-NEXT: sub t0, a7, a6
+; CHECK-NEXT: add t1, a1, t0
+; CHECK-NEXT: vl8re32.v v8, (t1)
+; CHECK-NEXT: csrr t1, vlenb
+; CHECK-NEXT: slli t1, t1, 4
+; CHECK-NEXT: add t1, sp, t1
+; CHECK-NEXT: addi t1, t1, 16
+; CHECK-NEXT: vs8r.v v8, (t1) # Unknown-size Folded Spill
+; CHECK-NEXT: sub a7, a7, a3
+; CHECK-NEXT: add t1, a1, a7
+; CHECK-NEXT: vl8re32.v v8, (t1)
+; CHECK-NEXT: csrr t1, vlenb
+; CHECK-NEXT: slli t1, t1, 3
+; CHECK-NEXT: add t1, sp, t1
+; CHECK-NEXT: addi t1, t1, 16
+; CHECK-NEXT: vs8r.v v8, (t1) # Unknown-size Folded Spill
+; CHECK-NEXT: add t1, a1, a3
+; CHECK-NEXT: vl8re32.v v8, (t1)
+; CHECK-NEXT: addi t1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (t1) # Unknown-size Folded Spill
+; CHECK-NEXT: add t1, a1, a6
+; CHECK-NEXT: add t2, a1, a4
+; CHECK-NEXT: li t3, 40
; CHECK-NEXT: mul a2, a2, t3
; CHECK-NEXT: add t3, a1, a2
; CHECK-NEXT: vl8re32.v v8, (a1)
-; CHECK-NEXT: vl8re32.v v0, (t0)
+; CHECK-NEXT: vl8re32.v v0, (t1)
; CHECK-NEXT: vl8re32.v v16, (t3)
; CHECK-NEXT: vl8re32.v v24, (t2)
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vs8r.v v16, (a2)
-; CHECK-NEXT: add t1, a0, t1
-; CHECK-NEXT: vs8r.v v24, (t1)
-; CHECK-NEXT: add a7, a0, a7
-; CHECK-NEXT: vs8r.v v0, (a7)
+; CHECK-NEXT: add a4, a0, a4
+; CHECK-NEXT: vs8r.v v24, (a4)
; CHECK-NEXT: add a6, a0, a6
+; CHECK-NEXT: vs8r.v v0, (a6)
+; CHECK-NEXT: add a3, a0, a3
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: vs8r.v v8, (a6)
-; CHECK-NEXT: add a5, a0, a5
+; CHECK-NEXT: vs8r.v v8, (a3)
+; CHECK-NEXT: add a7, a0, a7
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: vs8r.v v8, (a5)
-; CHECK-NEXT: add a4, a0, a4
+; CHECK-NEXT: vs8r.v v8, (a7)
+; CHECK-NEXT: add t0, a0, t0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: vs8r.v v8, (a4)
-; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: vs8r.v v8, (t0)
+; CHECK-NEXT: add a0, a0, a5
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a2, 24
; CHECK-NEXT: mul a1, a1, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
index 4f1fcfbe8cc5d..2a0c9ce8b6299 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
@@ -274,9 +274,9 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_6(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_6:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: li a1, 6
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: srli a1, a0, 3
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a0, a0, a1
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: ret
@@ -297,9 +297,9 @@ define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_22(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_22:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: srli a0, a0, 3
-; CHECK-NEXT: li a1, 6
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: srli a1, a0, 3
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a0, a0, a1
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll
index c26532d355957..1c3b429202adf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll
@@ -80,13 +80,15 @@ define void @strided_store_offset_start(i64 %n, ptr %p) {
;
; RV64-LABEL: strided_store_offset_start:
; RV64: # %bb.0:
-; RV64-NEXT: li a2, 56
-; RV64-NEXT: mul a0, a0, a2
+; RV64-NEXT: slli a2, a0, 3
+; RV64-NEXT: slli a0, a0, 6
+; RV64-NEXT: sub a0, a0, a2
; RV64-NEXT: add a0, a1, a0
; RV64-NEXT: addi a0, a0, 36
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
-; RV64-NEXT: vsse64.v v8, (a0), a2
+; RV64-NEXT: li a1, 56
+; RV64-NEXT: vsse64.v v8, (a0), a1
; RV64-NEXT: ret
%step = tail call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
%.splatinsert = insertelement <vscale x 1 x i64> poison, i64 %n, i64 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
index 3dba881363063..12604711be191 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -3511,14 +3511,14 @@ define <vscale x 32 x i1> @fcmp_oeq_vv_nxv32f64(<vscale x 32 x double> %va, <vsc
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a4, vlenb
-; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul t0, a4, a1
-; CHECK-NEXT: slli t1, a4, 3
+; CHECK-NEXT: slli t0, a4, 3
+; CHECK-NEXT: slli a1, a4, 5
+; CHECK-NEXT: sub t1, a1, t0
; CHECK-NEXT: srli a1, a4, 2
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v7, v0, a1
; CHECK-NEXT: srli a3, a4, 3
-; CHECK-NEXT: add a5, a2, t1
+; CHECK-NEXT: add a5, a2, t0
; CHECK-NEXT: vl8re64.v v8, (a5)
; CHECK-NEXT: slli t3, a4, 4
; CHECK-NEXT: slli a5, a4, 1
@@ -3529,8 +3529,8 @@ define <vscale x 32 x i1> @fcmp_oeq_vv_nxv32f64(<vscale x 32 x double> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a7, a5
; CHECK-NEXT: .LBB171_2:
-; CHECK-NEXT: add t2, a2, t0
-; CHECK-NEXT: add t1, a0, t1
+; CHECK-NEXT: add t2, a0, t0
+; CHECK-NEXT: add t1, a2, t1
; CHECK-NEXT: add t0, a2, t3
; CHECK-NEXT: vl8re64.v v16, (a2)
; CHECK-NEXT: csrr a2, vlenb
@@ -3630,7 +3630,7 @@ define <vscale x 32 x i1> @fcmp_oeq_vv_nxv32f64(<vscale x 32 x double> %va, <vsc
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmfeq.vv v18, v8, v24, v0.t
+; CHECK-NEXT: vmfeq.vv v18, v24, v8, v0.t
; CHECK-NEXT: add a0, a1, a3
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT: vslideup.vx v17, v16, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
index b3150ecea6c0b..064ea816593ac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
@@ -640,12 +640,13 @@ define <vscale x 16 x i64> @mul_bigimm_stepvector_nxv16i64() {
; RV32-NEXT: mul a1, a0, a1
; RV32-NEXT: sw a1, 0(sp)
; RV32-NEXT: srli a0, a0, 3
-; RV32-NEXT: li a1, 62
-; RV32-NEXT: mul a1, a0, a1
-; RV32-NEXT: lui a2, 92455
-; RV32-NEXT: addi a2, a2, -1368
-; RV32-NEXT: mulhu a0, a0, a2
-; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: lui a1, 92455
+; RV32-NEXT: addi a1, a1, -1368
+; RV32-NEXT: mulhu a1, a0, a1
+; RV32-NEXT: slli a2, a0, 1
+; RV32-NEXT: slli a0, a0, 6
+; RV32-NEXT: sub a0, a0, a2
+; RV32-NEXT: add a0, a1, a0
; RV32-NEXT: sw a0, 4(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 56c1ad3527aaa..457d0380ca8a8 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -148,9 +148,10 @@ define i1 @test_srem_even(i4 %X) nounwind {
; RV32M-NEXT: slli a1, a1, 24
; RV32M-NEXT: srli a1, a1, 31
; RV32M-NEXT: add a1, a2, a1
-; RV32M-NEXT: li a2, 6
-; RV32M-NEXT: mul a1, a1, a2
-; RV32M-NEXT: sub a0, a0, a1
+; RV32M-NEXT: slli a2, a1, 3
+; RV32M-NEXT: slli a1, a1, 1
+; RV32M-NEXT: sub a1, a1, a2
+; RV32M-NEXT: add a0, a0, a1
; RV32M-NEXT: andi a0, a0, 15
; RV32M-NEXT: addi a0, a0, -1
; RV32M-NEXT: seqz a0, a0
@@ -166,9 +167,10 @@ define i1 @test_srem_even(i4 %X) nounwind {
; RV64M-NEXT: slli a1, a1, 56
; RV64M-NEXT: srli a1, a1, 63
; RV64M-NEXT: add a1, a2, a1
-; RV64M-NEXT: li a2, 6
-; RV64M-NEXT: mul a1, a1, a2
-; RV64M-NEXT: subw a0, a0, a1
+; RV64M-NEXT: slli a2, a1, 3
+; RV64M-NEXT: slli a1, a1, 1
+; RV64M-NEXT: subw a1, a1, a2
+; RV64M-NEXT: add a0, a0, a1
; RV64M-NEXT: andi a0, a0, 15
; RV64M-NEXT: addi a0, a0, -1
; RV64M-NEXT: seqz a0, a0
@@ -184,9 +186,10 @@ define i1 @test_srem_even(i4 %X) nounwind {
; RV32MV-NEXT: slli a1, a1, 24
; RV32MV-NEXT: srli a1, a1, 31
; RV32MV-NEXT: add a1, a2, a1
-; RV32MV-NEXT: li a2, 6
-; RV32MV-NEXT: mul a1, a1, a2
-; RV32MV-NEXT: sub a0, a0, a1
+; RV32MV-NEXT: slli a2, a1, 3
+; RV32MV-NEXT: slli a1, a1, 1
+; RV32MV-NEXT: sub a1, a1, a2
+; RV32MV-NEXT: add a0, a0, a1
; RV32MV-NEXT: andi a0, a0, 15
; RV32MV-NEXT: addi a0, a0, -1
; RV32MV-NEXT: seqz a0, a0
@@ -202,9 +205,10 @@ define i1 @test_srem_even(i4 %X) nounwind {
; RV64MV-NEXT: slli a1, a1, 56
; RV64MV-NEXT: srli a1, a1, 63
; RV64MV-NEXT: add a1, a2, a1
-; RV64MV-NEXT: li a2, 6
-; RV64MV-NEXT: mul a1, a1, a2
-; RV64MV-NEXT: subw a0, a0, a1
+; RV64MV-NEXT: slli a2, a1, 3
+; RV64MV-NEXT: slli a1, a1, 1
+; RV64MV-NEXT: subw a1, a1, a2
+; RV64MV-NEXT: add a0, a0, a1
; RV64MV-NEXT: andi a0, a0, 15
; RV64MV-NEXT: addi a0, a0, -1
; RV64MV-NEXT: seqz a0, a0
@@ -725,21 +729,21 @@ define void @test_srem_vec(ptr %X) nounwind {
;
; RV64MV-LABEL: test_srem_vec:
; RV64MV: # %bb.0:
-; RV64MV-NEXT: ld a1, 0(a0)
+; RV64MV-NEXT: lbu a1, 12(a0)
; RV64MV-NEXT: lwu a2, 8(a0)
-; RV64MV-NEXT: srli a3, a1, 2
-; RV64MV-NEXT: lbu a4, 12(a0)
-; RV64MV-NEXT: slli a5, a2, 62
-; RV64MV-NEXT: or a3, a5, a3
-; RV64MV-NEXT: srai a3, a3, 31
-; RV64MV-NEXT: slli a4, a4, 32
+; RV64MV-NEXT: slli a1, a1, 32
+; RV64MV-NEXT: ld a3, 0(a0)
+; RV64MV-NEXT: or a1, a2, a1
+; RV64MV-NEXT: slli a1, a1, 29
+; RV64MV-NEXT: srai a1, a1, 31
+; RV64MV-NEXT: srli a4, a3, 2
+; RV64MV-NEXT: slli a2, a2, 62
; RV64MV-NEXT: or a2, a2, a4
-; RV64MV-NEXT: slli a2, a2, 29
; RV64MV-NEXT: lui a4, %hi(.LCPI3_0)
; RV64MV-NEXT: ld a4, %lo(.LCPI3_0)(a4)
; RV64MV-NEXT: srai a2, a2, 31
-; RV64MV-NEXT: slli a1, a1, 31
-; RV64MV-NEXT: srai a1, a1, 31
+; RV64MV-NEXT: slli a3, a3, 31
+; RV64MV-NEXT: srai a3, a3, 31
; RV64MV-NEXT: mulh a4, a2, a4
; RV64MV-NEXT: srli a5, a4, 63
; RV64MV-NEXT: srai a4, a4, 1
@@ -747,27 +751,28 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV64MV-NEXT: lui a5, %hi(.LCPI3_1)
; RV64MV-NEXT: ld a5, %lo(.LCPI3_1)(a5)
; RV64MV-NEXT: add a2, a2, a4
-; RV64MV-NEXT: slli a4, a4, 2
-; RV64MV-NEXT: add a2, a2, a4
+; RV64MV-NEXT: slli a4, a4, 3
+; RV64MV-NEXT: sub a2, a2, a4
; RV64MV-NEXT: mulh a4, a3, a5
; RV64MV-NEXT: srli a5, a4, 63
-; RV64MV-NEXT: srai a4, a4, 1
; RV64MV-NEXT: add a4, a4, a5
+; RV64MV-NEXT: slli a5, a4, 3
+; RV64MV-NEXT: slli a4, a4, 1
+; RV64MV-NEXT: sub a4, a4, a5
; RV64MV-NEXT: lui a5, %hi(.LCPI3_2)
; RV64MV-NEXT: ld a5, %lo(.LCPI3_2)(a5)
; RV64MV-NEXT: add a3, a3, a4
-; RV64MV-NEXT: slli a4, a4, 3
-; RV64MV-NEXT: sub a3, a3, a4
-; RV64MV-NEXT: mulh a4, a1, a5
-; RV64MV-NEXT: srli a5, a4, 63
-; RV64MV-NEXT: add a4, a4, a5
-; RV64MV-NEXT: li a5, 6
-; RV64MV-NEXT: mul a4, a4, a5
-; RV64MV-NEXT: sub a1, a1, a4
; RV64MV-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64MV-NEXT: vmv.v.x v8, a1
-; RV64MV-NEXT: vslide1down.vx v8, v8, a3
+; RV64MV-NEXT: vmv.v.x v8, a3
; RV64MV-NEXT: vslide1down.vx v8, v8, a2
+; RV64MV-NEXT: mulh a2, a1, a5
+; RV64MV-NEXT: srli a3, a2, 63
+; RV64MV-NEXT: srai a2, a2, 1
+; RV64MV-NEXT: add a2, a2, a3
+; RV64MV-NEXT: slli a3, a2, 2
+; RV64MV-NEXT: add a1, a1, a2
+; RV64MV-NEXT: add a1, a1, a3
+; RV64MV-NEXT: vslide1down.vx v8, v8, a1
; RV64MV-NEXT: vslidedown.vi v8, v8, 1
; RV64MV-NEXT: li a1, -1
; RV64MV-NEXT: srli a1, a1, 31
diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
index 540883fdc517a..c057c656e0fb7 100644
--- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
@@ -56,18 +56,19 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
; RV32IM: # %bb.0:
; RV32IM-NEXT: lhu a2, 12(a1)
; RV32IM-NEXT: lhu a3, 8(a1)
-; RV32IM-NEXT: lhu a4, 0(a1)
-; RV32IM-NEXT: lhu a1, 4(a1)
-; RV32IM-NEXT: lui a5, 11038
-; RV32IM-NEXT: addi a5, a5, -1465
-; RV32IM-NEXT: mulhu a5, a4, a5
-; RV32IM-NEXT: li a6, 95
-; RV32IM-NEXT: mul a5, a5, a6
-; RV32IM-NEXT: sub a4, a4, a5
+; RV32IM-NEXT: lhu a4, 4(a1)
+; RV32IM-NEXT: lhu a1, 0(a1)
; RV32IM-NEXT: lui a5, 8456
; RV32IM-NEXT: addi a5, a5, 1058
+; RV32IM-NEXT: mulhu a5, a4, a5
+; RV32IM-NEXT: slli a6, a5, 7
+; RV32IM-NEXT: slli a5, a5, 2
+; RV32IM-NEXT: sub a5, a5, a6
+; RV32IM-NEXT: add a4, a4, a5
+; RV32IM-NEXT: lui a5, 11038
+; RV32IM-NEXT: addi a5, a5, -1465
; RV32IM-NEXT: mulhu a5, a1, a5
-; RV32IM-NEXT: li a6, 124
+; RV32IM-NEXT: li a6, 95
; RV32IM-NEXT: mul a5, a5, a6
; RV32IM-NEXT: sub a1, a1, a5
; RV32IM-NEXT: lui a5, 10700
@@ -84,8 +85,8 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
; RV32IM-NEXT: sub a2, a2, a5
; RV32IM-NEXT: sh a2, 6(a0)
; RV32IM-NEXT: sh a3, 4(a0)
-; RV32IM-NEXT: sh a1, 2(a0)
-; RV32IM-NEXT: sh a4, 0(a0)
+; RV32IM-NEXT: sh a1, 0(a0)
+; RV32IM-NEXT: sh a4, 2(a0)
; RV32IM-NEXT: ret
;
; RV64I-LABEL: fold_urem_vec_1:
@@ -132,22 +133,23 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
;
; RV64IM-LABEL: fold_urem_vec_1:
; RV64IM: # %bb.0:
-; RV64IM-NEXT: lhu a2, 0(a1)
+; RV64IM-NEXT: lhu a2, 8(a1)
; RV64IM-NEXT: lui a3, %hi(.LCPI0_0)
; RV64IM-NEXT: ld a3, %lo(.LCPI0_0)(a3)
; RV64IM-NEXT: lhu a4, 24(a1)
; RV64IM-NEXT: lhu a5, 16(a1)
-; RV64IM-NEXT: lhu a1, 8(a1)
+; RV64IM-NEXT: lhu a1, 0(a1)
; RV64IM-NEXT: mulhu a3, a2, a3
-; RV64IM-NEXT: lui a6, %hi(.LCPI0_1)
-; RV64IM-NEXT: ld a6, %lo(.LCPI0_1)(a6)
-; RV64IM-NEXT: li a7, 95
-; RV64IM-NEXT: mul a3, a3, a7
-; RV64IM-NEXT: subw a2, a2, a3
-; RV64IM-NEXT: mulhu a3, a1, a6
+; RV64IM-NEXT: slli a6, a3, 7
+; RV64IM-NEXT: lui a7, %hi(.LCPI0_1)
+; RV64IM-NEXT: ld a7, %lo(.LCPI0_1)(a7)
+; RV64IM-NEXT: slli a3, a3, 2
+; RV64IM-NEXT: subw a3, a3, a6
+; RV64IM-NEXT: add a2, a2, a3
+; RV64IM-NEXT: mulhu a3, a1, a7
; RV64IM-NEXT: lui a6, %hi(.LCPI0_2)
; RV64IM-NEXT: ld a6, %lo(.LCPI0_2)(a6)
-; RV64IM-NEXT: li a7, 124
+; RV64IM-NEXT: li a7, 95
; RV64IM-NEXT: mul a3, a3, a7
; RV64IM-NEXT: subw a1, a1, a3
; RV64IM-NEXT: mulhu a3, a5, a6
@@ -162,8 +164,8 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
; RV64IM-NEXT: subw a4, a4, a3
; RV64IM-NEXT: sh a4, 6(a0)
; RV64IM-NEXT: sh a5, 4(a0)
-; RV64IM-NEXT: sh a1, 2(a0)
-; RV64IM-NEXT: sh a2, 0(a0)
+; RV64IM-NEXT: sh a1, 0(a0)
+; RV64IM-NEXT: sh a2, 2(a0)
; RV64IM-NEXT: ret
%1 = urem <4 x i16> %x, <i16 95, i16 124, i16 98, i16 1003>
ret <4 x i16> %1