[llvm] [TargetLowering] Improve one signature of forceExpandWideMUL. (PR #123991)

via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 22 11:23:49 PST 2025


llvmbot wrote:



@llvm/pr-subscribers-backend-x86

Author: Craig Topper (topperc)


We have two forceExpandWideMUL functions. One takes the low and high halves of 2 inputs and calculates the low and high half of their product. This does not calculate the full 2x width product.

The other signature takes 2 inputs and calculates the low and high half of their full 2x width product. Previously it did this by sign/zero-extending the inputs to create the high halves and then calling the other function.

We can instead copy the algorithm from the other function and use the Signed flag to determine whether we should do SRA or SRL. This avoids the need to multiply the high parts of the inputs and add them into the high half of the result. This improves the generated code for signed multiplication.
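
For anyone who wants to see the decomposition outside of SelectionDAG, here is a minimal standalone C++ sketch of the same Knuth / Hacker's Delight scheme, specialized to a 64x64->128 multiply built from 64-bit operations. This is not the patch itself, and the helper name `wideMul64` is purely illustrative; the only place `Signed` matters is whether the high halves are produced with an arithmetic or a logical shift, which is the point of the change.

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative only: expand a 64x64->128 multiply into 64-bit operations,
// mirroring the Knuth/Hacker's Delight scheme used by the patch. All
// arithmetic is modular, like the ISD::MUL/ISD::ADD nodes it stands in for.
static void wideMul64(uint64_t LHS, uint64_t RHS, bool Signed,
                      uint64_t &Lo, uint64_t &Hi) {
  const unsigned HalfBits = 32;
  const uint64_t Mask = 0xFFFFFFFFu;
  // Shift helper: SRA when Signed, SRL otherwise.
  auto Shr = [&](uint64_t X) {
    return Signed ? (uint64_t)((int64_t)X >> HalfBits) : X >> HalfBits;
  };

  uint64_t LL = LHS & Mask, RL = RHS & Mask;

  // Low x low partial product; its high half is always a logical shift.
  uint64_t T = LL * RL;
  uint64_t TL = T & Mask;
  uint64_t TH = T >> HalfBits;

  // High halves of the inputs: sign- or zero-extended via the shift choice.
  uint64_t LH = Shr(LHS), RH = Shr(RHS);

  uint64_t U = LH * RL + TH;
  uint64_t UL = U & Mask, UH = Shr(U);

  uint64_t V = LL * RH + UL;
  uint64_t VH = Shr(V);

  Lo = TL + (V << HalfBits); // low 64 bits of the product
  Hi = LH * RH + (UH + VH);  // high 64 bits (mulhs/mulhu)
}

int main() {
  uint64_t Lo, Hi;
  wideMul64((uint64_t)-7, 3, /*Signed=*/true, Lo, Hi); // -21
  std::printf("signed:   hi=%016llx lo=%016llx\n",
              (unsigned long long)Hi, (unsigned long long)Lo);
  wideMul64(~0ULL, 3, /*Signed=*/false, Lo, Hi); // 3 * (2^64 - 1)
  std::printf("unsigned: hi=%016llx lo=%016llx\n",
              (unsigned long long)Hi, (unsigned long long)Lo);
}
```

Compared to extending first and using the half-based expansion, the extra multiplies against the sign words of the inputs disappear; the sign information rides along in the SRA'd halves instead.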

This should improve the performance of #123262. I don't know yet how close we will get to GCC.

---

Patch is 364.22 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/123991.diff


16 Files Affected:

- (modified) llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp (+65-14) 
- (modified) llvm/test/CodeGen/AArch64/i128-math.ll (+77-101) 
- (modified) llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll (+23-31) 
- (modified) llvm/test/CodeGen/LoongArch/smul-with-overflow.ll (+265-362) 
- (modified) llvm/test/CodeGen/RISCV/xaluo.ll (+339-634) 
- (modified) llvm/test/CodeGen/SPARC/smulo-128-legalisation-lowering.ll (+149-193) 
- (modified) llvm/test/CodeGen/Thumb/smul_fix.ll (+107-158) 
- (modified) llvm/test/CodeGen/Thumb/smul_fix_sat.ll (+195-266) 
- (modified) llvm/test/CodeGen/Thumb/umul_fix.ll (+133-213) 
- (modified) llvm/test/CodeGen/Thumb/umul_fix_sat.ll (+87-160) 
- (modified) llvm/test/CodeGen/X86/muloti.ll (+33-46) 
- (modified) llvm/test/CodeGen/X86/smul-with-overflow.ll (+459-619) 
- (modified) llvm/test/CodeGen/X86/smul_fix_sat.ll (+40-53) 
- (modified) llvm/test/CodeGen/X86/smulo-128-legalisation-lowering.ll (+808-985) 
- (modified) llvm/test/CodeGen/X86/vec_smulo.ll (+428-584) 
- (modified) llvm/test/CodeGen/X86/xmulo.ll (+161-227) 


``````````diff
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 5861a95c090b1d..35b5226591b17c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -10952,22 +10952,73 @@ void TargetLowering::forceExpandWideMUL(SelectionDAG &DAG, const SDLoc &dl,
                                         SDValue &Hi) const {
   EVT VT = LHS.getValueType();
   assert(RHS.getValueType() == VT && "Mismatching operand types");
+  EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
+  // We can fall back to a libcall with an illegal type for the MUL if we
+  // have a libcall big enough.
+  // Also, we can fall back to a division in some cases, but that's a big
+  // performance hit in the general case.
+  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
+  if (WideVT == MVT::i16)
+    LC = RTLIB::MUL_I16;
+  else if (WideVT == MVT::i32)
+    LC = RTLIB::MUL_I32;
+  else if (WideVT == MVT::i64)
+    LC = RTLIB::MUL_I64;
+  else if (WideVT == MVT::i128)
+    LC = RTLIB::MUL_I128;
 
-  SDValue HiLHS;
-  SDValue HiRHS;
-  if (Signed) {
-    // The high part is obtained by SRA'ing all but one of the bits of low
-    // part.
-    unsigned LoSize = VT.getFixedSizeInBits();
-    SDValue Shift = DAG.getShiftAmountConstant(LoSize - 1, VT, dl);
-    HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, Shift);
-    HiRHS = DAG.getNode(ISD::SRA, dl, VT, RHS, Shift);
-  } else {
-    HiLHS = DAG.getConstant(0, dl, VT);
-    HiRHS = DAG.getConstant(0, dl, VT);
+  if (LC != RTLIB::UNKNOWN_LIBCALL && getLibcallName(LC)) {
+    SDValue HiLHS, HiRHS;
+    if (Signed) {
+      // The high part is obtained by SRA'ing all but one of the bits of low
+      // part.
+      unsigned LoSize = VT.getFixedSizeInBits();
+      SDValue Shift = DAG.getShiftAmountConstant(LoSize - 1, VT, dl);
+      HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, Shift);
+      HiRHS = DAG.getNode(ISD::SRA, dl, VT, RHS, Shift);
+    } else {
+      HiLHS = DAG.getConstant(0, dl, VT);
+      HiRHS = DAG.getConstant(0, dl, VT);
+    }
+    forceExpandWideMUL(DAG, dl, Signed, WideVT, LHS, HiLHS, RHS, HiRHS, Lo, Hi);
+    return;
   }
-  EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
-  forceExpandWideMUL(DAG, dl, Signed, WideVT, LHS, HiLHS, RHS, HiRHS, Lo, Hi);
+
+  // Expand the multiplication by brute force. This is a generalized-version of
+  // the code from Hacker's Delight (itself derived from Knuth's Algorithm M
+  // from section 4.3.1) combined with the Hacker's delight code
+  // for calculating mulhs.
+  unsigned Bits = VT.getSizeInBits();
+  unsigned HalfBits = Bits / 2;
+  SDValue Mask = DAG.getConstant(APInt::getLowBitsSet(Bits, HalfBits), dl, VT);
+  SDValue LL = DAG.getNode(ISD::AND, dl, VT, LHS, Mask);
+  SDValue RL = DAG.getNode(ISD::AND, dl, VT, RHS, Mask);
+
+  SDValue T = DAG.getNode(ISD::MUL, dl, VT, LL, RL);
+  SDValue TL = DAG.getNode(ISD::AND, dl, VT, T, Mask);
+
+  SDValue Shift = DAG.getShiftAmountConstant(HalfBits, VT, dl);
+  // This is always an unsigned shift.
+  SDValue TH = DAG.getNode(ISD::SRL, dl, VT, T, Shift);
+
+  unsigned ShiftOpc = Signed ? ISD::SRA : ISD::SRL;
+  SDValue LH = DAG.getNode(ShiftOpc, dl, VT, LHS, Shift);
+  SDValue RH = DAG.getNode(ShiftOpc, dl, VT, RHS, Shift);
+
+  SDValue U =
+      DAG.getNode(ISD::ADD, dl, VT, DAG.getNode(ISD::MUL, dl, VT, LH, RL), TH);
+  SDValue UL = DAG.getNode(ISD::AND, dl, VT, U, Mask);
+  SDValue UH = DAG.getNode(ShiftOpc, dl, VT, U, Shift);
+
+  SDValue V =
+      DAG.getNode(ISD::ADD, dl, VT, DAG.getNode(ISD::MUL, dl, VT, LL, RH), UL);
+  SDValue VH = DAG.getNode(ShiftOpc, dl, VT, V, Shift);
+
+  Lo = DAG.getNode(ISD::ADD, dl, VT, TL,
+                   DAG.getNode(ISD::SHL, dl, VT, V, Shift));
+
+  Hi = DAG.getNode(ISD::ADD, dl, VT, DAG.getNode(ISD::MUL, dl, VT, LH, RH),
+                   DAG.getNode(ISD::ADD, dl, VT, UH, VH));
 }
 
 SDValue
diff --git a/llvm/test/CodeGen/AArch64/i128-math.ll b/llvm/test/CodeGen/AArch64/i128-math.ll
index 9ae906249826d3..9e1c0c1b115ab6 100644
--- a/llvm/test/CodeGen/AArch64/i128-math.ll
+++ b/llvm/test/CodeGen/AArch64/i128-math.ll
@@ -355,40 +355,32 @@ define i128 @i128_mul(i128 %x, i128 %y) {
 define { i128, i8 } @i128_checked_mul(i128 %x, i128 %y) {
 ; CHECK-LABEL: i128_checked_mul:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    asr x8, x1, #63
-; CHECK-NEXT:    asr x11, x3, #63
-; CHECK-NEXT:    umulh x13, x0, x2
-; CHECK-NEXT:    mul x9, x2, x8
-; CHECK-NEXT:    umulh x10, x2, x8
-; CHECK-NEXT:    umulh x12, x11, x0
-; CHECK-NEXT:    mul x14, x1, x2
-; CHECK-NEXT:    add x10, x10, x9
-; CHECK-NEXT:    madd x8, x3, x8, x10
-; CHECK-NEXT:    madd x10, x11, x1, x12
-; CHECK-NEXT:    mul x11, x11, x0
-; CHECK-NEXT:    umulh x12, x1, x2
-; CHECK-NEXT:    mul x15, x0, x3
-; CHECK-NEXT:    add x10, x10, x11
-; CHECK-NEXT:    adds x9, x11, x9
-; CHECK-NEXT:    umulh x16, x0, x3
-; CHECK-NEXT:    adc x10, x10, x8
-; CHECK-NEXT:    adds x8, x14, x13
-; CHECK-NEXT:    cinc x12, x12, hs
-; CHECK-NEXT:    mul x11, x1, x3
-; CHECK-NEXT:    adds x8, x15, x8
-; CHECK-NEXT:    umulh x13, x1, x3
+; CHECK-NEXT:    asr x9, x1, #63
+; CHECK-NEXT:    umulh x10, x0, x2
+; CHECK-NEXT:    asr x13, x3, #63
+; CHECK-NEXT:    mul x11, x1, x2
+; CHECK-NEXT:    umulh x8, x1, x2
+; CHECK-NEXT:    mul x9, x9, x2
+; CHECK-NEXT:    adds x10, x11, x10
+; CHECK-NEXT:    mul x14, x0, x3
+; CHECK-NEXT:    umulh x12, x0, x3
+; CHECK-NEXT:    adc x9, x8, x9
+; CHECK-NEXT:    mul x13, x0, x13
+; CHECK-NEXT:    adds x8, x14, x10
+; CHECK-NEXT:    mul x15, x1, x3
+; CHECK-NEXT:    smulh x10, x1, x3
 ; CHECK-NEXT:    mov x1, x8
-; CHECK-NEXT:    cinc x14, x16, hs
-; CHECK-NEXT:    adds x12, x12, x14
+; CHECK-NEXT:    adc x11, x12, x13
+; CHECK-NEXT:    asr x12, x9, #63
+; CHECK-NEXT:    asr x13, x11, #63
+; CHECK-NEXT:    adds x9, x9, x11
+; CHECK-NEXT:    asr x11, x8, #63
 ; CHECK-NEXT:    mul x0, x0, x2
-; CHECK-NEXT:    cset w14, hs
-; CHECK-NEXT:    adds x11, x11, x12
-; CHECK-NEXT:    asr x12, x8, #63
-; CHECK-NEXT:    adc x13, x13, x14
-; CHECK-NEXT:    adds x9, x11, x9
-; CHECK-NEXT:    adc x10, x13, x10
-; CHECK-NEXT:    cmp x9, x12
-; CHECK-NEXT:    ccmp x10, x12, #0, eq
+; CHECK-NEXT:    adc x12, x12, x13
+; CHECK-NEXT:    adds x9, x15, x9
+; CHECK-NEXT:    adc x10, x10, x12
+; CHECK-NEXT:    cmp x9, x11
+; CHECK-NEXT:    ccmp x10, x11, #0, eq
 ; CHECK-NEXT:    cset w2, eq
 ; CHECK-NEXT:    ret
   %1 = tail call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %x, i128 %y)
@@ -404,40 +396,32 @@ define { i128, i8 } @i128_checked_mul(i128 %x, i128 %y) {
 define { i128, i8 } @i128_overflowing_mul(i128 %x, i128 %y) {
 ; CHECK-LABEL: i128_overflowing_mul:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    asr x8, x1, #63
-; CHECK-NEXT:    asr x11, x3, #63
-; CHECK-NEXT:    umulh x13, x0, x2
-; CHECK-NEXT:    mul x9, x2, x8
-; CHECK-NEXT:    umulh x10, x2, x8
-; CHECK-NEXT:    umulh x12, x11, x0
-; CHECK-NEXT:    mul x14, x1, x2
-; CHECK-NEXT:    add x10, x10, x9
-; CHECK-NEXT:    madd x8, x3, x8, x10
-; CHECK-NEXT:    madd x10, x11, x1, x12
-; CHECK-NEXT:    mul x11, x11, x0
-; CHECK-NEXT:    umulh x12, x1, x2
-; CHECK-NEXT:    mul x15, x0, x3
-; CHECK-NEXT:    add x10, x10, x11
-; CHECK-NEXT:    adds x9, x11, x9
-; CHECK-NEXT:    umulh x16, x0, x3
-; CHECK-NEXT:    adc x10, x10, x8
-; CHECK-NEXT:    adds x8, x14, x13
-; CHECK-NEXT:    cinc x12, x12, hs
-; CHECK-NEXT:    mul x11, x1, x3
-; CHECK-NEXT:    adds x8, x15, x8
-; CHECK-NEXT:    umulh x13, x1, x3
+; CHECK-NEXT:    asr x9, x1, #63
+; CHECK-NEXT:    umulh x10, x0, x2
+; CHECK-NEXT:    asr x13, x3, #63
+; CHECK-NEXT:    mul x11, x1, x2
+; CHECK-NEXT:    umulh x8, x1, x2
+; CHECK-NEXT:    mul x9, x9, x2
+; CHECK-NEXT:    adds x10, x11, x10
+; CHECK-NEXT:    mul x14, x0, x3
+; CHECK-NEXT:    umulh x12, x0, x3
+; CHECK-NEXT:    adc x9, x8, x9
+; CHECK-NEXT:    mul x13, x0, x13
+; CHECK-NEXT:    adds x8, x14, x10
+; CHECK-NEXT:    mul x15, x1, x3
+; CHECK-NEXT:    smulh x10, x1, x3
 ; CHECK-NEXT:    mov x1, x8
-; CHECK-NEXT:    cinc x14, x16, hs
-; CHECK-NEXT:    adds x12, x12, x14
+; CHECK-NEXT:    adc x11, x12, x13
+; CHECK-NEXT:    asr x12, x9, #63
+; CHECK-NEXT:    asr x13, x11, #63
+; CHECK-NEXT:    adds x9, x9, x11
+; CHECK-NEXT:    asr x11, x8, #63
 ; CHECK-NEXT:    mul x0, x0, x2
-; CHECK-NEXT:    cset w14, hs
-; CHECK-NEXT:    adds x11, x11, x12
-; CHECK-NEXT:    asr x12, x8, #63
-; CHECK-NEXT:    adc x13, x13, x14
-; CHECK-NEXT:    adds x9, x11, x9
-; CHECK-NEXT:    adc x10, x13, x10
-; CHECK-NEXT:    cmp x9, x12
-; CHECK-NEXT:    ccmp x10, x12, #0, eq
+; CHECK-NEXT:    adc x12, x12, x13
+; CHECK-NEXT:    adds x9, x15, x9
+; CHECK-NEXT:    adc x10, x10, x12
+; CHECK-NEXT:    cmp x9, x11
+; CHECK-NEXT:    ccmp x10, x11, #0, eq
 ; CHECK-NEXT:    cset w2, ne
 ; CHECK-NEXT:    ret
   %1 = tail call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %x, i128 %y)
@@ -452,46 +436,38 @@ define { i128, i8 } @i128_overflowing_mul(i128 %x, i128 %y) {
 define i128 @i128_saturating_mul(i128 %x, i128 %y) {
 ; CHECK-LABEL: i128_saturating_mul:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    asr x8, x1, #63
-; CHECK-NEXT:    asr x11, x3, #63
-; CHECK-NEXT:    umulh x13, x0, x2
-; CHECK-NEXT:    mul x9, x2, x8
-; CHECK-NEXT:    umulh x10, x2, x8
-; CHECK-NEXT:    umulh x12, x11, x0
-; CHECK-NEXT:    mul x14, x1, x2
-; CHECK-NEXT:    add x10, x10, x9
-; CHECK-NEXT:    madd x8, x3, x8, x10
-; CHECK-NEXT:    madd x10, x11, x1, x12
-; CHECK-NEXT:    mul x11, x11, x0
-; CHECK-NEXT:    umulh x12, x1, x2
-; CHECK-NEXT:    mul x16, x0, x3
-; CHECK-NEXT:    add x10, x10, x11
-; CHECK-NEXT:    adds x9, x11, x9
-; CHECK-NEXT:    umulh x15, x0, x3
-; CHECK-NEXT:    adc x8, x10, x8
-; CHECK-NEXT:    adds x10, x14, x13
-; CHECK-NEXT:    cinc x12, x12, hs
-; CHECK-NEXT:    mul x17, x1, x3
-; CHECK-NEXT:    adds x10, x16, x10
-; CHECK-NEXT:    umulh x11, x1, x3
-; CHECK-NEXT:    cinc x13, x15, hs
-; CHECK-NEXT:    adds x12, x12, x13
-; CHECK-NEXT:    cset w13, hs
-; CHECK-NEXT:    adds x12, x17, x12
-; CHECK-NEXT:    adc x11, x11, x13
-; CHECK-NEXT:    adds x9, x12, x9
-; CHECK-NEXT:    asr x12, x10, #63
+; CHECK-NEXT:    asr x9, x1, #63
+; CHECK-NEXT:    umulh x10, x0, x2
+; CHECK-NEXT:    asr x13, x3, #63
+; CHECK-NEXT:    mul x11, x1, x2
+; CHECK-NEXT:    umulh x8, x1, x2
+; CHECK-NEXT:    mul x9, x9, x2
+; CHECK-NEXT:    adds x10, x11, x10
+; CHECK-NEXT:    mul x14, x0, x3
+; CHECK-NEXT:    umulh x12, x0, x3
+; CHECK-NEXT:    adc x8, x8, x9
+; CHECK-NEXT:    mul x13, x0, x13
+; CHECK-NEXT:    adds x9, x14, x10
+; CHECK-NEXT:    mul x11, x1, x3
+; CHECK-NEXT:    adc x10, x12, x13
+; CHECK-NEXT:    smulh x12, x1, x3
+; CHECK-NEXT:    asr x13, x8, #63
+; CHECK-NEXT:    asr x14, x10, #63
+; CHECK-NEXT:    adds x8, x8, x10
+; CHECK-NEXT:    adc x10, x13, x14
+; CHECK-NEXT:    adds x8, x11, x8
+; CHECK-NEXT:    asr x11, x9, #63
 ; CHECK-NEXT:    mul x13, x0, x2
-; CHECK-NEXT:    adc x8, x11, x8
-; CHECK-NEXT:    eor x11, x3, x1
-; CHECK-NEXT:    eor x8, x8, x12
-; CHECK-NEXT:    eor x9, x9, x12
-; CHECK-NEXT:    asr x11, x11, #63
-; CHECK-NEXT:    orr x8, x9, x8
-; CHECK-NEXT:    eor x9, x11, #0x7fffffffffffffff
+; CHECK-NEXT:    adc x10, x12, x10
+; CHECK-NEXT:    eor x12, x3, x1
+; CHECK-NEXT:    eor x8, x8, x11
+; CHECK-NEXT:    eor x10, x10, x11
+; CHECK-NEXT:    asr x11, x12, #63
+; CHECK-NEXT:    orr x8, x8, x10
+; CHECK-NEXT:    eor x10, x11, #0x7fffffffffffffff
 ; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    csel x1, x9, x10, ne
 ; CHECK-NEXT:    csinv x0, x13, x11, eq
+; CHECK-NEXT:    csel x1, x10, x9, ne
 ; CHECK-NEXT:    ret
   %1 = tail call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %x, i128 %y)
   %2 = extractvalue { i128, i1 } %1, 0
diff --git a/llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll b/llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll
index 08045e814a35ef..edfd80b4f27061 100644
--- a/llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll
+++ b/llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll
@@ -35,41 +35,33 @@ start:
 define i128 @__muloti4(i128 %0, i128 %1, ptr nocapture nonnull writeonly align 4 %2) #2 {
 ; AARCH-LABEL: __muloti4:
 ; AARCH:       // %bb.0: // %Entry
-; AARCH-NEXT:    asr x10, x1, #63
+; AARCH-NEXT:    asr x11, x1, #63
 ; AARCH-NEXT:    asr x9, x3, #63
-; AARCH-NEXT:    umulh x14, x0, x2
+; AARCH-NEXT:    umulh x12, x0, x2
 ; AARCH-NEXT:    mov x8, x1
 ; AARCH-NEXT:    str wzr, [x4]
-; AARCH-NEXT:    mul x12, x2, x10
-; AARCH-NEXT:    umulh x13, x2, x10
-; AARCH-NEXT:    umulh x11, x9, x0
-; AARCH-NEXT:    mul x15, x1, x2
-; AARCH-NEXT:    add x13, x13, x12
-; AARCH-NEXT:    madd x11, x9, x1, x11
-; AARCH-NEXT:    mul x9, x9, x0
-; AARCH-NEXT:    madd x10, x3, x10, x13
-; AARCH-NEXT:    umulh x13, x1, x2
-; AARCH-NEXT:    add x11, x11, x9
-; AARCH-NEXT:    adds x9, x9, x12
-; AARCH-NEXT:    mul x16, x0, x3
-; AARCH-NEXT:    adc x10, x11, x10
-; AARCH-NEXT:    adds x11, x15, x14
-; AARCH-NEXT:    umulh x17, x0, x3
-; AARCH-NEXT:    cinc x13, x13, hs
-; AARCH-NEXT:    mul x12, x1, x3
-; AARCH-NEXT:    adds x1, x16, x11
-; AARCH-NEXT:    umulh x11, x8, x3
-; AARCH-NEXT:    cinc x14, x17, hs
-; AARCH-NEXT:    adds x13, x13, x14
+; AARCH-NEXT:    mul x13, x1, x2
+; AARCH-NEXT:    umulh x10, x1, x2
+; AARCH-NEXT:    mul x11, x11, x2
+; AARCH-NEXT:    adds x12, x13, x12
+; AARCH-NEXT:    mul x15, x0, x3
+; AARCH-NEXT:    umulh x14, x0, x3
+; AARCH-NEXT:    adc x10, x10, x11
+; AARCH-NEXT:    mul x9, x0, x9
+; AARCH-NEXT:    mul x16, x1, x3
+; AARCH-NEXT:    adds x1, x15, x12
+; AARCH-NEXT:    asr x12, x10, #63
+; AARCH-NEXT:    smulh x11, x8, x3
+; AARCH-NEXT:    adc x9, x14, x9
+; AARCH-NEXT:    asr x13, x9, #63
+; AARCH-NEXT:    adds x9, x10, x9
+; AARCH-NEXT:    asr x10, x1, #63
 ; AARCH-NEXT:    mul x0, x0, x2
-; AARCH-NEXT:    cset w14, hs
-; AARCH-NEXT:    adds x12, x12, x13
-; AARCH-NEXT:    asr x13, x1, #63
-; AARCH-NEXT:    adc x11, x11, x14
-; AARCH-NEXT:    adds x9, x12, x9
-; AARCH-NEXT:    adc x10, x11, x10
-; AARCH-NEXT:    cmp x9, x13
-; AARCH-NEXT:    ccmp x10, x13, #0, eq
+; AARCH-NEXT:    adc x12, x12, x13
+; AARCH-NEXT:    adds x9, x16, x9
+; AARCH-NEXT:    adc x11, x11, x12
+; AARCH-NEXT:    cmp x9, x10
+; AARCH-NEXT:    ccmp x11, x10, #0, eq
 ; AARCH-NEXT:    cset w9, ne
 ; AARCH-NEXT:    tbz x8, #63, .LBB1_2
 ; AARCH-NEXT:  // %bb.1: // %Entry
diff --git a/llvm/test/CodeGen/LoongArch/smul-with-overflow.ll b/llvm/test/CodeGen/LoongArch/smul-with-overflow.ll
index 67a10d4bcbaea9..43d56e5d5eb2fb 100644
--- a/llvm/test/CodeGen/LoongArch/smul-with-overflow.ll
+++ b/llvm/test/CodeGen/LoongArch/smul-with-overflow.ll
@@ -10,43 +10,33 @@ define zeroext i1 @smuloi64(i64 %v1, i64 %v2, ptr %res) {
 ; LA32-NEXT:    add.w $a5, $a6, $a5
 ; LA32-NEXT:    sltu $a6, $a5, $a6
 ; LA32-NEXT:    mulh.wu $a7, $a1, $a2
+; LA32-NEXT:    srai.w $t0, $a1, 31
+; LA32-NEXT:    mul.w $t0, $t0, $a2
+; LA32-NEXT:    add.w $a7, $a7, $t0
 ; LA32-NEXT:    add.w $a6, $a7, $a6
-; LA32-NEXT:    mul.w $a7, $a0, $a3
-; LA32-NEXT:    add.w $a5, $a7, $a5
-; LA32-NEXT:    sltu $a7, $a5, $a7
-; LA32-NEXT:    mulh.wu $t0, $a0, $a3
-; LA32-NEXT:    add.w $a7, $t0, $a7
-; LA32-NEXT:    add.w $a7, $a6, $a7
-; LA32-NEXT:    mul.w $t0, $a1, $a3
-; LA32-NEXT:    add.w $t1, $t0, $a7
-; LA32-NEXT:    srai.w $t2, $a1, 31
-; LA32-NEXT:    mul.w $t3, $a2, $t2
-; LA32-NEXT:    srai.w $t4, $a3, 31
-; LA32-NEXT:    mul.w $t5, $t4, $a0
-; LA32-NEXT:    add.w $t6, $t5, $t3
-; LA32-NEXT:    add.w $t7, $t1, $t6
-; LA32-NEXT:    sltu $t8, $t7, $t1
-; LA32-NEXT:    sltu $t0, $t1, $t0
-; LA32-NEXT:    sltu $a6, $a7, $a6
-; LA32-NEXT:    mulh.wu $a7, $a1, $a3
+; LA32-NEXT:    mulh.wu $a7, $a0, $a3
+; LA32-NEXT:    srai.w $t0, $a3, 31
+; LA32-NEXT:    mul.w $t0, $a0, $t0
+; LA32-NEXT:    add.w $a7, $a7, $t0
+; LA32-NEXT:    mul.w $t0, $a0, $a3
+; LA32-NEXT:    add.w $a5, $t0, $a5
+; LA32-NEXT:    sltu $t0, $a5, $t0
+; LA32-NEXT:    add.w $a7, $a7, $t0
+; LA32-NEXT:    add.w $t0, $a6, $a7
+; LA32-NEXT:    sltu $t1, $t0, $a6
+; LA32-NEXT:    srai.w $a6, $a6, 31
+; LA32-NEXT:    srai.w $a7, $a7, 31
+; LA32-NEXT:    add.w $a6, $a6, $a7
+; LA32-NEXT:    add.w $a6, $a6, $t1
+; LA32-NEXT:    mulh.w $a7, $a1, $a3
 ; LA32-NEXT:    add.w $a6, $a7, $a6
-; LA32-NEXT:    add.w $a6, $a6, $t0
-; LA32-NEXT:    mulh.wu $a7, $a2, $t2
-; LA32-NEXT:    add.w $a7, $a7, $t3
-; LA32-NEXT:    mul.w $a3, $a3, $t2
-; LA32-NEXT:    add.w $a3, $a7, $a3
-; LA32-NEXT:    mul.w $a1, $t4, $a1
-; LA32-NEXT:    mulh.wu $a7, $t4, $a0
-; LA32-NEXT:    add.w $a1, $a7, $a1
-; LA32-NEXT:    add.w $a1, $a1, $t5
-; LA32-NEXT:    add.w $a1, $a1, $a3
-; LA32-NEXT:    sltu $a3, $t6, $t5
-; LA32-NEXT:    add.w $a1, $a1, $a3
+; LA32-NEXT:    mul.w $a1, $a1, $a3
+; LA32-NEXT:    add.w $a3, $a1, $t0
+; LA32-NEXT:    sltu $a1, $a3, $a1
 ; LA32-NEXT:    add.w $a1, $a6, $a1
-; LA32-NEXT:    add.w $a1, $a1, $t8
-; LA32-NEXT:    srai.w $a3, $a5, 31
-; LA32-NEXT:    xor $a1, $a1, $a3
-; LA32-NEXT:    xor $a3, $t7, $a3
+; LA32-NEXT:    srai.w $a6, $a5, 31
+; LA32-NEXT:    xor $a1, $a1, $a6
+; LA32-NEXT:    xor $a3, $a3, $a6
 ; LA32-NEXT:    or $a1, $a3, $a1
 ; LA32-NEXT:    sltu $a1, $zero, $a1
 ; LA32-NEXT:    mul.w $a0, $a0, $a2
@@ -74,19 +64,19 @@ define zeroext i1 @smuloi64(i64 %v1, i64 %v2, ptr %res) {
 define zeroext i1 @smuloi128(i128 %v1, i128 %v2, ptr %res) {
 ; LA32-LABEL: smuloi128:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    addi.w $sp, $sp, -96
-; LA32-NEXT:    .cfi_def_cfa_offset 96
-; LA32-NEXT:    st.w $ra, $sp, 92 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $fp, $sp, 88 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $s0, $sp, 84 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $s1, $sp, 80 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $s2, $sp, 76 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $s3, $sp, 72 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $s4, $sp, 68 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $s5, $sp, 64 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $s6, $sp, 60 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $s7, $sp, 56 # 4-byte Folded Spill
-; LA32-NEXT:    st.w $s8, $sp, 52 # 4-byte Folded Spill
+; LA32-NEXT:    addi.w $sp, $sp, -48
+; LA32-NEXT:    .cfi_def_cfa_offset 48
+; LA32-NEXT:    st.w $ra, $sp, 44 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 40 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 36 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s1, $sp, 32 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s2, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s3, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s4, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s5, $sp, 16 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s6, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s7, $sp, 8 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s8, $sp, 4 # 4-byte Folded Spill
 ; LA32-NEXT:    .cfi_offset 1, -4
 ; LA32-NEXT:    .cfi_offset 22, -8
 ; LA32-NEXT:    .cfi_offset 23, -12
@@ -98,295 +88,218 @@ define zeroext i1 @smuloi128(i128 %v1, i128 %v2, ptr %res) {
 ; LA32-NEXT:    .cfi_offset 29, -36
 ; LA32-NEXT:    .cfi_offset 30, -40
 ; LA32-NEXT:    .cfi_offset 31, -44
-; LA32-NEXT:    st.w $a2, $sp, 48 # 4-byte Folded Spill
-; LA32-NEXT:    ld.w $t0, $a1, 12
-; LA32-NEXT:    ld.w $t1, $a1, 8
-; LA32-NEXT:    ld.w $a5, $a0, 12
-; LA32-NEXT:    ld.w $a7, $a1, 0
-; LA32-NEXT:    ld.w $a3, $a0, 0
-; LA32-NEXT:    ld.w $a6, $a0, 4
-; LA32-NEXT:    ld.w $a4, $a0, 8
-; LA32-NEXT:    ld.w $t3, $a1, 4
-; LA32-NEXT:    mulh.wu $a0, $a3, $a7
-; LA32-NEXT:    mul.w $a1, $a6, $a7
+; LA32-NEXT:    ld.w $a5, $a1, 12
+; LA32-NEXT:    ld.w $a6, $a1, 8
+; LA32-NEXT:    ld.w $t1, $a0, 4
+; LA32-NEXT:    ld.w $a3, $a1, 0
+; LA32-NEXT:    ld.w $a7, $a0, 8
+; LA32-NEXT:    ld.w $t0, $a0, 12
+; LA32-NEXT:    ld.w $a4, $a0, 0
+; LA32-NEXT:    ld.w $t2, $a1, 4
+; LA32-NEXT:    mulh.wu $a0, $a7, $a3
+; LA32-NEXT:    mul.w $a1, $t0, $a3
 ; LA32-NEXT:    add.w $a0, $a1, $a0
 ; LA32-NEXT:    sltu $a1, $a0, $a1
-; LA32-NEXT:    mulh.wu $t2, $a6, $a7
-; LA32-NEXT:    add.w $a1, $t2, $a1
-; LA32-NEXT:    mul.w $t2, $a3, $t3
-; LA32-NEXT:    add.w $a0, $t2, $a0
-; LA32-NEXT:    st.w $a0, $sp, 44 # 4-byte Folded Spill
-; LA32-NEXT:    sltu $t2, $a0, $t2
-; LA32-NEXT:    mulh.wu $t4, $a3, $t3
-; LA32-NEXT:    add.w $t2, $t4, $t2
-; LA32-NEXT:    add.w $t2, $a1, $t2
-; LA32-NEXT:    mul.w $t4, $a6, $t3
-; LA32-NEXT:    add.w $t5, $t4, $t2
-; LA32-NEXT:    sltu $t4, $t5, $t4
-; LA32-NEXT:    sltu $a1, $t2, $a1
-; LA32-NEXT:    mulh.wu $...
[truncated]

``````````



https://github.com/llvm/llvm-project/pull/123991


More information about the llvm-commits mailing list