[llvm] 81713e8 - [InstCombine] Fold series of instructions into mul

via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 24 10:10:19 PDT 2022


Author: zhongyunde
Date: 2022-10-25T01:09:37+08:00
New Revision: 81713e893a33ac2807d95addaab0f3c8bb15d911

URL: https://github.com/llvm/llvm-project/commit/81713e893a33ac2807d95addaab0f3c8bb15d911
DIFF: https://github.com/llvm/llvm-project/commit/81713e893a33ac2807d95addaab0f3c8bb15d911.diff

LOG: [InstCombine] Fold series of instructions into mul

The following sequence should be folded into in0 * in1:
      In0Lo = in0 & 0xffffffff; In0Hi = in0 >> 32;
      In1Lo = in1 & 0xffffffff; In1Hi = in1 >> 32;
      m01 = In1Hi * In0Lo; m10 = In1Lo * In0Hi; m00 = In1Lo * In0Lo;
      addc = m01 + m10;
      ResLo = (addc << 32) + m00;
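
In C++ terms, the recognized idiom is a hand-rolled low-half multiply
built from 32-bit halves. The sketch below is for illustration only
(the function and variable names are not part of the patch); after this
change, InstCombine reduces the whole body to a single wide multiply:

    #include <cstdint>

    uint64_t mul64_low(uint64_t in0, uint64_t in1) {
      uint64_t In0Lo = in0 & 0xffffffff, In0Hi = in0 >> 32;
      uint64_t In1Lo = in1 & 0xffffffff, In1Hi = in1 >> 32;
      uint64_t m01 = In1Hi * In0Lo; // cross product: high(in1) * low(in0)
      uint64_t m10 = In1Lo * In0Hi; // cross product: low(in1) * high(in0)
      uint64_t m00 = In1Lo * In0Lo; // low(in1) * low(in0)
      uint64_t addc = m01 + m10;    // sum of the cross products
      return (addc << 32) + m00;    // low 64 bits of in0 * in1
    }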

Reviewed By: spatel, RKSimon
Differential Revision: https://reviews.llvm.org/D136015

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
    llvm/test/Transforms/InstCombine/mul_fold.ll
    llvm/test/Transforms/InstCombine/mul_full_64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 70b216d3b3641..487d408dbbf4b 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1267,6 +1267,49 @@ static Instruction *factorizeMathWithShlOps(BinaryOperator &I,
   return NewShl;
 }
 
+/// Reduce a sequence of masked half-width multiplies to a single multiply.
+/// (((XLow * YHigh) + (YLow * XHigh)) << HalfBits) + (XLow * YLow) --> X * Y
+static Instruction *foldBoxMultiply(BinaryOperator &I) {
+  if (!I.getType()->isIntegerTy())
+    return nullptr;
+
+  unsigned BitWidth = I.getType()->getScalarSizeInBits();
+  // Skip types with odd bit widths and types wider than 128 bits.
+  // TODO: Relax the constraint to handle wider and vector types.
+  if ((BitWidth & 0x1) || (BitWidth > 128))
+    return nullptr;
+
+  unsigned HalfBits = BitWidth >> 1;
+  APInt HalfMask = APInt::getMaxValue(HalfBits);
+
+  // ResLo = (CrossSum << HalfBits) + (YLo * XLo)
+  Value *XLo, *YLo;
+  Value *CrossSum;
+  if (!match(&I, m_c_Add(m_Shl(m_Value(CrossSum), m_SpecificInt(HalfBits)),
+                         m_Mul(m_Value(YLo), m_Value(XLo)))))
+    return nullptr;
+
+  // XLo = X & HalfMask
+  // YLo = Y & HalfMask
+  // TODO: Refactor with SimplifyDemandedBits or KnownBits (known leading
+  // zeros) to make the match more robust.
+  Value *X, *Y;
+  if (!match(XLo, m_And(m_Value(X), m_SpecificInt(HalfMask))) ||
+      !match(YLo, m_And(m_Value(Y), m_SpecificInt(HalfMask))))
+    return nullptr;
+
+  // CrossSum = (X' * (Y >> HalfBits)) + (Y' * (X >> HalfBits))
+  // X' can be either X or XLo in the pattern (and the same for Y')
+  if (match(CrossSum,
+            m_c_Add(m_c_Mul(m_LShr(m_Specific(Y), m_SpecificInt(HalfBits)),
+                            m_CombineOr(m_Specific(X), m_Specific(XLo))),
+                    m_c_Mul(m_LShr(m_Specific(X), m_SpecificInt(HalfBits)),
+                            m_CombineOr(m_Specific(Y), m_Specific(YLo))))))
+    return BinaryOperator::CreateMul(X, Y);
+
+  return nullptr;
+}
+
 Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
   if (Value *V = simplifyAddInst(I.getOperand(0), I.getOperand(1),
                                  I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
@@ -1286,6 +1329,9 @@ Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
   if (Value *V = SimplifyUsingDistributiveLaws(I))
     return replaceInstUsesWith(I, V);
 
+  if (Instruction *R = foldBoxMultiply(I))
+    return R;
+
   if (Instruction *R = factorizeMathWithShlOps(I, Builder))
     return R;
 

diff --git a/llvm/test/Transforms/InstCombine/mul_fold.ll b/llvm/test/Transforms/InstCombine/mul_fold.ll
index cd31cc1da15c3..2c3781097f744 100644
--- a/llvm/test/Transforms/InstCombine/mul_fold.ll
+++ b/llvm/test/Transforms/InstCombine/mul_fold.ll
@@ -16,16 +16,7 @@ declare <2 x i8> @use_v2i8(<2 x i8>)
;  4 tests that use in0/in1 with different commutes
 define i8 @mul8_low_A0_B0(i8 %in0, i8 %in1) {
 ; CHECK-LABEL: @mul8_low_A0_B0(
-; CHECK-NEXT:    [[IN0LO:%.*]] = and i8 [[IN0:%.*]], 15
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i8 [[IN0]], 4
-; CHECK-NEXT:    [[IN1LO:%.*]] = and i8 [[IN1:%.*]], 15
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i8 [[IN1]], 4
-; CHECK-NEXT:    [[M10:%.*]] = mul i8 [[IN1HI]], [[IN0]]
-; CHECK-NEXT:    [[M01:%.*]] = mul i8 [[IN0HI]], [[IN1]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i8 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i8 [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i8 [[ADDC]], 4
-; CHECK-NEXT:    [[RETLO:%.*]] = add i8 [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i8 [[IN0:%.*]], [[IN1:%.*]]
 ; CHECK-NEXT:    ret i8 [[RETLO]]
 ;
   %In0Lo = and i8 %in0, 15
@@ -44,16 +35,7 @@ define i8 @mul8_low_A0_B0(i8 %in0, i8 %in1) {
 define i8 @mul8_low_A0_B1(i8 %p, i8 %in1) {
 ; CHECK-LABEL: @mul8_low_A0_B1(
 ; CHECK-NEXT:    [[IN0:%.*]] = call i8 @use8(i8 [[P:%.*]])
-; CHECK-NEXT:    [[IN0LO:%.*]] = and i8 [[IN0]], 15
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i8 [[IN0]], 4
-; CHECK-NEXT:    [[IN1LO:%.*]] = and i8 [[IN1:%.*]], 15
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i8 [[IN1]], 4
-; CHECK-NEXT:    [[M10:%.*]] = mul i8 [[IN0]], [[IN1HI]]
-; CHECK-NEXT:    [[M01:%.*]] = mul i8 [[IN0HI]], [[IN1]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i8 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i8 [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i8 [[ADDC]], 4
-; CHECK-NEXT:    [[RETLO:%.*]] = add i8 [[M00]], [[SHL]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i8 [[IN0]], [[IN1:%.*]]
 ; CHECK-NEXT:    ret i8 [[RETLO]]
 ;
   %in0 = call i8 @use8(i8 %p) ; thwart complexity-based canonicalization
@@ -73,16 +55,7 @@ define i8 @mul8_low_A0_B1(i8 %p, i8 %in1) {
 define i8 @mul8_low_A0_B2(i8 %in0, i8 %p) {
 ; CHECK-LABEL: @mul8_low_A0_B2(
 ; CHECK-NEXT:    [[IN1:%.*]] = call i8 @use8(i8 [[P:%.*]])
-; CHECK-NEXT:    [[IN0LO:%.*]] = and i8 [[IN0:%.*]], 15
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i8 [[IN0]], 4
-; CHECK-NEXT:    [[IN1LO:%.*]] = and i8 [[IN1]], 15
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i8 [[IN1]], 4
-; CHECK-NEXT:    [[M10:%.*]] = mul i8 [[IN1HI]], [[IN0]]
-; CHECK-NEXT:    [[M01:%.*]] = mul i8 [[IN1]], [[IN0HI]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i8 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i8 [[M01]], [[M10]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i8 [[ADDC]], 4
-; CHECK-NEXT:    [[RETLO:%.*]] = add i8 [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i8 [[IN1]], [[IN0:%.*]]
 ; CHECK-NEXT:    ret i8 [[RETLO]]
 ;
 
@@ -104,16 +77,7 @@ define i8 @mul8_low_A0_B3(i8 %p, i8 %q) {
 ; CHECK-LABEL: @mul8_low_A0_B3(
 ; CHECK-NEXT:    [[IN0:%.*]] = call i8 @use8(i8 [[P:%.*]])
 ; CHECK-NEXT:    [[IN1:%.*]] = call i8 @use8(i8 [[Q:%.*]])
-; CHECK-NEXT:    [[IN0LO:%.*]] = and i8 [[IN0]], 15
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i8 [[IN0]], 4
-; CHECK-NEXT:    [[IN1LO:%.*]] = and i8 [[IN1]], 15
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i8 [[IN1]], 4
-; CHECK-NEXT:    [[M10:%.*]] = mul i8 [[IN0]], [[IN1HI]]
-; CHECK-NEXT:    [[M01:%.*]] = mul i8 [[IN1]], [[IN0HI]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i8 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i8 [[M01]], [[M10]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i8 [[ADDC]], 4
-; CHECK-NEXT:    [[RETLO:%.*]] = add i8 [[M00]], [[SHL]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i8 [[IN0]], [[IN1]]
 ; CHECK-NEXT:    ret i8 [[RETLO]]
 ;
   %in0 = call i8 @use8(i8 %p) ; thwart complexity-based canonicalization
@@ -142,10 +106,7 @@ define i16 @mul16_low_A1_B0(i16 %in0, i16 %in1) {
 ; CHECK-NEXT:    call void @use16(i16 [[M10]])
 ; CHECK-NEXT:    [[M01:%.*]] = mul nuw i16 [[IN1LO]], [[IN0HI]]
 ; CHECK-NEXT:    call void @use16(i16 [[M01]])
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i16 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i16 [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i16 [[ADDC]], 8
-; CHECK-NEXT:    [[RETLO:%.*]] = add i16 [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i16 [[IN0]], [[IN1]]
 ; CHECK-NEXT:    ret i16 [[RETLO]]
 ;
   %In0Lo = and i16 %in0, 255
@@ -173,10 +134,7 @@ define i16 @mul16_low_A1_B1(i16 %in0, i16 %in1) {
 ; CHECK-NEXT:    call void @use16(i16 [[M10]])
 ; CHECK-NEXT:    [[M01:%.*]] = mul nuw i16 [[IN0HI]], [[IN1LO]]
 ; CHECK-NEXT:    call void @use16(i16 [[M01]])
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i16 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i16 [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i16 [[ADDC]], 8
-; CHECK-NEXT:    [[RETLO:%.*]] = add i16 [[M00]], [[SHL]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i16 [[IN0]], [[IN1]]
 ; CHECK-NEXT:    ret i16 [[RETLO]]
 ;
   %In0Lo = and i16 %in0, 255
@@ -204,10 +162,7 @@ define i16 @mul16_low_A1_B2(i16 %in0, i16 %in1) {
 ; CHECK-NEXT:    call void @use16(i16 [[M10]])
 ; CHECK-NEXT:    [[M01:%.*]] = mul nuw i16 [[IN1LO]], [[IN0HI]]
 ; CHECK-NEXT:    call void @use16(i16 [[M01]])
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i16 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i16 [[M01]], [[M10]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i16 [[ADDC]], 8
-; CHECK-NEXT:    [[RETLO:%.*]] = add i16 [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i16 [[IN0]], [[IN1]]
 ; CHECK-NEXT:    ret i16 [[RETLO]]
 ;
   %In0Lo = and i16 %in0, 255
@@ -235,10 +190,7 @@ define i16 @mul16_low_A1_B3(i16 %in0, i16 %in1) {
 ; CHECK-NEXT:    call void @use16(i16 [[M10]])
 ; CHECK-NEXT:    [[M01:%.*]] = mul nuw i16 [[IN1LO]], [[IN0HI]]
 ; CHECK-NEXT:    call void @use16(i16 [[M01]])
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i16 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i16 [[M01]], [[M10]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i16 [[ADDC]], 8
-; CHECK-NEXT:    [[RETLO:%.*]] = add i16 [[M00]], [[SHL]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i16 [[IN0]], [[IN1]]
 ; CHECK-NEXT:    ret i16 [[RETLO]]
 ;
   %In0Lo = and i16 %in0, 255
@@ -260,16 +212,10 @@ define i16 @mul16_low_A1_B3(i16 %in0, i16 %in1) {
 define i32 @mul32_low_A2_B0(i32 %in0, i32 %in1) {
 ; CHECK-LABEL: @mul32_low_A2_B0(
 ; CHECK-NEXT:    [[IN0LO:%.*]] = and i32 [[IN0:%.*]], 65535
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i32 [[IN0]], 16
-; CHECK-NEXT:    [[IN1LO:%.*]] = and i32 [[IN1:%.*]], 65535
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i32 [[IN1]], 16
+; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i32 [[IN1:%.*]], 16
 ; CHECK-NEXT:    [[M10:%.*]] = mul nuw i32 [[IN1HI]], [[IN0LO]]
 ; CHECK-NEXT:    call void @use32(i32 [[M10]])
-; CHECK-NEXT:    [[M01:%.*]] = mul i32 [[IN0HI]], [[IN1]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i32 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i32 [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[ADDC]], 16
-; CHECK-NEXT:    [[RETLO:%.*]] = add i32 [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i32 [[IN0]], [[IN1]]
 ; CHECK-NEXT:    ret i32 [[RETLO]]
 ;
   %In0Lo = and i32 %in0, 65535
@@ -289,16 +235,10 @@ define i32 @mul32_low_A2_B0(i32 %in0, i32 %in1) {
 define i32 @mul32_low_A2_B1(i32 %in0, i32 %in1) {
 ; CHECK-LABEL: @mul32_low_A2_B1(
 ; CHECK-NEXT:    [[IN0LO:%.*]] = and i32 [[IN0:%.*]], 65535
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i32 [[IN0]], 16
-; CHECK-NEXT:    [[IN1LO:%.*]] = and i32 [[IN1:%.*]], 65535
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i32 [[IN1]], 16
+; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i32 [[IN1:%.*]], 16
 ; CHECK-NEXT:    [[M10:%.*]] = mul nuw i32 [[IN1HI]], [[IN0LO]]
 ; CHECK-NEXT:    call void @use32(i32 [[M10]])
-; CHECK-NEXT:    [[M01:%.*]] = mul i32 [[IN0HI]], [[IN1]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i32 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i32 [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[ADDC]], 16
-; CHECK-NEXT:    [[RETLO:%.*]] = add i32 [[M00]], [[SHL]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i32 [[IN0]], [[IN1]]
 ; CHECK-NEXT:    ret i32 [[RETLO]]
 ;
   %In0Lo = and i32 %in0, 65535
@@ -319,16 +259,10 @@ define i32 @mul32_low_A2_B2(i32 %in0, i32 %p) {
 ; CHECK-LABEL: @mul32_low_A2_B2(
 ; CHECK-NEXT:    [[IN1:%.*]] = call i32 @use32(i32 [[P:%.*]])
 ; CHECK-NEXT:    [[IN0LO:%.*]] = and i32 [[IN0:%.*]], 65535
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i32 [[IN0]], 16
-; CHECK-NEXT:    [[IN1LO:%.*]] = and i32 [[IN1]], 65535
 ; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i32 [[IN1]], 16
 ; CHECK-NEXT:    [[M10:%.*]] = mul nuw i32 [[IN0LO]], [[IN1HI]]
 ; CHECK-NEXT:    call void @use32(i32 [[M10]])
-; CHECK-NEXT:    [[M01:%.*]] = mul i32 [[IN1]], [[IN0HI]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i32 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i32 [[M01]], [[M10]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[ADDC]], 16
-; CHECK-NEXT:    [[RETLO:%.*]] = add i32 [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i32 [[IN1]], [[IN0]]
 ; CHECK-NEXT:    ret i32 [[RETLO]]
 ;
   %in1 = call i32 @use32(i32 %p) ; thwart complexity-based canonicalization
@@ -350,16 +284,10 @@ define i32 @mul32_low_A2_B3(i32 %in0, i32 %p) {
 ; CHECK-LABEL: @mul32_low_A2_B3(
 ; CHECK-NEXT:    [[IN1:%.*]] = call i32 @use32(i32 [[P:%.*]])
 ; CHECK-NEXT:    [[IN0LO:%.*]] = and i32 [[IN0:%.*]], 65535
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i32 [[IN0]], 16
-; CHECK-NEXT:    [[IN1LO:%.*]] = and i32 [[IN1]], 65535
 ; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i32 [[IN1]], 16
 ; CHECK-NEXT:    [[M10:%.*]] = mul nuw i32 [[IN1HI]], [[IN0LO]]
 ; CHECK-NEXT:    call void @use32(i32 [[M10]])
-; CHECK-NEXT:    [[M01:%.*]] = mul i32 [[IN1]], [[IN0HI]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i32 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i32 [[M01]], [[M10]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[ADDC]], 16
-; CHECK-NEXT:    [[RETLO:%.*]] = add i32 [[M00]], [[SHL]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i32 [[IN1]], [[IN0]]
 ; CHECK-NEXT:    ret i32 [[RETLO]]
 ;
   %in1 = call i32 @use32(i32 %p) ; thwart complexity-based canonicalization
@@ -380,17 +308,11 @@ define i32 @mul32_low_A2_B3(i32 %in0, i32 %p) {
;  4 tests that use in0/In1Lo with different commutes
 define i64 @mul64_low_A3_B0(i64 %in0, i64 %in1) {
 ; CHECK-LABEL: @mul64_low_A3_B0(
-; CHECK-NEXT:    [[IN0LO:%.*]] = and i64 [[IN0:%.*]], 4294967295
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i64 [[IN0]], 32
+; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i64 [[IN0:%.*]], 32
 ; CHECK-NEXT:    [[IN1LO:%.*]] = and i64 [[IN1:%.*]], 4294967295
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i64 [[IN1]], 32
-; CHECK-NEXT:    [[M10:%.*]] = mul i64 [[IN1HI]], [[IN0]]
 ; CHECK-NEXT:    [[M01:%.*]] = mul nuw i64 [[IN0HI]], [[IN1LO]]
 ; CHECK-NEXT:    call void @use64(i64 [[M01]])
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i64 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i64 [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i64 [[ADDC]], 32
-; CHECK-NEXT:    [[RETLO:%.*]] = add i64 [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i64 [[IN0]], [[IN1]]
 ; CHECK-NEXT:    ret i64 [[RETLO]]
 ;
   %In0Lo = and i64 %in0, 4294967295
@@ -409,17 +331,11 @@ define i64 @mul64_low_A3_B0(i64 %in0, i64 %in1) {
 
 define i64 @mul64_low_A3_B1(i64 %in0, i64 %in1) {
 ; CHECK-LABEL: @mul64_low_A3_B1(
-; CHECK-NEXT:    [[IN0LO:%.*]] = and i64 [[IN0:%.*]], 4294967295
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i64 [[IN0]], 32
+; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i64 [[IN0:%.*]], 32
 ; CHECK-NEXT:    [[IN1LO:%.*]] = and i64 [[IN1:%.*]], 4294967295
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i64 [[IN1]], 32
-; CHECK-NEXT:    [[M10:%.*]] = mul i64 [[IN1HI]], [[IN0]]
 ; CHECK-NEXT:    [[M01:%.*]] = mul nuw i64 [[IN0HI]], [[IN1LO]]
 ; CHECK-NEXT:    call void @use64(i64 [[M01]])
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i64 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i64 [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i64 [[ADDC]], 32
-; CHECK-NEXT:    [[RETLO:%.*]] = add i64 [[M00]], [[SHL]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i64 [[IN0]], [[IN1]]
 ; CHECK-NEXT:    ret i64 [[RETLO]]
 ;
   %In0Lo = and i64 %in0, 4294967295
@@ -439,17 +355,11 @@ define i64 @mul64_low_A3_B1(i64 %in0, i64 %in1) {
 define i64 @mul64_low_A3_B2(i64 %p, i64 %in1) {
 ; CHECK-LABEL: @mul64_low_A3_B2(
 ; CHECK-NEXT:    [[IN0:%.*]] = call i64 @use64(i64 [[P:%.*]])
-; CHECK-NEXT:    [[IN0LO:%.*]] = and i64 [[IN0]], 4294967295
 ; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i64 [[IN0]], 32
 ; CHECK-NEXT:    [[IN1LO:%.*]] = and i64 [[IN1:%.*]], 4294967295
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i64 [[IN1]], 32
-; CHECK-NEXT:    [[M10:%.*]] = mul i64 [[IN0]], [[IN1HI]]
 ; CHECK-NEXT:    [[M01:%.*]] = mul nuw i64 [[IN0HI]], [[IN1LO]]
 ; CHECK-NEXT:    call void @use64(i64 [[M01]])
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i64 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i64 [[M01]], [[M10]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i64 [[ADDC]], 32
-; CHECK-NEXT:    [[RETLO:%.*]] = add i64 [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i64 [[IN0]], [[IN1]]
 ; CHECK-NEXT:    ret i64 [[RETLO]]
 ;
   %in0 = call i64 @use64(i64 %p) ; thwart complexity-based canonicalization
@@ -470,17 +380,11 @@ define i64 @mul64_low_A3_B2(i64 %p, i64 %in1) {
 define i64 @mul64_low_A3_B3(i64 %p, i64 %in1) {
 ; CHECK-LABEL: @mul64_low_A3_B3(
 ; CHECK-NEXT:    [[IN0:%.*]] = call i64 @use64(i64 [[P:%.*]])
-; CHECK-NEXT:    [[IN0LO:%.*]] = and i64 [[IN0]], 4294967295
 ; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i64 [[IN0]], 32
 ; CHECK-NEXT:    [[IN1LO:%.*]] = and i64 [[IN1:%.*]], 4294967295
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i64 [[IN1]], 32
-; CHECK-NEXT:    [[M10:%.*]] = mul i64 [[IN0]], [[IN1HI]]
 ; CHECK-NEXT:    [[M01:%.*]] = mul nuw i64 [[IN1LO]], [[IN0HI]]
 ; CHECK-NEXT:    call void @use64(i64 [[M01]])
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i64 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i64 [[M01]], [[M10]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i64 [[ADDC]], 32
-; CHECK-NEXT:    [[RETLO:%.*]] = add i64 [[M00]], [[SHL]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i64 [[IN0]], [[IN1]]
 ; CHECK-NEXT:    ret i64 [[RETLO]]
 ;
   %in0 = call i64 @use64(i64 %p) ; thwart complexity-based canonicalization
@@ -506,11 +410,9 @@ define i32 @mul32_low_one_extra_user(i32 %in0, i32 %in1) {
 ; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i32 [[IN1]], 16
 ; CHECK-NEXT:    [[M10:%.*]] = mul nuw i32 [[IN1HI]], [[IN0LO]]
 ; CHECK-NEXT:    [[M01:%.*]] = mul nuw i32 [[IN1LO]], [[IN0HI]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i32 [[IN1LO]], [[IN0LO]]
 ; CHECK-NEXT:    [[ADDC:%.*]] = add i32 [[M10]], [[M01]]
 ; CHECK-NEXT:    call void @use32(i32 [[ADDC]])
-; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[ADDC]], 16
-; CHECK-NEXT:    [[RETLO:%.*]] = add i32 [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i32 [[IN0]], [[IN1]]
 ; CHECK-NEXT:    ret i32 [[RETLO]]
 ;
   %In0Lo = and i32 %in0, 65535
@@ -531,16 +433,7 @@ define i32 @mul32_low_one_extra_user(i32 %in0, i32 %in1) {
 ; https://alive2.llvm.org/ce/z/2BqKLt
 define i8 @mul8_low(i8 %in0, i8 %in1) {
 ; CHECK-LABEL: @mul8_low(
-; CHECK-NEXT:    [[IN0LO:%.*]] = and i8 [[IN0:%.*]], 15
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i8 [[IN0]], 4
-; CHECK-NEXT:    [[IN1LO:%.*]] = and i8 [[IN1:%.*]], 15
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i8 [[IN1]], 4
-; CHECK-NEXT:    [[M10:%.*]] = mul i8 [[IN1HI]], [[IN0]]
-; CHECK-NEXT:    [[M01:%.*]] = mul i8 [[IN0HI]], [[IN1]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i8 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i8 [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i8 [[ADDC]], 4
-; CHECK-NEXT:    [[RETLO:%.*]] = add i8 [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i8 [[IN0:%.*]], [[IN1:%.*]]
 ; CHECK-NEXT:    ret i8 [[RETLO]]
 ;
   %In0Lo = and i8 %in0, 15
@@ -558,16 +451,7 @@ define i8 @mul8_low(i8 %in0, i8 %in1) {
 
 define i16 @mul16_low(i16 %in0, i16 %in1) {
 ; CHECK-LABEL: @mul16_low(
-; CHECK-NEXT:    [[IN0LO:%.*]] = and i16 [[IN0:%.*]], 255
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i16 [[IN0]], 8
-; CHECK-NEXT:    [[IN1LO:%.*]] = and i16 [[IN1:%.*]], 255
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i16 [[IN1]], 8
-; CHECK-NEXT:    [[M10:%.*]] = mul i16 [[IN1HI]], [[IN0]]
-; CHECK-NEXT:    [[M01:%.*]] = mul i16 [[IN0HI]], [[IN1]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i16 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i16 [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i16 [[ADDC]], 8
-; CHECK-NEXT:    [[RETLO:%.*]] = add i16 [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i16 [[IN0:%.*]], [[IN1:%.*]]
 ; CHECK-NEXT:    ret i16 [[RETLO]]
 ;
   %In0Lo = and i16 %in0, 255
@@ -585,16 +469,7 @@ define i16 @mul16_low(i16 %in0, i16 %in1) {
 
 define i32 @mul32_low(i32 %in0, i32 %in1) {
 ; CHECK-LABEL: @mul32_low(
-; CHECK-NEXT:    [[IN0LO:%.*]] = and i32 [[IN0:%.*]], 65535
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i32 [[IN0]], 16
-; CHECK-NEXT:    [[IN1LO:%.*]] = and i32 [[IN1:%.*]], 65535
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i32 [[IN1]], 16
-; CHECK-NEXT:    [[M10:%.*]] = mul i32 [[IN1HI]], [[IN0]]
-; CHECK-NEXT:    [[M01:%.*]] = mul i32 [[IN0HI]], [[IN1]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i32 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i32 [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[ADDC]], 16
-; CHECK-NEXT:    [[RETLO:%.*]] = add i32 [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i32 [[IN0:%.*]], [[IN1:%.*]]
 ; CHECK-NEXT:    ret i32 [[RETLO]]
 ;
   %In0Lo = and i32 %in0, 65535
@@ -612,16 +487,7 @@ define i32 @mul32_low(i32 %in0, i32 %in1) {
 
 define i64 @mul64_low(i64 %in0, i64 %in1) {
 ; CHECK-LABEL: @mul64_low(
-; CHECK-NEXT:    [[IN0LO:%.*]] = and i64 [[IN0:%.*]], 4294967295
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i64 [[IN0]], 32
-; CHECK-NEXT:    [[IN1LO:%.*]] = and i64 [[IN1:%.*]], 4294967295
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i64 [[IN1]], 32
-; CHECK-NEXT:    [[M10:%.*]] = mul i64 [[IN1HI]], [[IN0]]
-; CHECK-NEXT:    [[M01:%.*]] = mul i64 [[IN0HI]], [[IN1]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i64 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i64 [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i64 [[ADDC]], 32
-; CHECK-NEXT:    [[RETLO:%.*]] = add i64 [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i64 [[IN0:%.*]], [[IN1:%.*]]
 ; CHECK-NEXT:    ret i64 [[RETLO]]
 ;
   %In0Lo = and i64 %in0, 4294967295
@@ -639,16 +505,7 @@ define i64 @mul64_low(i64 %in0, i64 %in1) {
 
 define i128 @mul128_low(i128 %in0, i128 %in1) {
 ; CHECK-LABEL: @mul128_low(
-; CHECK-NEXT:    [[IN0LO:%.*]] = and i128 [[IN0:%.*]], 18446744073709551615
-; CHECK-NEXT:    [[IN0HI:%.*]] = lshr i128 [[IN0]], 64
-; CHECK-NEXT:    [[IN1LO:%.*]] = and i128 [[IN1:%.*]], 18446744073709551615
-; CHECK-NEXT:    [[IN1HI:%.*]] = lshr i128 [[IN1]], 64
-; CHECK-NEXT:    [[M10:%.*]] = mul i128 [[IN1HI]], [[IN0]]
-; CHECK-NEXT:    [[M01:%.*]] = mul i128 [[IN0HI]], [[IN1]]
-; CHECK-NEXT:    [[M00:%.*]] = mul nuw i128 [[IN1LO]], [[IN0LO]]
-; CHECK-NEXT:    [[ADDC:%.*]] = add i128 [[M10]], [[M01]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i128 [[ADDC]], 64
-; CHECK-NEXT:    [[RETLO:%.*]] = add i128 [[SHL]], [[M00]]
+; CHECK-NEXT:    [[RETLO:%.*]] = mul i128 [[IN0:%.*]], [[IN1:%.*]]
 ; CHECK-NEXT:    ret i128 [[RETLO]]
 ;
   %In0Lo = and i128 %in0, 18446744073709551615

diff --git a/llvm/test/Transforms/InstCombine/mul_full_64.ll b/llvm/test/Transforms/InstCombine/mul_full_64.ll
index fa58271bae10e..eb54ba41e3470 100644
--- a/llvm/test/Transforms/InstCombine/mul_full_64.ll
+++ b/llvm/test/Transforms/InstCombine/mul_full_64.ll
@@ -196,9 +196,7 @@ define i64 @mul_full_64_variant3(i64 %a, i64 %b, i64* nocapture %rhi) {
 ; CHECK-NEXT:    [[SHR_I:%.*]] = lshr i64 [[ADD15]], 32
 ; CHECK-NEXT:    [[ADD17:%.*]] = add i64 [[ADD10]], [[SHR_I]]
 ; CHECK-NEXT:    store i64 [[ADD17]], i64* [[RHI:%.*]], align 8
-; CHECK-NEXT:    [[ADD18:%.*]] = add i64 [[MUL6]], [[MUL5]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i64 [[ADD18]], 32
-; CHECK-NEXT:    [[ADD19:%.*]] = add i64 [[SHL]], [[MUL7]]
+; CHECK-NEXT:    [[ADD19:%.*]] = mul i64 [[A]], [[B]]
 ; CHECK-NEXT:    ret i64 [[ADD19]]
 ;
   %conv = and i64 %a, 4294967295
@@ -493,16 +491,7 @@ define i64 @mullo(i64 %x, i64 %y) {
 
 define i64 @mullo_variant3(i64 %a, i64 %b) {
 ; CHECK-LABEL: @mullo_variant3(
-; CHECK-NEXT:    [[AL:%.*]] = and i64 [[A:%.*]], 4294967295
-; CHECK-NEXT:    [[AH:%.*]] = lshr i64 [[A]], 32
-; CHECK-NEXT:    [[BL:%.*]] = and i64 [[B:%.*]], 4294967295
-; CHECK-NEXT:    [[BH:%.*]] = lshr i64 [[B]], 32
-; CHECK-NEXT:    [[T0:%.*]] = mul nuw i64 [[BL]], [[AL]]
-; CHECK-NEXT:    [[T1:%.*]] = mul i64 [[AH]], [[B]]
-; CHECK-NEXT:    [[T2:%.*]] = mul i64 [[BH]], [[A]]
-; CHECK-NEXT:    [[U1:%.*]] = add i64 [[T2]], [[T1]]
-; CHECK-NEXT:    [[U1LS:%.*]] = shl i64 [[U1]], 32
-; CHECK-NEXT:    [[LO:%.*]] = add i64 [[U1LS]], [[T0]]
+; CHECK-NEXT:    [[LO:%.*]] = mul i64 [[A:%.*]], [[B:%.*]]
 ; CHECK-NEXT:    ret i64 [[LO]]
 ;
   %al = and i64 %a, 4294967295

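For context on the mul_full_64.ll changes: in a full 64x64->128 multiply
decomposition, only the low-half tail matches the new fold; the high-half
carry chain is left untouched. A sketch in the spirit of
mul_full_64_variant3 (illustrative names, not code from the commit):

    #include <cstdint>

    uint64_t mul_full_64(uint64_t a, uint64_t b, uint64_t *rhi) {
      uint64_t al = a & 0xffffffff, ah = a >> 32;
      uint64_t bl = b & 0xffffffff, bh = b >> 32;
      uint64_t m00 = bl * al, m01 = bl * ah;
      uint64_t m10 = bh * al, m11 = bh * ah;
      // High half: accumulate the carries out of the partial products.
      uint64_t carry = (m00 >> 32) + (m01 & 0xffffffff) + (m10 & 0xffffffff);
      *rhi = m11 + (m01 >> 32) + (m10 >> 32) + (carry >> 32);
      // Low half: this tail is what now folds to a single a * b.
      return ((m01 + m10) << 32) + m00;
    }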

        

