[llvm] [SelectionDAG] Emit a more efficient magic number multiplication for exact udivs (PR #87161)

AtariDreams via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 22 09:04:05 PDT 2024


https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/87161
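
For anyone skimming: with the `exact` flag the dividend is known to be an
exact multiple of the divisor d = 2^k * d' (d' odd), so x / d can be computed
as (x >> k) * inverse(d'), where inverse(d') is the multiplicative inverse of
d' modulo 2^32. A minimal standalone C++ check of that identity (not part of
the patch), using the same constants that show up in the updated tests below:

#include <cassert>
#include <cstdint>

int main() {
  // udiv exact i32 %x, 25: 25 is odd, so a single multiply suffices.
  uint32_t X = 25u * 1234567u;
  assert(X * 0xC28F5C29u == X / 25); // 25 * 0xC28F5C29 == 1 (mod 2^32)
  // udiv exact i32 %x, 24 with 24 == 8 * 3: shift right by 3, then invert 3.
  uint32_t Y = 24u * 7654321u;
  assert((Y >> 3) * 0xAAAAAAABu == Y / 24); // 3 * 0xAAAAAAAB == 1 (mod 2^32)
  return 0;
}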

From e75f89cefa245a1eaacb77ad4d490a0178997ab0 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Sat, 30 Mar 2024 12:17:06 -0400
Subject: [PATCH 1/2] [X86] Pre-commit tests (NFC)

---
 llvm/test/CodeGen/X86/udiv-exact.ll | 225 ++++++++++++++++++++++++++++
 1 file changed, 225 insertions(+)
 create mode 100644 llvm/test/CodeGen/X86/udiv-exact.ll

diff --git a/llvm/test/CodeGen/X86/udiv-exact.ll b/llvm/test/CodeGen/X86/udiv-exact.ll
new file mode 100644
index 00000000000000..0a835e07107888
--- /dev/null
+++ b/llvm/test/CodeGen/X86/udiv-exact.ll
@@ -0,0 +1,225 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
+
+define i32 @test1(i32 %x) {
+; X86-LABEL: test1:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $1374389535, %eax # imm = 0x51EB851F
+; X86-NEXT:    mull {{[0-9]+}}(%esp)
+; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    shrl $3, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: test1:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    imulq $1374389535, %rax, %rax # imm = 0x51EB851F
+; X64-NEXT:    shrq $35, %rax
+; X64-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-NEXT:    retq
+  %div = udiv exact i32 %x, 25
+  ret i32 %div
+}
+
+define i32 @test2(i32 %x) {
+; X86-LABEL: test2:
+; X86:       # %bb.0:
+; X86-NEXT:    movl $-1431655765, %eax # imm = 0xAAAAAAAB
+; X86-NEXT:    mull {{[0-9]+}}(%esp)
+; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    shrl $4, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: test2:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %ecx
+; X64-NEXT:    movl $2863311531, %eax # imm = 0xAAAAAAAB
+; X64-NEXT:    imulq %rcx, %rax
+; X64-NEXT:    shrq $36, %rax
+; X64-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-NEXT:    retq
+  %div = udiv exact i32 %x, 24
+  ret i32 %div
+}
+
+define <4 x i32> @test3(<4 x i32> %x) {
+; X86-LABEL: test3:
+; X86:       # %bb.0:
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT:    psrld $4, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: test3:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
+; X64-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
+; X64-NEXT:    vpmuludq %xmm2, %xmm0, %xmm0
+; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X64-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; X64-NEXT:    vpsrld $4, %xmm0, %xmm0
+; X64-NEXT:    retq
+  %div = udiv exact <4 x i32> %x, <i32 24, i32 24, i32 24, i32 24>
+  ret <4 x i32> %div
+}
+
+define <4 x i32> @test4(<4 x i32> %x) {
+; X86-LABEL: test4:
+; X86:       # %bb.0:
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [1374389535,1374389535,1374389535,1374389535]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT:    psrld $3, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: test4:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535]
+; X64-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
+; X64-NEXT:    vpmuludq %xmm2, %xmm0, %xmm0
+; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X64-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; X64-NEXT:    vpsrld $3, %xmm0, %xmm0
+; X64-NEXT:    retq
+  %div = udiv exact <4 x i32> %x, <i32 25, i32 25, i32 25, i32 25>
+  ret <4 x i32> %div
+}
+
+define <4 x i32> @test5(<4 x i32> %x) {
+; X86-LABEL: test5:
+; X86:       # %bb.0:
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrld $4, %xmm1
+; X86-NEXT:    psrld $3, %xmm0
+; X86-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X86-NEXT:    retl
+;
+; X64-LABEL: test5:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; X64-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X64-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; X64-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    retq
+  %div = udiv exact <4 x i32> %x, <i32 24, i32 24, i32 25, i32 25>
+  ret <4 x i32> %div
+}
+
+define <4 x i32> @test6(<4 x i32> %x) {
+; X86-LABEL: test6:
+; X86:       # %bb.0:
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrld $4, %xmm1
+; X86-NEXT:    psrld $3, %xmm0
+; X86-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X86-NEXT:    retl
+;
+; X64-LABEL: test6:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; X64-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X64-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; X64-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    retq
+  %div = udiv exact <4 x i32> %x, <i32 24, i32 24, i32 26, i32 26>
+  ret <4 x i32> %div
+}
+
+define <4 x i32> @test7(<4 x i32> %x) {
+; X86-LABEL: test7:
+; X86:       # %bb.0:
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [1374389535,1374389535,795364315,795364315]
+; X86-NEXT:    pmuludq %xmm0, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; X86-NEXT:    psubd %xmm1, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X86-NEXT:    paddd %xmm1, %xmm0
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrld $3, %xmm1
+; X86-NEXT:    psrld $4, %xmm0
+; X86-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X86-NEXT:    retl
+;
+; X64-LABEL: test7:
+; X64:       # %bb.0:
+; X64-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; X64-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
+; X64-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; X64-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
+; X64-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; X64-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X64-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; X64-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X64-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
+; X64-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; X64-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    retq
+  %div = udiv exact <4 x i32> %x, <i32 25, i32 25, i32 27, i32 27>
+  ret <4 x i32> %div
+}
+
+define <4 x i32> @test8(<4 x i32> %x) {
+; X86-LABEL: test8:
+; X86:       # %bb.0:
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [u,u,2863311531,2863311531]
+; X86-NEXT:    movdqa %xmm0, %xmm2
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm1, %xmm3
+; X86-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; X86-NEXT:    psrld $4, %xmm2
+; X86-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
+; X86-NEXT:    retl
+;
+; X64-LABEL: test8:
+; X64:       # %bb.0:
+; X64-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
+; X64-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
+; X64-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; X64-NEXT:    vpmuludq %xmm1, %xmm3, %xmm1
+; X64-NEXT:    vpunpckhdq {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; X64-NEXT:    vpsrld $4, %xmm1, %xmm1
+; X64-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; X64-NEXT:    retq
+  %div = udiv exact <4 x i32> %x, <i32 1, i32 1, i32 24, i32 24>
+  ret <4 x i32> %div
+}

From 29874459cf4fbb78d620b08895123bf1f2ed78d0 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Sat, 30 Mar 2024 12:24:46 -0400
Subject: [PATCH 2/2] [SelectionDAG] Emit a more efficient magic number
 multiplication for exact udivs.

---
 .../lib/CodeGen/GlobalISel/CombinerHelper.cpp |  10 +-
 .../CodeGen/SelectionDAG/TargetLowering.cpp   |  69 +++++++-
 llvm/test/CodeGen/X86/udiv-exact.ll           | 166 ++++++------------
 3 files changed, 127 insertions(+), 118 deletions(-)

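Before the diff, a minimal standalone sketch (not part of the patch) of the
per-element decomposition the new BuildUDIVPattern lambda performs, using
plain uint32_t in place of APInt: peel the divisor's power-of-two part off
into a logical-shift amount, then take the multiplicative inverse of the
remaining odd factor modulo 2^32 (here via Newton's iteration; the patch
itself calls APInt::multiplicativeInverse):

#include <bit> // std::countr_zero (C++20)
#include <cassert>
#include <cstdint>

struct ExactUDivPlan {
  unsigned Shift;  // srl amount: trailing zeros of the divisor
  uint32_t Factor; // inverse of the divisor's odd part, mod 2^32
};

static ExactUDivPlan planExactUDiv(uint32_t Divisor) {
  assert(Divisor != 0 && "mirrors the C->isZero() bail-out below");
  unsigned Shift = std::countr_zero(Divisor);
  uint32_t Odd = Divisor >> Shift;
  // Newton's iteration: X = Odd is already an inverse mod 2^3 for any odd
  // value, and each step doubles the number of correct low bits.
  uint32_t Inv = Odd;
  for (int I = 0; I < 5; ++I)
    Inv *= 2 - Odd * Inv;
  return {Shift, Inv};
}

int main() {
  ExactUDivPlan P = planExactUDiv(24);
  assert(P.Shift == 3 && P.Factor == 0xAAAAAAABu);
  P = planExactUDiv(25);
  assert(P.Shift == 0 && P.Factor == 0xC28F5C29u);
  return 0;
}
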
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index c5ee354f13b7b4..cf0fe7d5eea6a3 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -5087,7 +5087,7 @@ MachineInstr *CombinerHelper::buildUDivUsingMul(MachineInstr &MI) {
     // TODO: Use undef values for divisor of 1.
     if (!Divisor.isOne()) {
       UnsignedDivisionByConstantInfo magics =
-          UnsignedDivisionByConstantInfo::get(Divisor);
+          UnsignedDivisionByConstantInfo::get(Divisor, Divisor.countl_zero(), true);
 
       Magic = std::move(magics.Magic);
 
@@ -5197,12 +5197,8 @@ bool CombinerHelper::matchUDivByConst(MachineInstr &MI) {
       return false;
   }
 
-  auto CheckEltValue = [&](const Constant *C) {
-    if (auto *CI = dyn_cast_or_null<ConstantInt>(C))
-      return !CI->isZero();
-    return false;
-  };
-  return matchUnaryPredicate(MRI, RHS, CheckEltValue);
+  return matchUnaryPredicate(
+      MRI, RHS, [](const Constant *C) { return C && !C->isZeroValue(); });
 }
 
 void CombinerHelper::applyUDivByConst(MachineInstr &MI) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index c938b3996be393..ee78dd726c9765 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -6116,7 +6116,6 @@ static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N,
 
   // Shift the value upfront if it is even, so the LSB is one.
   if (UseSRA) {
-    // TODO: For UDIV use SRL instead of SRA.
     SDNodeFlags Flags;
     Flags.setExact(true);
     Res = DAG.getNode(ISD::SRA, dl, VT, Res, Shift, Flags);
@@ -6126,6 +6125,70 @@ static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N,
   return DAG.getNode(ISD::MUL, dl, VT, Res, Factor);
 }
 
+/// Given an exact UDIV by a constant, create a multiplication
+/// with the multiplicative inverse of the constant.
+static SDValue BuildExactUDIV(const TargetLowering &TLI, SDNode *N,
+                              const SDLoc &dl, SelectionDAG &DAG,
+                              SmallVectorImpl<SDNode *> &Created) {
+  SDValue Op0 = N->getOperand(0);
+  SDValue Op1 = N->getOperand(1);
+  EVT VT = N->getValueType(0);
+  EVT SVT = VT.getScalarType();
+  EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
+  EVT ShSVT = ShVT.getScalarType();
+
+  bool UseSRL = false;
+  SmallVector<SDValue, 16> Shifts, Factors;
+
+  auto BuildUDIVPattern = [&](ConstantSDNode *C) {
+    if (C->isZero())
+      return false;
+    APInt Divisor = C->getAPIntValue();
+    unsigned Shift = Divisor.countr_zero();
+    if (Shift) {
+      Divisor.lshrInPlace(Shift);
+      UseSRL = true;
+    }
+    // Calculate the multiplicative inverse modulo 2^BW.
+    APInt Factor = Divisor.multiplicativeInverse();
+    Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT));
+    Factors.push_back(DAG.getConstant(Factor, dl, SVT));
+    return true;
+  };
+
+  // Collect all magic values from the build vector.
+  if (!ISD::matchUnaryPredicate(Op1, BuildUDIVPattern))
+    return SDValue();
+
+  SDValue Shift, Factor;
+  if (Op1.getOpcode() == ISD::BUILD_VECTOR) {
+    Shift = DAG.getBuildVector(ShVT, dl, Shifts);
+    Factor = DAG.getBuildVector(VT, dl, Factors);
+  } else if (Op1.getOpcode() == ISD::SPLAT_VECTOR) {
+    assert(Shifts.size() == 1 && Factors.size() == 1 &&
+           "Expected matchUnaryPredicate to return one element for scalable "
+           "vectors");
+    Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]);
+    Factor = DAG.getSplatVector(VT, dl, Factors[0]);
+  } else {
+    assert(isa<ConstantSDNode>(Op1) && "Expected a constant");
+    Shift = Shifts[0];
+    Factor = Factors[0];
+  }
+
+  SDValue Res = Op0;
+
+  // Shift the value upfront if it is even, so the LSB is one.
+  if (UseSRL) {
+    SDNodeFlags Flags;
+    Flags.setExact(true);
+    Res = DAG.getNode(ISD::SRL, dl, VT, Res, Shift, Flags);
+    Created.push_back(Res.getNode());
+  }
+
+  return DAG.getNode(ISD::MUL, dl, VT, Res, Factor);
+}
+
 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                               SelectionDAG &DAG,
                               SmallVectorImpl<SDNode *> &Created) const {
@@ -6385,6 +6448,10 @@ SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
       return SDValue();
   }
 
+  // If the udiv has an 'exact' bit we can use a simpler lowering.
+  if (N->getFlags().hasExact())
+    return BuildExactUDIV(*this, N, dl, DAG, Created);
+
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
 
diff --git a/llvm/test/CodeGen/X86/udiv-exact.ll b/llvm/test/CodeGen/X86/udiv-exact.ll
index 0a835e07107888..271d11edff9a76 100644
--- a/llvm/test/CodeGen/X86/udiv-exact.ll
+++ b/llvm/test/CodeGen/X86/udiv-exact.ll
@@ -5,18 +5,12 @@
 define i32 @test1(i32 %x) {
 ; X86-LABEL: test1:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl $1374389535, %eax # imm = 0x51EB851F
-; X86-NEXT:    mull {{[0-9]+}}(%esp)
-; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    shrl $3, %eax
+; X86-NEXT:    imull $-1030792151, {{[0-9]+}}(%esp), %eax # imm = 0xC28F5C29
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test1:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    imulq $1374389535, %rax, %rax # imm = 0x51EB851F
-; X64-NEXT:    shrq $35, %rax
-; X64-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-NEXT:    imull $-1030792151, %edi, %eax # imm = 0xC28F5C29
 ; X64-NEXT:    retq
   %div = udiv exact i32 %x, 25
   ret i32 %div
@@ -25,19 +19,15 @@ define i32 @test1(i32 %x) {
 define i32 @test2(i32 %x) {
 ; X86-LABEL: test2:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl $-1431655765, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    mull {{[0-9]+}}(%esp)
-; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    shrl $4, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shrl $3, %eax
+; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test2:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl %edi, %ecx
-; X64-NEXT:    movl $2863311531, %eax # imm = 0xAAAAAAAB
-; X64-NEXT:    imulq %rcx, %rax
-; X64-NEXT:    shrq $36, %rax
-; X64-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-NEXT:    shrl $3, %edi
+; X64-NEXT:    imull $-1431655765, %edi, %eax # imm = 0xAAAAAAAB
 ; X64-NEXT:    retq
   %div = udiv exact i32 %x, 24
   ret i32 %div
@@ -46,25 +36,21 @@ define i32 @test2(i32 %x) {
 define <4 x i32> @test3(<4 x i32> %x) {
 ; X86-LABEL: test3:
 ; X86:       # %bb.0:
+; X86-NEXT:    psrld $3, %xmm0
 ; X86-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
 ; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; X86-NEXT:    pmuludq %xmm1, %xmm0
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-NEXT:    pmuludq %xmm1, %xmm2
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
 ; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-NEXT:    psrld $4, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test3:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X64-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2863311531,2863311531,2863311531,2863311531]
-; X64-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
-; X64-NEXT:    vpmuludq %xmm2, %xmm0, %xmm0
-; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X64-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; X64-NEXT:    vpsrld $4, %xmm0, %xmm0
+; X64-NEXT:    vpsrld $3, %xmm0, %xmm0
+; X64-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
+; X64-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = udiv exact <4 x i32> %x, <i32 24, i32 24, i32 24, i32 24>
   ret <4 x i32> %div
@@ -73,25 +59,19 @@ define <4 x i32> @test3(<4 x i32> %x) {
 define <4 x i32> @test4(<4 x i32> %x) {
 ; X86-LABEL: test4:
 ; X86:       # %bb.0:
-; X86-NEXT:    movdqa {{.*#+}} xmm1 = [1374389535,1374389535,1374389535,1374389535]
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145]
 ; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; X86-NEXT:    pmuludq %xmm1, %xmm0
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; X86-NEXT:    pmuludq %xmm1, %xmm2
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
 ; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-NEXT:    psrld $3, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test4:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X64-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535]
-; X64-NEXT:    vpmuludq %xmm2, %xmm1, %xmm1
-; X64-NEXT:    vpmuludq %xmm2, %xmm0, %xmm0
-; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X64-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; X64-NEXT:    vpsrld $3, %xmm0, %xmm0
+; X64-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145]
+; X64-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = udiv exact <4 x i32> %x, <i32 25, i32 25, i32 25, i32 25>
   ret <4 x i32> %div
@@ -100,26 +80,22 @@ define <4 x i32> @test4(<4 x i32> %x) {
 define <4 x i32> @test5(<4 x i32> %x) {
 ; X86-LABEL: test5:
 ; X86:       # %bb.0:
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrld $3, %xmm1
+; X86-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3]
+; X86-NEXT:    movdqa {{.*#+}} xmm0 = [2863311531,2863311531,3264175145,3264175145]
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,3,3]
 ; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psrld $4, %xmm1
-; X86-NEXT:    psrld $3, %xmm0
-; X86-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test5:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X64-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; X64-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X64-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; X64-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = udiv exact <4 x i32> %x, <i32 24, i32 24, i32 25, i32 25>
   ret <4 x i32> %div
@@ -128,26 +104,24 @@ define <4 x i32> @test5(<4 x i32> %x) {
 define <4 x i32> @test6(<4 x i32> %x) {
 ; X86-LABEL: test6:
 ; X86:       # %bb.0:
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psrld $4, %xmm1
-; X86-NEXT:    psrld $3, %xmm0
+; X86-NEXT:    psrld $3, %xmm1
+; X86-NEXT:    psrld $1, %xmm0
 ; X86-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,2863311531,3303820997,3303820997]
+; X86-NEXT:    pmuludq %xmm0, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-NEXT:    movdqa %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test6:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X64-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; X64-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X64-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; X64-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = udiv exact <4 x i32> %x, <i32 24, i32 24, i32 26, i32 26>
   ret <4 x i32> %div
@@ -156,42 +130,17 @@ define <4 x i32> @test6(<4 x i32> %x) {
 define <4 x i32> @test7(<4 x i32> %x) {
 ; X86-LABEL: test7:
 ; X86:       # %bb.0:
-; X86-NEXT:    movdqa {{.*#+}} xmm1 = [1374389535,1374389535,795364315,795364315]
-; X86-NEXT:    pmuludq %xmm0, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; X86-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X86-NEXT:    psubd %xmm1, %xmm0
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X86-NEXT:    paddd %xmm1, %xmm0
-; X86-NEXT:    movdqa %xmm0, %xmm1
-; X86-NEXT:    psrld $3, %xmm1
-; X86-NEXT:    psrld $4, %xmm0
-; X86-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test7:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X64-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; X64-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
-; X64-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; X64-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; X64-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; X64-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; X64-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; X64-NEXT:    vpmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X64-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
-; X64-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; X64-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = udiv exact <4 x i32> %x, <i32 25, i32 25, i32 27, i32 27>
   ret <4 x i32> %div
@@ -200,25 +149,22 @@ define <4 x i32> @test7(<4 x i32> %x) {
 define <4 x i32> @test8(<4 x i32> %x) {
 ; X86-LABEL: test8:
 ; X86:       # %bb.0:
-; X86-NEXT:    movdqa {{.*#+}} xmm1 = [u,u,2863311531,2863311531]
-; X86-NEXT:    movdqa %xmm0, %xmm2
-; X86-NEXT:    pmuludq %xmm1, %xmm2
-; X86-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; X86-NEXT:    pmuludq %xmm1, %xmm3
-; X86-NEXT:    punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; X86-NEXT:    psrld $4, %xmm2
-; X86-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrld $3, %xmm1
+; X86-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; X86-NEXT:    movdqa {{.*#+}} xmm0 = [1,1,2863311531,2863311531]
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test8:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
-; X64-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
-; X64-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; X64-NEXT:    vpmuludq %xmm1, %xmm3, %xmm1
-; X64-NEXT:    vpunpckhdq {{.*#+}} xmm1 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; X64-NEXT:    vpsrld $4, %xmm1, %xmm1
-; X64-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; X64-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = udiv exact <4 x i32> %x, <i32 1, i32 1, i32 24, i32 24>
   ret <4 x i32> %div

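One closing cross-check (not part of the patch): the non-uniform vector
cases such as test5 and test8 are the same identity applied per lane, a
variable logical shift (vpsrlvd on AVX2) feeding a single vpmulld. A
scalar-equivalent sketch with the per-lane constants from test5:

#include <array>
#include <cassert>
#include <cstdint>

int main() {
  // Divisors <24, 24, 25, 25> split into per-lane shifts and factors.
  std::array<uint32_t, 4> Div = {24, 24, 25, 25};
  std::array<uint32_t, 4> Shift = {3, 3, 0, 0};
  std::array<uint32_t, 4> Factor = {0xAAAAAAABu, 0xAAAAAAABu, 0xC28F5C29u,
                                    0xC28F5C29u};
  std::array<uint32_t, 4> X = {24u * 11u, 24u * 22u, 25u * 33u, 25u * 44u};
  for (size_t I = 0; I < 4; ++I)
    assert((X[I] >> Shift[I]) * Factor[I] == X[I] / Div[I]);
  return 0;
}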

