[llvm] r339756 - [TargetLowering] Add support for non-uniform vectors to BuildExactSDIV

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 15 02:35:12 PDT 2018


Author: rksimon
Date: Wed Aug 15 02:35:12 2018
New Revision: 339756

URL: http://llvm.org/viewvc/llvm-project?rev=339756&view=rev
Log:
[TargetLowering] Add support for non-uniform vectors to BuildExactSDIV

This patch refactors the existing BuildExactSDIV implementation to support non-uniform constant vector denominators.

Differential Revision: https://reviews.llvm.org/D50392
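
For context on the transform this refactor generalizes: an exact sdiv by a
constant d can be rewritten as an arithmetic shift right by
countTrailingZeros(d) followed by a multiply with the multiplicative inverse
of the shifted (now odd) divisor modulo 2^BitWidth. Below is a standalone
C++ sketch of the per-element arithmetic, using plain uint32_t in place of
APInt; the helper name and the main() harness are illustrative only, not
part of the patch.

  #include <cstdint>
  #include <cstdio>

  // Compute the (shift, factor) pair for an exact sdiv by Divisor, so that
  // x / Divisor == (x >>s Shift) * Factor (mod 2^32). Divisor must be
  // nonzero; the patch guards this with the isNullValue() check.
  static void buildExactSDIVPattern(int32_t Divisor, unsigned &Shift,
                                    uint32_t &Factor) {
    // Strip the power-of-two part with an arithmetic shift, mirroring the
    // APInt::ashrInPlace call in the patch.
    Shift = 0;
    while (!(Divisor & 1)) {
      Divisor >>= 1;
      ++Shift;
    }
    // Newton's method for the inverse of the now-odd divisor mod 2^32;
    // all arithmetic is unsigned so overflow wraps as intended.
    uint32_t D = (uint32_t)Divisor;
    uint32_t F = D;
    while (D * F != 1u)
      F *= 2u - D * F;
    Factor = F;
  }

  int main() {
    unsigned Shift;
    uint32_t Factor;
    buildExactSDIVPattern(24, Shift, Factor);
    // Prints shift=3 factor=0xaaaaaaab -- the sarl $3 / imull $-1431655765
    // pair visible in the updated test checks below.
    std::printf("shift=%u factor=0x%08x\n", Shift, Factor);
    return 0;
  }

What the patch itself changes is where these pairs come from: rather than
demanding a splat constant via isConstOrConstSplat, it runs the lambda over
every element through ISD::matchUnaryPredicate and assembles the collected
shifts and factors into build vectors for the vector case.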

Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/trunk/test/CodeGen/X86/sdiv-exact.ll

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp?rev=339756&r1=339755&r2=339756&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp Wed Aug 15 02:35:12 2018
@@ -3438,32 +3438,44 @@ static SDValue BuildExactSDIV(const Targ
   SDValue Op0 = N->getOperand(0);
   SDValue Op1 = N->getOperand(1);
   EVT VT = N->getValueType(0);
+  EVT SVT = VT.getScalarType();
   EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
+  EVT ShSVT = ShVT.getScalarType();
 
-  auto BuildSDIVPattern = [](APInt Divisor, unsigned &Shift, APInt &Factor) {
-    bool UseSRA = false;
-    Shift = Divisor.countTrailingZeros();
+  bool UseSRA = false;
+  SmallVector<SDValue, 16> Shifts, Factors;
+
+  auto BuildSDIVPattern = [&](ConstantSDNode *C) {
+    if (C->isNullValue())
+      return false;
+    APInt Divisor = C->getAPIntValue();
+    unsigned Shift = Divisor.countTrailingZeros();
     if (Shift) {
       Divisor.ashrInPlace(Shift);
       UseSRA = true;
     }
     // Calculate the multiplicative inverse, using Newton's method.
     APInt t;
-    Factor = Divisor;
+    APInt Factor = Divisor;
     while ((t = Divisor * Factor) != 1)
       Factor *= APInt(Divisor.getBitWidth(), 2) - t;
-    return UseSRA;
+    Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT));
+    Factors.push_back(DAG.getConstant(Factor, dl, SVT));
+    return true;
   };
 
-  ConstantSDNode *C = isConstOrConstSplat(Op1);
-  if (!C || C->isNullValue())
+  // Collect all magic values from the build vector.
+  if (!ISD::matchUnaryPredicate(Op1, BuildSDIVPattern))
     return SDValue();
 
-  APInt FactorVal;
-  unsigned ShiftVal;
-  bool UseSRA = BuildSDIVPattern(C->getAPIntValue(), ShiftVal, FactorVal);
-  SDValue Shift = DAG.getConstant(ShiftVal, dl, ShVT);
-  SDValue Factor = DAG.getConstant(FactorVal, dl, VT);
+  SDValue Shift, Factor;
+  if (VT.isVector()) {
+    Shift = DAG.getBuildVector(ShVT, dl, Shifts);
+    Factor = DAG.getBuildVector(VT, dl, Factors);
+  } else {
+    Shift = Shifts[0];
+    Factor = Factors[0];
+  }
 
   SDValue Res = Op0;
 

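(A note on the Newton loop in the hunk above, summarizing why it terminates
quickly; this reasoning is mine, not from the commit. If F*D == 1 (mod 2^k),
then the updated value F' = F*(2 - D*F) satisfies

  F'*D = F*D*(2 - F*D) = 1 - (1 - F*D)^2 == 1 (mod 2^2k),

so each pass doubles the number of correct low bits. Any odd D has
D*D == 1 (mod 8), so the initial guess F = D is already correct to three
bits, and at most four passes are needed for a 32-bit divisor.)
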
Modified: llvm/trunk/test/CodeGen/X86/sdiv-exact.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sdiv-exact.ll?rev=339756&r1=339755&r2=339756&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sdiv-exact.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sdiv-exact.ll Wed Aug 15 02:35:12 2018
@@ -80,45 +80,25 @@ define <4 x i32> @test4(<4 x i32> %x) {
 define <4 x i32> @test5(<4 x i32> %x) {
 ; X86-LABEL: test5:
 ; X86:       # %bb.0:
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; X86-NEXT:    movd %xmm1, %eax
-; X86-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; X86-NEXT:    movd %xmm2, %eax
-; X86-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X86-NEXT:    movd %eax, %xmm2
-; X86-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    sarl $3, %eax
-; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    sarl $3, %eax
-; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    movd %eax, %xmm0
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrad $3, %xmm1
+; X86-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X86-NEXT:    movdqa {{.*#+}} xmm2 = [2863311531,2863311531,3264175145,3264175145]
+; X86-NEXT:    movapd %xmm0, %xmm1
+; X86-NEXT:    pmuludq %xmm2, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm0, %xmm2
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
 ; X86-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; X86-NEXT:    movdqa %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test5:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpextrd $1, %xmm0, %eax
-; X64-NEXT:    sarl $3, %eax
-; X64-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X64-NEXT:    vmovd %xmm0, %ecx
-; X64-NEXT:    sarl $3, %ecx
-; X64-NEXT:    imull $-1431655765, %ecx, %ecx # imm = 0xAAAAAAAB
-; X64-NEXT:    vmovd %ecx, %xmm1
-; X64-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $2, %xmm0, %eax
-; X64-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X64-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $3, %xmm0, %eax
-; X64-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X64-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; X64-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = sdiv exact <4 x i32> %x, <i32 24, i32 24, i32 25, i32 25>
   ret <4 x i32> %div
@@ -127,49 +107,26 @@ define <4 x i32> @test5(<4 x i32> %x) {
 define <4 x i32> @test6(<4 x i32> %x) {
 ; X86-LABEL: test6:
 ; X86:       # %bb.0:
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; X86-NEXT:    movd %xmm1, %eax
-; X86-NEXT:    sarl %eax
-; X86-NEXT:    imull $-991146299, %eax, %eax # imm = 0xC4EC4EC5
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; X86-NEXT:    movd %xmm2, %eax
-; X86-NEXT:    sarl %eax
-; X86-NEXT:    imull $-991146299, %eax, %eax # imm = 0xC4EC4EC5
-; X86-NEXT:    movd %eax, %xmm2
-; X86-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    sarl $3, %eax
-; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    sarl $3, %eax
-; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    movd %eax, %xmm0
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrad $3, %xmm1
+; X86-NEXT:    psrad $1, %xmm0
+; X86-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X86-NEXT:    movdqa {{.*#+}} xmm2 = [2863311531,2863311531,3303820997,3303820997]
+; X86-NEXT:    movapd %xmm0, %xmm1
+; X86-NEXT:    pmuludq %xmm2, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm0, %xmm2
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
 ; X86-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; X86-NEXT:    movdqa %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test6:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpextrd $1, %xmm0, %eax
-; X64-NEXT:    sarl $3, %eax
-; X64-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X64-NEXT:    vmovd %xmm0, %ecx
-; X64-NEXT:    sarl $3, %ecx
-; X64-NEXT:    imull $-1431655765, %ecx, %ecx # imm = 0xAAAAAAAB
-; X64-NEXT:    vmovd %ecx, %xmm1
-; X64-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $2, %xmm0, %eax
-; X64-NEXT:    sarl %eax
-; X64-NEXT:    imull $-991146299, %eax, %eax # imm = 0xC4EC4EC5
-; X64-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $3, %xmm0, %eax
-; X64-NEXT:    sarl %eax
-; X64-NEXT:    imull $-991146299, %eax, %eax # imm = 0xC4EC4EC5
-; X64-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; X64-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = sdiv exact <4 x i32> %x, <i32 24, i32 24, i32 26, i32 26>
   ret <4 x i32> %div
@@ -178,41 +135,19 @@ define <4 x i32> @test6(<4 x i32> %x) {
 define <4 x i32> @test7(<4 x i32> %x) {
 ; X86-LABEL: test7:
 ; X86:       # %bb.0:
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; X86-NEXT:    movd %xmm1, %eax
-; X86-NEXT:    imull $1749801491, %eax, %eax # imm = 0x684BDA13
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; X86-NEXT:    movd %xmm2, %eax
-; X86-NEXT:    imull $1749801491, %eax, %eax # imm = 0x684BDA13
-; X86-NEXT:    movd %eax, %xmm2
-; X86-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X86-NEXT:    movd %eax, %xmm0
-; X86-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X86-NEXT:    movdqa %xmm1, %xmm0
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [3264175145,3264175145,1749801491,1749801491]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm2, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test7:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpextrd $1, %xmm0, %eax
-; X64-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X64-NEXT:    vmovd %xmm0, %ecx
-; X64-NEXT:    imull $-1030792151, %ecx, %ecx # imm = 0xC28F5C29
-; X64-NEXT:    vmovd %ecx, %xmm1
-; X64-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $2, %xmm0, %eax
-; X64-NEXT:    imull $1749801491, %eax, %eax # imm = 0x684BDA13
-; X64-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $3, %xmm0, %eax
-; X64-NEXT:    imull $1749801491, %eax, %eax # imm = 0x684BDA13
-; X64-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; X64-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = sdiv exact <4 x i32> %x, <i32 25, i32 25, i32 27, i32 27>
   ret <4 x i32> %div
@@ -221,33 +156,24 @@ define <4 x i32> @test7(<4 x i32> %x) {
 define <4 x i32> @test8(<4 x i32> %x) {
 ; X86-LABEL: test8:
 ; X86:       # %bb.0:
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X86-NEXT:    movd %xmm1, %eax
-; X86-NEXT:    sarl $3, %eax
-; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
-; X86-NEXT:    movaps %xmm0, %xmm2
-; X86-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,1],xmm1[0,2]
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; X86-NEXT:    movd %xmm1, %eax
-; X86-NEXT:    sarl $3, %eax
-; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm2[2,0]
-; X86-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrad $3, %xmm1
+; X86-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; X86-NEXT:    movdqa {{.*#+}} xmm2 = [1,1,2863311531,2863311531]
+; X86-NEXT:    movapd %xmm1, %xmm0
+; X86-NEXT:    pmuludq %xmm2, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test8:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpextrd $2, %xmm0, %eax
-; X64-NEXT:    sarl $3, %eax
-; X64-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X64-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm1
-; X64-NEXT:    vpextrd $3, %xmm0, %eax
-; X64-NEXT:    sarl $3, %eax
-; X64-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X64-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; X64-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = sdiv exact <4 x i32> %x, <i32 1, i32 1, i32 24, i32 24>
   ret <4 x i32> %div
