[llvm] a51f343 - [CodeGen] Emit more efficient magic numbers for exact udivs (#87161)

via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jul 17 12:19:06 PDT 2024


Author: AtariDreams
Date: 2024-07-17T12:19:02-07:00
New Revision: a51f343b433120e45f186e5507e8a522d4d7192f

URL: https://github.com/llvm/llvm-project/commit/a51f343b433120e45f186e5507e8a522d4d7192f
DIFF: https://github.com/llvm/llvm-project/commit/a51f343b433120e45f186e5507e8a522d4d7192f.diff

LOG: [CodeGen] Emit more efficient magic numbers for exact udivs (#87161)

Use a simpler lowering for exact udivs in both SelectionDAG and
GlobalISel.

The algorithm is the same for exact unsigned division as for exact
signed division, except that even divisors take a logical rather than
an arithmetic pre-shift; see Hacker's Delight, 2nd Edition, page 242.
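
For reference, here is a minimal standalone C++ sketch (not part of the
commit) of the arithmetic this relies on: an exact udiv by
d = odd * 2^k is lowered to a logical shift right by k followed by a
multiply by the inverse of the odd factor modulo 2^BW. The helper names
below are illustrative, not LLVM APIs; the committed code computes the
inverse with APInt::multiplicativeInverse, which this sketch mimics
with Newton's iteration (C++20 for std::countr_zero):

  #include <bit>
  #include <cassert>
  #include <cstdint>

  // Multiplicative inverse of an odd value modulo 2^32 via Newton's
  // iteration: x = d is already an inverse mod 8 for odd d, and each
  // step doubles the number of correct low bits, so four steps
  // suffice for 32 bits.
  static uint32_t inverseMod2_32(uint32_t d) {
    uint32_t x = d;
    for (int i = 0; i < 4; ++i)
      x *= 2 - d * x;
    return x;
  }

  // Exact udiv x / d (d nonzero): shift out the power-of-two part of
  // the divisor, then multiply by the inverse of the remaining odd
  // factor.
  static uint32_t exactUDiv(uint32_t x, uint32_t d) {
    unsigned Shift = std::countr_zero(d);
    return (x >> Shift) * inverseMod2_32(d >> Shift);
  }

  int main() {
    // 24 = 3 << 3; the inverse of 3 mod 2^32 is 0xAAAAAAAB, the magic
    // constant that appears in the new X86 and AArch64 tests below.
    assert(inverseMod2_32(3) == 0xAAAAAAABu);
    assert(inverseMod2_32(25) == 0xC28F5C29u);
    assert(exactUDiv(240, 24) == 10);
    assert(exactUDiv(25 * 123, 25) == 123);
  }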

Added: 
    llvm/test/CodeGen/X86/udiv-exact.ll

Modified: 
    llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
    llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index c27b882f17003..dfc3d73e322b8 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -5183,8 +5183,35 @@ MachineInstr *CombinerHelper::buildUDivUsingMul(MachineInstr &MI) {
       KB ? KB->getKnownBits(LHS).countMinLeadingZeros() : 0;
   auto &MIB = Builder;
 
+  bool UseSRL = false;
   bool UseNPQ = false;
   SmallVector<Register, 16> PreShifts, PostShifts, MagicFactors, NPQFactors;
+  SmallVector<Register, 16> Shifts, Factors;
+  auto *RHSDefInstr = cast<GenericMachineInstr>(getDefIgnoringCopies(RHS, MRI));
+  bool IsSplat = getIConstantSplatVal(*RHSDefInstr, MRI).has_value();
+
+  auto BuildExactUDIVPattern = [&](const Constant *C) {
+    // Don't recompute inverses for each splat element.
+    if (IsSplat && !Factors.empty()) {
+      Shifts.push_back(Shifts[0]);
+      Factors.push_back(Factors[0]);
+      return true;
+    }
+
+    auto *CI = cast<ConstantInt>(C);
+    APInt Divisor = CI->getValue();
+    unsigned Shift = Divisor.countr_zero();
+    if (Shift) {
+      Divisor.lshrInPlace(Shift);
+      UseSRL = true;
+    }
+
+    // Calculate the multiplicative inverse modulo BW.
+    APInt Factor = Divisor.multiplicativeInverse();
+    Shifts.push_back(MIB.buildConstant(ScalarShiftAmtTy, Shift).getReg(0));
+    Factors.push_back(MIB.buildConstant(ScalarTy, Factor).getReg(0));
+    return true;
+  };
 
   auto BuildUDIVPattern = [&](const Constant *C) {
     auto *CI = cast<ConstantInt>(C);
@@ -5231,6 +5258,29 @@ MachineInstr *CombinerHelper::buildUDivUsingMul(MachineInstr &MI) {
     return true;
   };
 
+  if (MI.getFlag(MachineInstr::MIFlag::IsExact)) {
+    // Collect all magic values from the build vector.
+    bool Matched = matchUnaryPredicate(MRI, RHS, BuildExactUDIVPattern);
+    (void)Matched;
+    assert(Matched && "Expected unary predicate match to succeed");
+
+    Register Shift, Factor;
+    if (Ty.isVector()) {
+      Shift = MIB.buildBuildVector(ShiftAmtTy, Shifts).getReg(0);
+      Factor = MIB.buildBuildVector(Ty, Factors).getReg(0);
+    } else {
+      Shift = Shifts[0];
+      Factor = Factors[0];
+    }
+
+    Register Res = LHS;
+
+    if (UseSRL)
+      Res = MIB.buildLShr(Ty, Res, Shift, MachineInstr::IsExact).getReg(0);
+
+    return MIB.buildMul(Ty, Res, Factor);
+  }
+
   // Collect the shifts/magic values from each element.
   bool Matched = matchUnaryPredicate(MRI, RHS, BuildUDIVPattern);
   (void)Matched;
@@ -5283,9 +5333,6 @@ bool CombinerHelper::matchUDivByConst(MachineInstr &MI) {
   Register Dst = MI.getOperand(0).getReg();
   Register RHS = MI.getOperand(2).getReg();
   LLT DstTy = MRI.getType(Dst);
-  auto *RHSDef = MRI.getVRegDef(RHS);
-  if (!isConstantOrConstantVector(*RHSDef, MRI))
-    return false;
 
   auto &MF = *MI.getMF();
   AttributeList Attr = MF.getFunction().getAttributes();
@@ -5300,6 +5347,15 @@ bool CombinerHelper::matchUDivByConst(MachineInstr &MI) {
   if (MF.getFunction().hasMinSize())
     return false;
 
+  if (MI.getFlag(MachineInstr::MIFlag::IsExact)) {
+    return matchUnaryPredicate(
+        MRI, RHS, [](const Constant *C) { return C && !C->isNullValue(); });
+  }
+
+  auto *RHSDef = MRI.getVRegDef(RHS);
+  if (!isConstantOrConstantVector(*RHSDef, MRI))
+    return false;
+
   // Don't do this if the types are not going to be legal.
   if (LI) {
     if (!isLegalOrBeforeLegalizer({TargetOpcode::G_MUL, {DstTy, DstTy}}))

diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index adf14bd007356..c3a20b5044c5f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -6092,6 +6092,7 @@ void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
 
 /// Given an exact SDIV by a constant, create a multiplication
 /// with the multiplicative inverse of the constant.
+/// Ref: "Hacker's Delight" by Henry Warren, 2nd Edition, p. 242
 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N,
                               const SDLoc &dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDNode *> &Created) {
@@ -6141,10 +6142,7 @@ static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N,
   }
 
   SDValue Res = Op0;
-
-  // Shift the value upfront if it is even, so the LSB is one.
   if (UseSRA) {
-    // TODO: For UDIV use SRL instead of SRA.
     SDNodeFlags Flags;
     Flags.setExact(true);
     Res = DAG.getNode(ISD::SRA, dl, VT, Res, Shift, Flags);
@@ -6154,6 +6152,69 @@ static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N,
   return DAG.getNode(ISD::MUL, dl, VT, Res, Factor);
 }
 
+/// Given an exact UDIV by a constant, create a multiplication
+/// with the multiplicative inverse of the constant.
+/// Ref: "Hacker's Delight" by Henry Warren, 2nd Edition, p. 242
+static SDValue BuildExactUDIV(const TargetLowering &TLI, SDNode *N,
+                              const SDLoc &dl, SelectionDAG &DAG,
+                              SmallVectorImpl<SDNode *> &Created) {
+  EVT VT = N->getValueType(0);
+  EVT SVT = VT.getScalarType();
+  EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
+  EVT ShSVT = ShVT.getScalarType();
+
+  bool UseSRL = false;
+  SmallVector<SDValue, 16> Shifts, Factors;
+
+  auto BuildUDIVPattern = [&](ConstantSDNode *C) {
+    if (C->isZero())
+      return false;
+    APInt Divisor = C->getAPIntValue();
+    unsigned Shift = Divisor.countr_zero();
+    if (Shift) {
+      Divisor.lshrInPlace(Shift);
+      UseSRL = true;
+    }
+    // Calculate the multiplicative inverse modulo BW.
+    APInt Factor = Divisor.multiplicativeInverse();
+    Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT));
+    Factors.push_back(DAG.getConstant(Factor, dl, SVT));
+    return true;
+  };
+
+  SDValue Op1 = N->getOperand(1);
+
+  // Collect all magic values from the build vector.
+  if (!ISD::matchUnaryPredicate(Op1, BuildUDIVPattern))
+    return SDValue();
+
+  SDValue Shift, Factor;
+  if (Op1.getOpcode() == ISD::BUILD_VECTOR) {
+    Shift = DAG.getBuildVector(ShVT, dl, Shifts);
+    Factor = DAG.getBuildVector(VT, dl, Factors);
+  } else if (Op1.getOpcode() == ISD::SPLAT_VECTOR) {
+    assert(Shifts.size() == 1 && Factors.size() == 1 &&
+           "Expected matchUnaryPredicate to return one element for scalable "
+           "vectors");
+    Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]);
+    Factor = DAG.getSplatVector(VT, dl, Factors[0]);
+  } else {
+    assert(isa<ConstantSDNode>(Op1) && "Expected a constant");
+    Shift = Shifts[0];
+    Factor = Factors[0];
+  }
+
+  SDValue Res = N->getOperand(0);
+  if (UseSRL) {
+    SDNodeFlags Flags;
+    Flags.setExact(true);
+    Res = DAG.getNode(ISD::SRL, dl, VT, Res, Shift, Flags);
+    Created.push_back(Res.getNode());
+  }
+
+  return DAG.getNode(ISD::MUL, dl, VT, Res, Factor);
+}
+
 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                               SelectionDAG &DAG,
                               SmallVectorImpl<SDNode *> &Created) const {
@@ -6413,6 +6474,10 @@ SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
       return SDValue();
   }
 
+  // If the udiv has an 'exact' bit we can use a simpler lowering.
+  if (N->getFlags().hasExact())
+    return BuildExactUDIV(*this, N, dl, DAG, Created);
+
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
index c97a00ccdd455..d465e0237201b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.ll
@@ -269,3 +269,45 @@ define i32 @udiv_div_by_180(i32 %x)
   %udiv = udiv i32 %truncate, 180
   ret i32 %udiv
 }
+
+define i32 @udiv_div_by_180_exact(i32 %x)
+; SDAG-LABEL: udiv_div_by_180_exact:
+; SDAG:       // %bb.0:
+; SDAG-NEXT:    lsr w8, w0, #2
+; SDAG-NEXT:    mov w9, #20389 // =0x4fa5
+; SDAG-NEXT:    movk w9, #42234, lsl #16
+; SDAG-NEXT:    mul w0, w8, w9
+; SDAG-NEXT:    ret
+;
+; GISEL-LABEL: udiv_div_by_180_exact:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    lsr w8, w0, #2
+; GISEL-NEXT:    mov w9, #20389 // =0x4fa5
+; GISEL-NEXT:    movk w9, #42234, lsl #16
+; GISEL-NEXT:    mul w0, w8, w9
+; GISEL-NEXT:    ret
+{
+  %udiv = udiv exact i32 %x, 180
+  ret i32 %udiv
+}
+
+define <4 x i32> @udiv_div_by_104_exact(<4 x i32> %x)
+; SDAG-LABEL: udiv_div_by_104_exact:
+; SDAG:       // %bb.0:
+; SDAG-NEXT:    adrp x8, .LCPI8_0
+; SDAG-NEXT:    ushr v0.4s, v0.4s, #3
+; SDAG-NEXT:    ldr q1, [x8, :lo12:.LCPI8_0]
+; SDAG-NEXT:    mul v0.4s, v0.4s, v1.4s
+; SDAG-NEXT:    ret
+;
+; GISEL-LABEL: udiv_div_by_104_exact:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    adrp x8, .LCPI8_0
+; GISEL-NEXT:    ushr v0.4s, v0.4s, #3
+; GISEL-NEXT:    ldr q1, [x8, :lo12:.LCPI8_0]
+; GISEL-NEXT:    mul v0.4s, v0.4s, v1.4s
+; GISEL-NEXT:    ret
+{
+  %udiv = udiv exact <4 x i32> %x, <i32 104, i32 72, i32 104, i32 72>
+  ret <4 x i32> %udiv
+}

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir
index 02233b9f498bd..f8578a694e2d4 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-udiv.mir
@@ -304,5 +304,127 @@ body:             |
     %10:_(<8 x s16>) = G_UDIV %0, %1
     $q0 = COPY %10(<8 x s16>)
     RET_ReallyLR implicit $q0
+...
+---
+name:            udiv_exact
+body:             |
+  bb.1:
+    liveins: $w0
+
+    ; CHECK-LABEL: name: udiv_exact
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -991146299
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = exact G_LSHR [[COPY]], [[C]](s32)
+    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+    ; CHECK-NEXT: $w0 = COPY [[MUL]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = G_CONSTANT i32 104
+    %2:_(s32) = exact G_UDIV %0, %1
+    $w0 = COPY %2(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            udiv_noexact
+body:             |
+  bb.1:
+    liveins: $w0
+
+    ; CHECK-LABEL: name: udiv_noexact
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1321528399
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+    ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[COPY]], [[C]]
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UMULH]], [[C1]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[LSHR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = G_CONSTANT i32 104
+    %2:_(s32) = G_UDIV %0, %1
+    $w0 = COPY %2(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            udiv_exact_minsize
+body:             |
+  bb.1:
+    liveins: $w0
+
+    ; CHECK-LABEL: name: udiv_exact_minsize
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -991146299
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = exact G_LSHR [[COPY]], [[C]](s32)
+    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[LSHR]], [[C1]]
+    ; CHECK-NEXT: $w0 = COPY [[MUL]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = G_CONSTANT i32 104
+    %2:_(s32) = exact G_UDIV %0, %1
+    $w0 = COPY %2(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            div_v4s32
+body:             |
+  bb.1:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: div_v4s32
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -991146299
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 954437177
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C2]](s32), [[C1]](s32), [[C2]](s32)
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<4 x s32>) = exact G_LSHR [[COPY]], [[BUILD_VECTOR]](<4 x s32>)
+    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<4 x s32>) = G_MUL [[LSHR]], [[BUILD_VECTOR1]]
+    ; CHECK-NEXT: $q0 = COPY [[MUL]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
+    %0:_(<4 x s32>) = COPY $q0
+    %c1:_(s32) = G_CONSTANT i32 104
+    %c2:_(s32) = G_CONSTANT i32 72
+    %1:_(<4 x s32>) = G_BUILD_VECTOR %c1(s32), %c2(s32), %c1(s32), %c2(s32)
+    %3:_(<4 x s32>) = exact G_UDIV %0, %1
+    $q0 = COPY %3(<4 x s32>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            div_v4s32_splat
+body:             |
+  bb.1:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: div_v4s32_splat
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -991146299
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<4 x s32>) = exact G_LSHR [[COPY]], [[BUILD_VECTOR]](<4 x s32>)
+    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<4 x s32>) = G_MUL [[LSHR]], [[BUILD_VECTOR1]]
+    ; CHECK-NEXT: $q0 = COPY [[MUL]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
+    %0:_(<4 x s32>) = COPY $q0
+    %c1:_(s32) = G_CONSTANT i32 104
+    %1:_(<4 x s32>) = G_BUILD_VECTOR %c1(s32), %c1(s32), %c1(s32), %c1(s32)
+    %3:_(<4 x s32>) = exact G_UDIV %0, %1
+    $q0 = COPY %3(<4 x s32>)
+    RET_ReallyLR implicit $q0
 
 ...

diff --git a/llvm/test/CodeGen/X86/udiv-exact.ll b/llvm/test/CodeGen/X86/udiv-exact.ll
new file mode 100644
index 0000000000000..271d11edff9a7
--- /dev/null
+++ b/llvm/test/CodeGen/X86/udiv-exact.ll
@@ -0,0 +1,171 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
+
+define i32 @test1(i32 %x) {
+; X86-LABEL: test1:
+; X86:       # %bb.0:
+; X86-NEXT:    imull $-1030792151, {{[0-9]+}}(%esp), %eax # imm = 0xC28F5C29
+; X86-NEXT:    retl
+;
+; X64-LABEL: test1:
+; X64:       # %bb.0:
+; X64-NEXT:    imull $-1030792151, %edi, %eax # imm = 0xC28F5C29
+; X64-NEXT:    retq
+  %div = udiv exact i32 %x, 25
+  ret i32 %div
+}
+
+define i32 @test2(i32 %x) {
+; X86-LABEL: test2:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    shrl $3, %eax
+; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
+; X86-NEXT:    retl
+;
+; X64-LABEL: test2:
+; X64:       # %bb.0:
+; X64-NEXT:    shrl $3, %edi
+; X64-NEXT:    imull $-1431655765, %edi, %eax # imm = 0xAAAAAAAB
+; X64-NEXT:    retq
+  %div = udiv exact i32 %x, 24
+  ret i32 %div
+}
+
+define <4 x i32> @test3(<4 x i32> %x) {
+; X86-LABEL: test3:
+; X86:       # %bb.0:
+; X86-NEXT:    psrld $3, %xmm0
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT:    retl
+;
+; X64-LABEL: test3:
+; X64:       # %bb.0:
+; X64-NEXT:    vpsrld $3, %xmm0, %xmm0
+; X64-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
+; X64-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; X64-NEXT:    retq
+  %div = udiv exact <4 x i32> %x, <i32 24, i32 24, i32 24, i32 24>
+  ret <4 x i32> %div
+}
+
+define <4 x i32> @test4(<4 x i32> %x) {
+; X86-LABEL: test4:
+; X86:       # %bb.0:
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    pmuludq %xmm1, %xmm2
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT:    retl
+;
+; X64-LABEL: test4:
+; X64:       # %bb.0:
+; X64-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145]
+; X64-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; X64-NEXT:    retq
+  %div = udiv exact <4 x i32> %x, <i32 25, i32 25, i32 25, i32 25>
+  ret <4 x i32> %div
+}
+
+define <4 x i32> @test5(<4 x i32> %x) {
+; X86-LABEL: test5:
+; X86:       # %bb.0:
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrld $3, %xmm1
+; X86-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3]
+; X86-NEXT:    movdqa {{.*#+}} xmm0 = [2863311531,2863311531,3264175145,3264175145]
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT:    retl
+;
+; X64-LABEL: test5:
+; X64:       # %bb.0:
+; X64-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    retq
+  %div = udiv exact <4 x i32> %x, <i32 24, i32 24, i32 25, i32 25>
+  ret <4 x i32> %div
+}
+
+define <4 x i32> @test6(<4 x i32> %x) {
+; X86-LABEL: test6:
+; X86:       # %bb.0:
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrld $3, %xmm1
+; X86-NEXT:    psrld $1, %xmm0
+; X86-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [2863311531,2863311531,3303820997,3303820997]
+; X86-NEXT:    pmuludq %xmm0, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-NEXT:    movdqa %xmm1, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: test6:
+; X64:       # %bb.0:
+; X64-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    retq
+  %div = udiv exact <4 x i32> %x, <i32 24, i32 24, i32 26, i32 26>
+  ret <4 x i32> %div
+}
+
+define <4 x i32> @test7(<4 x i32> %x) {
+; X86-LABEL: test7:
+; X86:       # %bb.0:
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT:    retl
+;
+; X64-LABEL: test7:
+; X64:       # %bb.0:
+; X64-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    retq
+  %div = udiv exact <4 x i32> %x, <i32 25, i32 25, i32 27, i32 27>
+  ret <4 x i32> %div
+}
+
+define <4 x i32> @test8(<4 x i32> %x) {
+; X86-LABEL: test8:
+; X86:       # %bb.0:
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrld $3, %xmm1
+; X86-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; X86-NEXT:    movdqa {{.*#+}} xmm0 = [1,1,2863311531,2863311531]
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X86-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT:    retl
+;
+; X64-LABEL: test8:
+; X64:       # %bb.0:
+; X64-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    retq
+  %div = udiv exact <4 x i32> %x, <i32 1, i32 1, i32 24, i32 24>
+  ret <4 x i32> %div
+}
