[llvm] r368702 - [CodeGen][SelectionDAG] More efficient code for X % C == 0 (SREM case)

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 13 07:57:37 PDT 2019


Author: lebedevri
Date: Tue Aug 13 07:57:37 2019
New Revision: 368702

URL: http://llvm.org/viewvc/llvm-project?rev=368702&view=rev
Log:
[CodeGen][SelectionDAG] More efficient code for X % C == 0 (SREM case)

Summary:
This implements an optimization described in Hacker's Delight 10-17:
when `C` is constant, the result of `X % C == 0` can be computed
more cheaply without actually calculating the remainder.
The motivation is discussed here: https://bugs.llvm.org/show_bug.cgi?id=35479.

One huge caveat: this signed case is only valid for positive divisors.

While we can freely negate negative divisors, we can't negate `INT_MIN`,
so for now, if `INT_MIN` is encountered, we bail out.
As a follow-up, it should be possible to handle that more gracefully
via extra `and`+`setcc`+`select`.
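
As an illustration (a stand-alone sketch, not part of the patch; the helper
name is made up), here is the fold for `X % 5 == 0` on i32, using the exact
constants that appear in the updated scalar tests below: P = 0xCCCCCCCD (the
multiplicative inverse of 5 mod 2^32), A = 0x19999999, Q = 0x33333332. The
divisor is odd, so K = 0 and no rotate is needed; the tests compare with the
equivalent `u< 0x33333333`.

    #include <cassert>
    #include <cstdint>

    static bool srem5_is_zero(int32_t X) {
      // (seteq (srem X, 5), 0)  ->  (setule (add (mul X, P), A), Q)
      uint32_t V = (uint32_t)X * 0xCCCCCCCDu + 0x19999999u;
      return V <= 0x33333332u;
    }

    int main() {
      for (int64_t X = -100000; X <= 100000; ++X)
        assert(srem5_is_zero((int32_t)X) == ((int32_t)X % 5 == 0));
    }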

This passes LLVM's test-suite; from a cursory(!) cross-examination,
the folds (the generated assembly) match those of GCC, and manual checking
via Alive did not reveal any issues (other than the `INT_MIN` case).

Reviewers: RKSimon, spatel, hermord, craig.topper, xbolva00

Reviewed By: RKSimon, xbolva00

Subscribers: xbolva00, thakis, javed.absar, hiraditya, dexonsmith, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D65366

Modified:
    llvm/trunk/include/llvm/CodeGen/TargetLowering.h
    llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/trunk/test/CodeGen/AArch64/srem-seteq-optsize.ll
    llvm/trunk/test/CodeGen/AArch64/srem-seteq-vec-nonsplat.ll
    llvm/trunk/test/CodeGen/AArch64/srem-seteq-vec-splat.ll
    llvm/trunk/test/CodeGen/AArch64/srem-seteq.ll
    llvm/trunk/test/CodeGen/X86/srem-seteq-optsize.ll
    llvm/trunk/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll
    llvm/trunk/test/CodeGen/X86/srem-seteq-vec-splat.ll
    llvm/trunk/test/CodeGen/X86/srem-seteq.ll
    llvm/trunk/test/CodeGen/X86/vselect-avx.ll

Modified: llvm/trunk/include/llvm/CodeGen/TargetLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/TargetLowering.h?rev=368702&r1=368701&r2=368702&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/TargetLowering.h (original)
+++ llvm/trunk/include/llvm/CodeGen/TargetLowering.h Tue Aug 13 07:57:37 2019
@@ -4164,6 +4164,14 @@ private:
   SDValue buildUREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
                           ISD::CondCode Cond, DAGCombinerInfo &DCI,
                           const SDLoc &DL) const;
+
+  SDValue prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
+                            SDValue CompTargetNode, ISD::CondCode Cond,
+                            DAGCombinerInfo &DCI, const SDLoc &DL,
+                            SmallVectorImpl<SDNode *> &Created) const;
+  SDValue buildSREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
+                          ISD::CondCode Cond, DAGCombinerInfo &DCI,
+                          const SDLoc &DL) const;
 };
 
 /// Given an LLVM IR type and return type attributes, compute the return value

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp?rev=368702&r1=368701&r2=368702&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp Tue Aug 13 07:57:37 2019
@@ -3802,15 +3802,21 @@ SDValue TargetLowering::SimplifySetCC(EV
   }
 
   // Fold remainder of division by a constant.
-  if (N0.getOpcode() == ISD::UREM && N0.hasOneUse() &&
-      (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
+  if ((N0.getOpcode() == ISD::UREM || N0.getOpcode() == ISD::SREM) &&
+      N0.hasOneUse() && (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
     AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
 
     // When division is cheap or optimizing for minimum size,
     // fall through to DIVREM creation by skipping this fold.
-    if (!isIntDivCheap(VT, Attr) && !Attr.hasFnAttribute(Attribute::MinSize))
-      if (SDValue Folded = buildUREMEqFold(VT, N0, N1, Cond, DCI, dl))
-        return Folded;
+    if (!isIntDivCheap(VT, Attr) && !Attr.hasFnAttribute(Attribute::MinSize)) {
+      if (N0.getOpcode() == ISD::UREM) {
+        if (SDValue Folded = buildUREMEqFold(VT, N0, N1, Cond, DCI, dl))
+          return Folded;
+      } else if (N0.getOpcode() == ISD::SREM) {
+        if (SDValue Folded = buildSREMEqFold(VT, N0, N1, Cond, DCI, dl))
+          return Folded;
+      }
+    }
   }
 
   // Fold away ALL boolean setcc's.
@@ -5003,6 +5009,216 @@ TargetLowering::prepareUREMEqFold(EVT SE
   return DAG.getSetCC(DL, SETCCVT, Op0, QVal,
                       ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
 }
+
+/// Given an ISD::SREM used only by an ISD::SETEQ or ISD::SETNE
+/// where the divisor is constant and the comparison target is zero,
+/// return a DAG expression that will generate the same comparison result
+/// using only multiplications, additions and shifts/rotations.
+/// Ref: "Hacker's Delight" 10-17.
+SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT, SDValue REMNode,
+                                        SDValue CompTargetNode,
+                                        ISD::CondCode Cond,
+                                        DAGCombinerInfo &DCI,
+                                        const SDLoc &DL) const {
+  SmallVector<SDNode *, 3> Built;
+  if (SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond,
+                                         DCI, DL, Built)) {
+    for (SDNode *N : Built)
+      DCI.AddToWorklist(N);
+    return Folded;
+  }
+
+  return SDValue();
+}
+
+SDValue
+TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
+                                  SDValue CompTargetNode, ISD::CondCode Cond,
+                                  DAGCombinerInfo &DCI, const SDLoc &DL,
+                                  SmallVectorImpl<SDNode *> &Created) const {
+  // Fold:
+  //   (seteq/ne (srem N, D), 0)
+  // To:
+  //   (setule/ugt (rotr (add (mul N, P), A), K), Q)
+  //
+  // - D must be constant, with D = D0 * 2^K where D0 is odd
+  // - P is the multiplicative inverse of D0 modulo 2^W
+  // - A = bitwiseand(floor((2^(W - 1) - 1) / D0), (-(2^K)))
+  // - Q = floor((2 * A) / (2^K))
+  // where W is the width of the common type of N and D.
+  assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
+         "Only applicable for (in)equality comparisons.");
+
+  SelectionDAG &DAG = DCI.DAG;
+
+  EVT VT = REMNode.getValueType();
+  EVT SVT = VT.getScalarType();
+  EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
+  EVT ShSVT = ShVT.getScalarType();
+
+  // If MUL is unavailable, we cannot proceed in any case.
+  if (!isOperationLegalOrCustom(ISD::MUL, VT))
+    return SDValue();
+
+  // TODO: Could support comparing with non-zero too.
+  ConstantSDNode *CompTarget = isConstOrConstSplat(CompTargetNode);
+  if (!CompTarget || !CompTarget->isNullValue())
+    return SDValue();
+
+  bool HadOneDivisor = false;
+  bool AllDivisorsAreOnes = true;
+  bool HadEvenDivisor = false;
+  bool NeedToApplyOffset = false;
+  bool AllDivisorsArePowerOfTwo = true;
+  SmallVector<SDValue, 16> PAmts, AAmts, KAmts, QAmts;
+
+  auto BuildSREMPattern = [&](ConstantSDNode *C) {
+    // Division by 0 is UB. Leave it to be constant-folded elsewhere.
+    if (C->isNullValue())
+      return false;
+
+    // FIXME: we don't fold `rem %X, -C` to `rem %X, C` in DAGCombine.
+
+    // WARNING: this fold is only valid for positive divisors!
+    APInt D = C->getAPIntValue();
+    if (D.isMinSignedValue())
+      return false; // We can't negate INT_MIN.
+    if (D.isNegative())
+      D.negate(); //  `rem %X, -C` is equivalent to `rem %X, C`
+
+    assert(!D.isNegative() && "The fold is only valid for positive divisors!");
+
+    // If all divisors are ones, we will prefer to avoid the fold.
+    HadOneDivisor |= D.isOneValue();
+    AllDivisorsAreOnes &= D.isOneValue();
+
+    // Decompose D into D0 * 2^K
+    unsigned K = D.countTrailingZeros();
+    assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
+    APInt D0 = D.lshr(K);
+
+    // D is even if it has trailing zeros.
+    HadEvenDivisor |= (K != 0);
+    // D is a power-of-two if D0 is one.
+    // If all divisors are power-of-two, we will prefer to avoid the fold.
+    AllDivisorsArePowerOfTwo &= D0.isOneValue();
+
+    // P = inv(D0, 2^W)
+    // 2^W requires W + 1 bits, so we have to extend and then truncate.
+    unsigned W = D.getBitWidth();
+    APInt P = D0.zext(W + 1)
+                  .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
+                  .trunc(W);
+    assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
+    assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");
+
+    // A = floor((2^(W - 1) - 1) / D0) & -2^K
+    APInt A = APInt::getSignedMaxValue(W).udiv(D0);
+    A.clearLowBits(K);
+
+    NeedToApplyOffset |= A != 0;
+
+    // Q = floor((2 * A) / (2^K))
+    APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K));
+
+    assert(APInt::getAllOnesValue(SVT.getSizeInBits()).ugt(A) &&
+           "We are expecting that A is always less than all-ones for SVT");
+    assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
+           "We are expecting that K is always less than all-ones for ShSVT");
+
+    // If the divisor is 1 the result can be constant-folded.
+    if (D.isOneValue()) {
+      // Set P, A and K to bogus values so we can try to splat them.
+      P = 0;
+      A = -1;
+      K = -1;
+
+      // x ?% 1 == 0  <-->  true  <-->  x u<= -1
+      Q = -1;
+    }
+
+    PAmts.push_back(DAG.getConstant(P, DL, SVT));
+    AAmts.push_back(DAG.getConstant(A, DL, SVT));
+    KAmts.push_back(
+        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
+    QAmts.push_back(DAG.getConstant(Q, DL, SVT));
+    return true;
+  };
+
+  SDValue N = REMNode.getOperand(0);
+  SDValue D = REMNode.getOperand(1);
+
+  // Collect the values from each element.
+  if (!ISD::matchUnaryPredicate(D, BuildSREMPattern))
+    return SDValue();
+
+  // If this is a srem by one, avoid the fold since it can be constant-folded.
+  if (AllDivisorsAreOnes)
+    return SDValue();
+
+  // If this is a srem by a power-of-two, avoid the fold since it is
+  // best implemented as a bit test.
+  if (AllDivisorsArePowerOfTwo)
+    return SDValue();
+
+  SDValue PVal, AVal, KVal, QVal;
+  if (VT.isVector()) {
+    if (HadOneDivisor) {
+      // Try to turn PAmts into a splat, since we don't care about the values
+      // that are currently '0'. If we can't, just keep '0's.
+      turnVectorIntoSplatVector(PAmts, isNullConstant);
+      // Try to turn AAmts into a splat, since we don't care about the
+      // values that are currently '-1'. If we can't, change them to '0's.
+      turnVectorIntoSplatVector(AAmts, isAllOnesConstant,
+                                DAG.getConstant(0, DL, SVT));
+      // Try to turn KAmts into a splat, since we don't care about the values
+      // that are currently '-1'. If we can't, change them to '0's.
+      turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
+                                DAG.getConstant(0, DL, ShSVT));
+    }
+
+    PVal = DAG.getBuildVector(VT, DL, PAmts);
+    AVal = DAG.getBuildVector(VT, DL, AAmts);
+    KVal = DAG.getBuildVector(ShVT, DL, KAmts);
+    QVal = DAG.getBuildVector(VT, DL, QAmts);
+  } else {
+    PVal = PAmts[0];
+    AVal = AAmts[0];
+    KVal = KAmts[0];
+    QVal = QAmts[0];
+  }
+
+  // (mul N, P)
+  SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
+  Created.push_back(Op0.getNode());
+
+  if (NeedToApplyOffset) {
+    // We need ADD to do this.
+    if (!isOperationLegalOrCustom(ISD::ADD, VT))
+      return SDValue();
+
+    // (add (mul N, P), A)
+    Op0 = DAG.getNode(ISD::ADD, DL, VT, Op0, AVal);
+    Created.push_back(Op0.getNode());
+  }
+
+  // Rotate right only if any divisor was even. We avoid rotates for all-odd
+  // divisors as a performance improvement, since rotating by 0 is a no-op.
+  if (HadEvenDivisor) {
+    // We need ROTR to do this.
+    if (!isOperationLegalOrCustom(ISD::ROTR, VT))
+      return SDValue();
+    SDNodeFlags Flags;
+    Flags.setExact(true);
+    // SREM: (rotr (add (mul N, P), A), K)
+    Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags);
+    Created.push_back(Op0.getNode());
+  }
+
+  // SREM: (setule/setugt (rotr (add (mul N, P), A), K), Q)
+  return DAG.getSetCC(DL, SETCCVT, Op0, QVal,
+                      ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
+}
 
 bool TargetLowering::
 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
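
To make the P/A/K/Q recipe in the comment above concrete: for D = 100 = 25 * 2^2
(the `test_srem_even_100` case in the AArch64 diff below), D0 = 25 and K = 2, so
P = inv(25) mod 2^32 = 0xC28F5C29, A = ((2^31 - 1) / 25) & -4 = 0x051EB850, and
Q = (2 * A) >> 2 = 0x028F5C28. P and A are the `mov`/`movk` pairs feeding the
`madd`, K is the `ror #2`, and the test compares unsigned-lower against
Q + 1 = 0x028F5C29, which is equivalent to `u<= Q`. A stand-alone sketch (not
LLVM code; the helper name is made up) verifying the identity:

    #include <cassert>
    #include <cstdint>

    static bool srem100_is_zero(int32_t X) {
      // (seteq (srem X, 100), 0) -> (setule (rotr (add (mul X, P), A), K), Q)
      uint32_t V = (uint32_t)X * 0xC28F5C29u + 0x051EB850u;
      V = (V >> 2) | (V << 30); // rotr by K = 2
      return V <= 0x028F5C28u;
    }

    int main() {
      for (int64_t X = -1000000; X <= 1000000; ++X)
        assert(srem100_is_zero((int32_t)X) == ((int32_t)X % 100 == 0));
    }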

Modified: llvm/trunk/test/CodeGen/AArch64/srem-seteq-optsize.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/srem-seteq-optsize.ll?rev=368702&r1=368701&r2=368702&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/srem-seteq-optsize.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/srem-seteq-optsize.ll Tue Aug 13 07:57:37 2019
@@ -21,17 +21,16 @@ define i32 @test_minsize(i32 %X) optsize
 define i32 @test_optsize(i32 %X) optsize nounwind readnone {
 ; CHECK-LABEL: test_optsize:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #26215
-; CHECK-NEXT:    movk w8, #26214, lsl #16
-; CHECK-NEXT:    smull x8, w0, w8
-; CHECK-NEXT:    lsr x10, x8, #63
-; CHECK-NEXT:    asr x8, x8, #33
-; CHECK-NEXT:    add w8, w8, w10
-; CHECK-NEXT:    add w8, w8, w8, lsl #2
-; CHECK-NEXT:    mov w9, #-10
-; CHECK-NEXT:    cmp w0, w8
+; CHECK-NEXT:    mov w8, #52429
+; CHECK-NEXT:    mov w9, #39321
+; CHECK-NEXT:    movk w8, #52428, lsl #16
+; CHECK-NEXT:    movk w9, #6553, lsl #16
+; CHECK-NEXT:    mov w10, #858993459
+; CHECK-NEXT:    madd w8, w0, w8, w9
+; CHECK-NEXT:    mov w11, #-10
+; CHECK-NEXT:    cmp w8, w10
 ; CHECK-NEXT:    mov w8, #42
-; CHECK-NEXT:    csel w0, w8, w9, eq
+; CHECK-NEXT:    csel w0, w8, w11, lo
 ; CHECK-NEXT:    ret
   %rem = srem i32 %X, 5
   %cmp = icmp eq i32 %rem, 0

Modified: llvm/trunk/test/CodeGen/AArch64/srem-seteq-vec-nonsplat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/srem-seteq-vec-nonsplat.ll?rev=368702&r1=368701&r2=368702&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/srem-seteq-vec-nonsplat.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/srem-seteq-vec-nonsplat.ll Tue Aug 13 07:57:37 2019
@@ -37,27 +37,16 @@ define <4 x i32> @test_srem_odd_even(<4
 define <4 x i32> @test_srem_odd_allones_eq(<4 x i32> %X) nounwind {
 ; CHECK-LABEL: test_srem_odd_allones_eq:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI1_0
-; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI1_0]
-; CHECK-NEXT:    adrp x8, .LCPI1_1
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI1_1]
-; CHECK-NEXT:    adrp x8, .LCPI1_2
-; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI1_2]
-; CHECK-NEXT:    adrp x8, .LCPI1_3
-; CHECK-NEXT:    smull2 v4.2d, v0.4s, v1.4s
-; CHECK-NEXT:    smull v1.2d, v0.2s, v1.2s
-; CHECK-NEXT:    uzp2 v1.4s, v1.4s, v4.4s
-; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI1_3]
-; CHECK-NEXT:    adrp x8, .LCPI1_4
-; CHECK-NEXT:    mla v1.4s, v0.4s, v2.4s
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI1_4]
-; CHECK-NEXT:    neg v3.4s, v3.4s
-; CHECK-NEXT:    sshl v3.4s, v1.4s, v3.4s
-; CHECK-NEXT:    ushr v1.4s, v1.4s, #31
-; CHECK-NEXT:    and v1.16b, v1.16b, v4.16b
-; CHECK-NEXT:    add v1.4s, v3.4s, v1.4s
-; CHECK-NEXT:    mls v0.4s, v1.4s, v2.4s
-; CHECK-NEXT:    cmeq v0.4s, v0.4s, #0
+; CHECK-NEXT:    adrp x10, .LCPI1_0
+; CHECK-NEXT:    mov w8, #52429
+; CHECK-NEXT:    mov w9, #39321
+; CHECK-NEXT:    ldr q1, [x10, :lo12:.LCPI1_0]
+; CHECK-NEXT:    movk w8, #52428, lsl #16
+; CHECK-NEXT:    movk w9, #6553, lsl #16
+; CHECK-NEXT:    dup v2.4s, w8
+; CHECK-NEXT:    dup v3.4s, w9
+; CHECK-NEXT:    mla v3.4s, v0.4s, v2.4s
+; CHECK-NEXT:    cmhs v0.4s, v1.4s, v3.4s
 ; CHECK-NEXT:    movi v1.4s, #1
 ; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
@@ -69,28 +58,16 @@ define <4 x i32> @test_srem_odd_allones_
 define <4 x i32> @test_srem_odd_allones_ne(<4 x i32> %X) nounwind {
 ; CHECK-LABEL: test_srem_odd_allones_ne:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI2_0
-; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI2_0]
-; CHECK-NEXT:    adrp x8, .LCPI2_1
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI2_1]
-; CHECK-NEXT:    adrp x8, .LCPI2_2
-; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI2_2]
-; CHECK-NEXT:    adrp x8, .LCPI2_3
-; CHECK-NEXT:    smull2 v4.2d, v0.4s, v1.4s
-; CHECK-NEXT:    smull v1.2d, v0.2s, v1.2s
-; CHECK-NEXT:    uzp2 v1.4s, v1.4s, v4.4s
-; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI2_3]
-; CHECK-NEXT:    adrp x8, .LCPI2_4
-; CHECK-NEXT:    mla v1.4s, v0.4s, v2.4s
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI2_4]
-; CHECK-NEXT:    neg v3.4s, v3.4s
-; CHECK-NEXT:    sshl v3.4s, v1.4s, v3.4s
-; CHECK-NEXT:    ushr v1.4s, v1.4s, #31
-; CHECK-NEXT:    and v1.16b, v1.16b, v4.16b
-; CHECK-NEXT:    add v1.4s, v3.4s, v1.4s
-; CHECK-NEXT:    mls v0.4s, v1.4s, v2.4s
-; CHECK-NEXT:    cmeq v0.4s, v0.4s, #0
-; CHECK-NEXT:    mvn v0.16b, v0.16b
+; CHECK-NEXT:    adrp x10, .LCPI2_0
+; CHECK-NEXT:    mov w8, #52429
+; CHECK-NEXT:    mov w9, #39321
+; CHECK-NEXT:    ldr q1, [x10, :lo12:.LCPI2_0]
+; CHECK-NEXT:    movk w8, #52428, lsl #16
+; CHECK-NEXT:    movk w9, #6553, lsl #16
+; CHECK-NEXT:    dup v2.4s, w8
+; CHECK-NEXT:    dup v3.4s, w9
+; CHECK-NEXT:    mla v3.4s, v0.4s, v2.4s
+; CHECK-NEXT:    cmhi v0.4s, v3.4s, v1.4s
 ; CHECK-NEXT:    movi v1.4s, #1
 ; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
@@ -327,27 +304,16 @@ define <4 x i32> @test_srem_odd_even_pow
 define <4 x i32> @test_srem_odd_one(<4 x i32> %X) nounwind {
 ; CHECK-LABEL: test_srem_odd_one:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI10_0
-; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI10_0]
-; CHECK-NEXT:    adrp x8, .LCPI10_1
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI10_1]
-; CHECK-NEXT:    adrp x8, .LCPI10_2
-; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI10_2]
-; CHECK-NEXT:    adrp x8, .LCPI10_3
-; CHECK-NEXT:    smull2 v4.2d, v0.4s, v1.4s
-; CHECK-NEXT:    smull v1.2d, v0.2s, v1.2s
-; CHECK-NEXT:    uzp2 v1.4s, v1.4s, v4.4s
-; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI10_3]
-; CHECK-NEXT:    adrp x8, .LCPI10_4
-; CHECK-NEXT:    mla v1.4s, v0.4s, v2.4s
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI10_4]
-; CHECK-NEXT:    neg v3.4s, v3.4s
-; CHECK-NEXT:    sshl v3.4s, v1.4s, v3.4s
-; CHECK-NEXT:    ushr v1.4s, v1.4s, #31
-; CHECK-NEXT:    and v1.16b, v1.16b, v4.16b
-; CHECK-NEXT:    add v1.4s, v3.4s, v1.4s
-; CHECK-NEXT:    mls v0.4s, v1.4s, v2.4s
-; CHECK-NEXT:    cmeq v0.4s, v0.4s, #0
+; CHECK-NEXT:    adrp x10, .LCPI10_0
+; CHECK-NEXT:    mov w8, #52429
+; CHECK-NEXT:    mov w9, #39321
+; CHECK-NEXT:    ldr q1, [x10, :lo12:.LCPI10_0]
+; CHECK-NEXT:    movk w8, #52428, lsl #16
+; CHECK-NEXT:    movk w9, #6553, lsl #16
+; CHECK-NEXT:    dup v2.4s, w8
+; CHECK-NEXT:    dup v3.4s, w9
+; CHECK-NEXT:    mla v3.4s, v0.4s, v2.4s
+; CHECK-NEXT:    cmhs v0.4s, v1.4s, v3.4s
 ; CHECK-NEXT:    movi v1.4s, #1
 ; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
@@ -625,27 +591,16 @@ define <4 x i32> @test_srem_odd_even_all
 define <4 x i32> @test_srem_odd_allones_and_one(<4 x i32> %X) nounwind {
 ; CHECK-LABEL: test_srem_odd_allones_and_one:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI19_0
-; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI19_0]
-; CHECK-NEXT:    adrp x8, .LCPI19_1
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI19_1]
-; CHECK-NEXT:    adrp x8, .LCPI19_2
-; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI19_2]
-; CHECK-NEXT:    adrp x8, .LCPI19_3
-; CHECK-NEXT:    smull2 v4.2d, v0.4s, v1.4s
-; CHECK-NEXT:    smull v1.2d, v0.2s, v1.2s
-; CHECK-NEXT:    uzp2 v1.4s, v1.4s, v4.4s
-; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI19_3]
-; CHECK-NEXT:    adrp x8, .LCPI19_4
-; CHECK-NEXT:    mla v1.4s, v0.4s, v2.4s
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI19_4]
-; CHECK-NEXT:    neg v3.4s, v3.4s
-; CHECK-NEXT:    sshl v3.4s, v1.4s, v3.4s
-; CHECK-NEXT:    ushr v1.4s, v1.4s, #31
-; CHECK-NEXT:    and v1.16b, v1.16b, v4.16b
-; CHECK-NEXT:    add v1.4s, v3.4s, v1.4s
-; CHECK-NEXT:    mls v0.4s, v1.4s, v2.4s
-; CHECK-NEXT:    cmeq v0.4s, v0.4s, #0
+; CHECK-NEXT:    adrp x10, .LCPI19_0
+; CHECK-NEXT:    mov w8, #52429
+; CHECK-NEXT:    mov w9, #39321
+; CHECK-NEXT:    ldr q1, [x10, :lo12:.LCPI19_0]
+; CHECK-NEXT:    movk w8, #52428, lsl #16
+; CHECK-NEXT:    movk w9, #6553, lsl #16
+; CHECK-NEXT:    dup v2.4s, w8
+; CHECK-NEXT:    dup v3.4s, w9
+; CHECK-NEXT:    mla v3.4s, v0.4s, v2.4s
+; CHECK-NEXT:    cmhs v0.4s, v1.4s, v3.4s
 ; CHECK-NEXT:    movi v1.4s, #1
 ; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret

Modified: llvm/trunk/test/CodeGen/AArch64/srem-seteq-vec-splat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/srem-seteq-vec-splat.ll?rev=368702&r1=368701&r2=368702&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/srem-seteq-vec-splat.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/srem-seteq-vec-splat.ll Tue Aug 13 07:57:37 2019
@@ -5,17 +5,17 @@
 define <4 x i32> @test_srem_odd_25(<4 x i32> %X) nounwind {
 ; CHECK-LABEL: test_srem_odd_25:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #34079
-; CHECK-NEXT:    movk w8, #20971, lsl #16
-; CHECK-NEXT:    dup v2.4s, w8
-; CHECK-NEXT:    smull2 v3.2d, v0.4s, v2.4s
-; CHECK-NEXT:    smull v2.2d, v0.2s, v2.2s
-; CHECK-NEXT:    uzp2 v2.4s, v2.4s, v3.4s
-; CHECK-NEXT:    sshr v3.4s, v2.4s, #3
-; CHECK-NEXT:    movi v1.4s, #25
-; CHECK-NEXT:    usra v3.4s, v2.4s, #31
-; CHECK-NEXT:    mls v0.4s, v3.4s, v1.4s
-; CHECK-NEXT:    cmeq v0.4s, v0.4s, #0
+; CHECK-NEXT:    mov w8, #23593
+; CHECK-NEXT:    mov w9, #47185
+; CHECK-NEXT:    movk w8, #49807, lsl #16
+; CHECK-NEXT:    movk w9, #1310, lsl #16
+; CHECK-NEXT:    mov w10, #28834
+; CHECK-NEXT:    movk w10, #2621, lsl #16
+; CHECK-NEXT:    dup v1.4s, w8
+; CHECK-NEXT:    dup v2.4s, w9
+; CHECK-NEXT:    dup v3.4s, w10
+; CHECK-NEXT:    mla v2.4s, v0.4s, v1.4s
+; CHECK-NEXT:    cmhs v0.4s, v3.4s, v2.4s
 ; CHECK-NEXT:    movi v1.4s, #1
 ; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
@@ -55,17 +55,17 @@ define <4 x i32> @test_srem_even_100(<4
 define <4 x i32> @test_srem_odd_neg25(<4 x i32> %X) nounwind {
 ; CHECK-LABEL: test_srem_odd_neg25:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI2_0
-; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI2_0]
-; CHECK-NEXT:    adrp x8, .LCPI2_1
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI2_1]
-; CHECK-NEXT:    smull2 v3.2d, v0.4s, v1.4s
-; CHECK-NEXT:    smull v1.2d, v0.2s, v1.2s
-; CHECK-NEXT:    uzp2 v1.4s, v1.4s, v3.4s
-; CHECK-NEXT:    sshr v3.4s, v1.4s, #3
-; CHECK-NEXT:    usra v3.4s, v1.4s, #31
-; CHECK-NEXT:    mls v0.4s, v3.4s, v2.4s
-; CHECK-NEXT:    cmeq v0.4s, v0.4s, #0
+; CHECK-NEXT:    mov w8, #23593
+; CHECK-NEXT:    mov w9, #47185
+; CHECK-NEXT:    movk w8, #49807, lsl #16
+; CHECK-NEXT:    movk w9, #1310, lsl #16
+; CHECK-NEXT:    mov w10, #28834
+; CHECK-NEXT:    movk w10, #2621, lsl #16
+; CHECK-NEXT:    dup v1.4s, w8
+; CHECK-NEXT:    dup v2.4s, w9
+; CHECK-NEXT:    dup v3.4s, w10
+; CHECK-NEXT:    mla v2.4s, v0.4s, v1.4s
+; CHECK-NEXT:    cmhs v0.4s, v3.4s, v2.4s
 ; CHECK-NEXT:    movi v1.4s, #1
 ; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret

Modified: llvm/trunk/test/CodeGen/AArch64/srem-seteq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/srem-seteq.ll?rev=368702&r1=368701&r2=368702&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/srem-seteq.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/srem-seteq.ll Tue Aug 13 07:57:37 2019
@@ -8,15 +8,14 @@
 define i32 @test_srem_odd(i32 %X) nounwind {
 ; CHECK-LABEL: test_srem_odd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #26215
-; CHECK-NEXT:    movk w8, #26214, lsl #16
-; CHECK-NEXT:    smull x8, w0, w8
-; CHECK-NEXT:    lsr x9, x8, #63
-; CHECK-NEXT:    asr x8, x8, #33
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    add w8, w8, w8, lsl #2
-; CHECK-NEXT:    cmp w0, w8
-; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    mov w8, #52429
+; CHECK-NEXT:    mov w9, #39321
+; CHECK-NEXT:    movk w8, #52428, lsl #16
+; CHECK-NEXT:    movk w9, #6553, lsl #16
+; CHECK-NEXT:    madd w8, w0, w8, w9
+; CHECK-NEXT:    mov w9, #858993459
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %srem = srem i32 %X, 5
   %cmp = icmp eq i32 %srem, 0
@@ -27,16 +26,15 @@ define i32 @test_srem_odd(i32 %X) nounwi
 define i32 @test_srem_odd_25(i32 %X) nounwind {
 ; CHECK-LABEL: test_srem_odd_25:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #34079
-; CHECK-NEXT:    movk w8, #20971, lsl #16
-; CHECK-NEXT:    smull x8, w0, w8
-; CHECK-NEXT:    lsr x9, x8, #63
-; CHECK-NEXT:    asr x8, x8, #35
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #25
-; CHECK-NEXT:    msub w8, w8, w9, w0
-; CHECK-NEXT:    cmp w8, #0 // =0
-; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    mov w8, #23593
+; CHECK-NEXT:    mov w9, #47185
+; CHECK-NEXT:    movk w8, #49807, lsl #16
+; CHECK-NEXT:    movk w9, #1310, lsl #16
+; CHECK-NEXT:    madd w8, w0, w8, w9
+; CHECK-NEXT:    mov w9, #28835
+; CHECK-NEXT:    movk w9, #2621, lsl #16
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %srem = srem i32 %X, 25
   %cmp = icmp eq i32 %srem, 0
@@ -48,18 +46,12 @@ define i32 @test_srem_odd_25(i32 %X) nou
 define i32 @test_srem_odd_bit30(i32 %X) nounwind {
 ; CHECK-LABEL: test_srem_odd_bit30:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
-; CHECK-NEXT:    sxtw x8, w0
-; CHECK-NEXT:    sbfiz x9, x0, #29, #32
-; CHECK-NEXT:    sub x8, x9, x8
-; CHECK-NEXT:    lsr x9, x8, #63
-; CHECK-NEXT:    asr x8, x8, #59
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #3
-; CHECK-NEXT:    movk w9, #16384, lsl #16
-; CHECK-NEXT:    msub w8, w8, w9, w0
-; CHECK-NEXT:    cmp w8, #0 // =0
-; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    mov w8, #43691
+; CHECK-NEXT:    movk w8, #27306, lsl #16
+; CHECK-NEXT:    orr w9, wzr, #0x1
+; CHECK-NEXT:    madd w8, w0, w8, w9
+; CHECK-NEXT:    cmp w8, #3 // =3
+; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %srem = srem i32 %X, 1073741827
   %cmp = icmp eq i32 %srem, 0
@@ -71,17 +63,12 @@ define i32 @test_srem_odd_bit30(i32 %X)
 define i32 @test_srem_odd_bit31(i32 %X) nounwind {
 ; CHECK-LABEL: test_srem_odd_bit31:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
-; CHECK-NEXT:    sxtw x8, w0
-; CHECK-NEXT:    add x8, x8, x8, lsl #29
-; CHECK-NEXT:    neg x8, x8
-; CHECK-NEXT:    lsr x9, x8, #63
-; CHECK-NEXT:    asr x8, x8, #60
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #-2147483645
-; CHECK-NEXT:    msub w8, w8, w9, w0
-; CHECK-NEXT:    cmp w8, #0 // =0
-; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    mov w8, #21845
+; CHECK-NEXT:    movk w8, #54613, lsl #16
+; CHECK-NEXT:    orr w9, wzr, #0x1
+; CHECK-NEXT:    madd w8, w0, w8, w9
+; CHECK-NEXT:    cmp w8, #3 // =3
+; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %srem = srem i32 %X, 2147483651
   %cmp = icmp eq i32 %srem, 0
@@ -118,16 +105,16 @@ define i16 @test_srem_even(i16 %X) nounw
 define i32 @test_srem_even_100(i32 %X) nounwind {
 ; CHECK-LABEL: test_srem_even_100:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #34079
-; CHECK-NEXT:    movk w8, #20971, lsl #16
-; CHECK-NEXT:    smull x8, w0, w8
-; CHECK-NEXT:    lsr x9, x8, #63
-; CHECK-NEXT:    asr x8, x8, #37
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #100
-; CHECK-NEXT:    msub w8, w8, w9, w0
-; CHECK-NEXT:    cmp w8, #0 // =0
-; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    mov w8, #23593
+; CHECK-NEXT:    mov w9, #47184
+; CHECK-NEXT:    movk w8, #49807, lsl #16
+; CHECK-NEXT:    movk w9, #1310, lsl #16
+; CHECK-NEXT:    madd w8, w0, w8, w9
+; CHECK-NEXT:    mov w9, #23593
+; CHECK-NEXT:    ror w8, w8, #2
+; CHECK-NEXT:    movk w9, #655, lsl #16
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %srem = srem i32 %X, 100
   %cmp = icmp eq i32 %srem, 0
@@ -139,17 +126,13 @@ define i32 @test_srem_even_100(i32 %X) n
 define i32 @test_srem_even_bit30(i32 %X) nounwind {
 ; CHECK-LABEL: test_srem_even_bit30:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #65433
-; CHECK-NEXT:    movk w8, #16383, lsl #16
-; CHECK-NEXT:    smull x8, w0, w8
-; CHECK-NEXT:    lsr x9, x8, #63
-; CHECK-NEXT:    asr x8, x8, #60
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #104
-; CHECK-NEXT:    movk w9, #16384, lsl #16
-; CHECK-NEXT:    msub w8, w8, w9, w0
-; CHECK-NEXT:    cmp w8, #0 // =0
-; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    mov w8, #20165
+; CHECK-NEXT:    movk w8, #64748, lsl #16
+; CHECK-NEXT:    orr w9, wzr, #0x8
+; CHECK-NEXT:    madd w8, w0, w8, w9
+; CHECK-NEXT:    ror w8, w8, #3
+; CHECK-NEXT:    cmp w8, #3 // =3
+; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %srem = srem i32 %X, 1073741928
   %cmp = icmp eq i32 %srem, 0
@@ -161,18 +144,13 @@ define i32 @test_srem_even_bit30(i32 %X)
 define i32 @test_srem_even_bit31(i32 %X) nounwind {
 ; CHECK-LABEL: test_srem_even_bit31:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #65433
-; CHECK-NEXT:    movk w8, #32767, lsl #16
-; CHECK-NEXT:    smull x8, w0, w8
-; CHECK-NEXT:    lsr x8, x8, #32
-; CHECK-NEXT:    sub w8, w8, w0
-; CHECK-NEXT:    asr w9, w8, #30
-; CHECK-NEXT:    add w8, w9, w8, lsr #31
-; CHECK-NEXT:    mov w9, #102
-; CHECK-NEXT:    movk w9, #32768, lsl #16
-; CHECK-NEXT:    msub w8, w8, w9, w0
-; CHECK-NEXT:    cmp w8, #0 // =0
-; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    mov w8, #1285
+; CHECK-NEXT:    movk w8, #50437, lsl #16
+; CHECK-NEXT:    orr w9, wzr, #0x2
+; CHECK-NEXT:    madd w8, w0, w8, w9
+; CHECK-NEXT:    ror w8, w8, #1
+; CHECK-NEXT:    cmp w8, #3 // =3
+; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %srem = srem i32 %X, 2147483750
   %cmp = icmp eq i32 %srem, 0
@@ -188,15 +166,15 @@ define i32 @test_srem_even_bit31(i32 %X)
 define i32 @test_srem_odd_setne(i32 %X) nounwind {
 ; CHECK-LABEL: test_srem_odd_setne:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #26215
-; CHECK-NEXT:    movk w8, #26214, lsl #16
-; CHECK-NEXT:    smull x8, w0, w8
-; CHECK-NEXT:    lsr x9, x8, #63
-; CHECK-NEXT:    asr x8, x8, #33
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    add w8, w8, w8, lsl #2
-; CHECK-NEXT:    cmp w0, w8
-; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    mov w8, #52429
+; CHECK-NEXT:    mov w9, #39321
+; CHECK-NEXT:    movk w8, #52428, lsl #16
+; CHECK-NEXT:    movk w9, #6553, lsl #16
+; CHECK-NEXT:    madd w8, w0, w8, w9
+; CHECK-NEXT:    mov w9, #13106
+; CHECK-NEXT:    movk w9, #13107, lsl #16
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %srem = srem i32 %X, 5
   %cmp = icmp ne i32 %srem, 0
@@ -208,14 +186,15 @@ define i32 @test_srem_odd_setne(i32 %X)
 define i32 @test_srem_negative_odd(i32 %X) nounwind {
 ; CHECK-LABEL: test_srem_negative_odd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #-1717986919
-; CHECK-NEXT:    smull x8, w0, w8
-; CHECK-NEXT:    lsr x9, x8, #63
-; CHECK-NEXT:    asr x8, x8, #33
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    add w8, w8, w8, lsl #2
-; CHECK-NEXT:    cmn w0, w8
-; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    mov w8, #52429
+; CHECK-NEXT:    mov w9, #39321
+; CHECK-NEXT:    movk w8, #52428, lsl #16
+; CHECK-NEXT:    movk w9, #6553, lsl #16
+; CHECK-NEXT:    madd w8, w0, w8, w9
+; CHECK-NEXT:    mov w9, #13106
+; CHECK-NEXT:    movk w9, #13107, lsl #16
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %srem = srem i32 %X, -5
   %cmp = icmp ne i32 %srem, 0
@@ -225,17 +204,14 @@ define i32 @test_srem_negative_odd(i32 %
 define i32 @test_srem_negative_even(i32 %X) nounwind {
 ; CHECK-LABEL: test_srem_negative_even:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #56173
-; CHECK-NEXT:    movk w8, #28086, lsl #16
-; CHECK-NEXT:    smull x8, w0, w8
-; CHECK-NEXT:    lsr x8, x8, #32
-; CHECK-NEXT:    sub w8, w8, w0
-; CHECK-NEXT:    asr w9, w8, #3
-; CHECK-NEXT:    add w8, w9, w8, lsr #31
-; CHECK-NEXT:    mov w9, #-14
-; CHECK-NEXT:    msub w8, w8, w9, w0
-; CHECK-NEXT:    cmp w8, #0 // =0
-; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    mov w8, #28087
+; CHECK-NEXT:    mov w9, #9362
+; CHECK-NEXT:    movk w8, #46811, lsl #16
+; CHECK-NEXT:    movk w9, #4681, lsl #16
+; CHECK-NEXT:    madd w8, w0, w8, w9
+; CHECK-NEXT:    ror w8, w8, #1
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %srem = srem i32 %X, -14
   %cmp = icmp ne i32 %srem, 0
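
Note how `test_srem_negative_odd` and `test_srem_negative_even` above show the
divisor normalization: `srem %X, -5` now emits exactly the same constants as
`srem %X, 5`, because the fold negates negative divisors first. For D = -14,
negation gives 14 = 7 * 2^1, so D0 = 7, K = 1, P = inv(7) mod 2^32 = 0xB6DB6DB7,
and A = Q = 0x12492492 (A equals Q here because Q = (2 * A) >> 1), which is why
a single register serves as both the addend and the comparison bound. A
stand-alone sketch (helper name made up) of the setne form:

    #include <cassert>
    #include <cstdint>

    static bool srem_neg14_is_nonzero(int32_t X) {
      // (setne (srem X, -14), 0) -> (setugt (rotr (add (mul X, P), A), 1), Q)
      uint32_t V = (uint32_t)X * 0xB6DB6DB7u + 0x12492492u;
      V = (V >> 1) | (V << 31); // rotr by K = 1
      return V > 0x12492492u;
    }

    int main() {
      for (int64_t X = -1000000; X <= 1000000; ++X)
        assert(srem_neg14_is_nonzero((int32_t)X) == ((int32_t)X % -14 != 0));
    }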

Modified: llvm/trunk/test/CodeGen/X86/srem-seteq-optsize.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/srem-seteq-optsize.ll?rev=368702&r1=368701&r2=368702&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/srem-seteq-optsize.ll (original)
+++ llvm/trunk/test/CodeGen/X86/srem-seteq-optsize.ll Tue Aug 13 07:57:37 2019
@@ -47,18 +47,11 @@ define i32 @test_minsize(i32 %X) optsize
 define i32 @test_optsize(i32 %X) optsize nounwind readnone {
 ; X86-LABEL: test_optsize:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl $1717986919, %edx # imm = 0x66666667
-; X86-NEXT:    movl %ecx, %eax
-; X86-NEXT:    imull %edx
-; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    shrl $31, %eax
-; X86-NEXT:    sarl %edx
-; X86-NEXT:    addl %eax, %edx
-; X86-NEXT:    leal (%edx,%edx,4), %eax
-; X86-NEXT:    cmpl %eax, %ecx
+; X86-NEXT:    imull $-858993459, {{[0-9]+}}(%esp), %eax # imm = 0xCCCCCCCD
+; X86-NEXT:    addl $429496729, %eax # imm = 0x19999999
+; X86-NEXT:    cmpl $858993459, %eax # imm = 0x33333333
 ; X86-NEXT:    movl $42, %eax
-; X86-NEXT:    je .LBB1_2
+; X86-NEXT:    jb .LBB1_2
 ; X86-NEXT:  # %bb.1:
 ; X86-NEXT:    movl $-10, %eax
 ; X86-NEXT:  .LBB1_2:
@@ -66,17 +59,12 @@ define i32 @test_optsize(i32 %X) optsize
 ;
 ; X64-LABEL: test_optsize:
 ; X64:       # %bb.0:
-; X64-NEXT:    movslq %edi, %rax
-; X64-NEXT:    imulq $1717986919, %rax, %rcx # imm = 0x66666667
-; X64-NEXT:    movq %rcx, %rdx
-; X64-NEXT:    shrq $63, %rdx
-; X64-NEXT:    sarq $33, %rcx
-; X64-NEXT:    addl %edx, %ecx
-; X64-NEXT:    leal (%rcx,%rcx,4), %ecx
-; X64-NEXT:    cmpl %ecx, %eax
+; X64-NEXT:    imull $-858993459, %edi, %eax # imm = 0xCCCCCCCD
+; X64-NEXT:    addl $429496729, %eax # imm = 0x19999999
+; X64-NEXT:    cmpl $858993459, %eax # imm = 0x33333333
 ; X64-NEXT:    movl $42, %ecx
 ; X64-NEXT:    movl $-10, %eax
-; X64-NEXT:    cmovel %ecx, %eax
+; X64-NEXT:    cmovbl %ecx, %eax
 ; X64-NEXT:    retq
   %rem = srem i32 %X, 5
   %cmp = icmp eq i32 %rem, 0

Modified: llvm/trunk/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll?rev=368702&r1=368701&r2=368702&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/srem-seteq-vec-nonsplat.ll Tue Aug 13 07:57:37 2019
@@ -138,21 +138,10 @@ define <4 x i32> @test_srem_odd_even(<4
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_even:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1717986919,2454267027,1374389535,1374389535]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
@@ -168,132 +157,55 @@ define <4 x i32> @test_srem_odd_even(<4
 define <4 x i32> @test_srem_odd_allones_eq(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_odd_allones_eq:
 ; CHECK-SSE2:       # %bb.0:
-; CHECK-SSE2-NEXT:    pxor %xmm1, %xmm1
-; CHECK-SSE2-NEXT:    pxor %xmm2, %xmm2
-; CHECK-SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
-; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [1717986919,1717986919,0,1717986919]
-; CHECK-SSE2-NEXT:    pand %xmm3, %xmm2
-; CHECK-SSE2-NEXT:    pmuludq %xmm0, %xmm3
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT:    pmuludq {{.*}}(%rip), %xmm4
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
-; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; CHECK-SSE2-NEXT:    psubd %xmm2, %xmm3
-; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = <0,u,4294967295,u>
-; CHECK-SSE2-NEXT:    pmuludq %xmm0, %xmm2
-; CHECK-SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT:    paddd %xmm3, %xmm2
-; CHECK-SSE2-NEXT:    movdqa %xmm2, %xmm3
-; CHECK-SSE2-NEXT:    psrad $1, %xmm3
-; CHECK-SSE2-NEXT:    movdqa %xmm2, %xmm4
-; CHECK-SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[3,0]
-; CHECK-SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0,2]
-; CHECK-SSE2-NEXT:    psrld $31, %xmm2
-; CHECK-SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT:    paddd %xmm3, %xmm2
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-SSE2-NEXT:    pmuludq {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT:    pmuludq {{.*}}(%rip), %xmm3
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; CHECK-SSE2-NEXT:    psubd %xmm2, %xmm0
-; CHECK-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
-; CHECK-SSE2-NEXT:    psrld $31, %xmm0
+; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837]
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm2
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT:    paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pxor {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pcmpgtd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pandn {{.*}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT:    retq
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_allones_eq:
 ; CHECK-SSE41:       # %bb.0:
-; CHECK-SSE41-NEXT:    movl $1717986919, %eax # imm = 0x66666667
-; CHECK-SSE41-NEXT:    movd %eax, %xmm1
-; CHECK-SSE41-NEXT:    pmuldq %xmm0, %xmm1
-; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT:    pmuldq {{.*}}(%rip), %xmm2
-; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [0,0,4294967295,0]
-; CHECK-SSE41-NEXT:    pmulld %xmm0, %xmm1
-; CHECK-SSE41-NEXT:    paddd %xmm2, %xmm1
-; CHECK-SSE41-NEXT:    movdqa %xmm1, %xmm2
-; CHECK-SSE41-NEXT:    psrad $1, %xmm2
-; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-SSE41-NEXT:    psrld $31, %xmm1
-; CHECK-SSE41-NEXT:    pxor %xmm3, %xmm3
-; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7]
-; CHECK-SSE41-NEXT:    paddd %xmm2, %xmm1
-; CHECK-SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm1
-; CHECK-SSE41-NEXT:    psubd %xmm1, %xmm0
-; CHECK-SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
+; CHECK-SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT:    paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [858993458,858993458,4294967295,858993458]
+; CHECK-SSE41-NEXT:    pminud %xmm0, %xmm1
+; CHECK-SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
 ; CHECK-SSE41-NEXT:    psrld $31, %xmm0
 ; CHECK-SSE41-NEXT:    retq
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_allones_eq:
 ; CHECK-AVX1:       # %bb.0:
-; CHECK-AVX1-NEXT:    movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX1-NEXT:    vmovd %eax, %xmm1
-; CHECK-AVX1-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT:    vpmuldq {{.*}}(%rip), %xmm2, %xmm2
-; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX1-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpsrad $1, %xmm1, %xmm2
-; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT:    vpsrld $31, %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7]
-; CHECK-AVX1-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
-; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX1-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    retq
 ;
 ; CHECK-AVX2-LABEL: test_srem_odd_allones_eq:
 ; CHECK-AVX2:       # %bb.0:
-; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919]
-; CHECK-AVX2-NEXT:    vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX2-NEXT:    vmovd %eax, %xmm2
-; CHECK-AVX2-NEXT:    vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX2-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX2-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837]
+; CHECK-AVX2-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [429496729,429496729,429496729,429496729]
+; CHECK-AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX2-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX2-NEXT:    retq
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_allones_eq:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX512VL-NEXT:    vmovd %eax, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 5, i32 5, i32 4294967295, i32 5>
@@ -304,133 +216,56 @@ define <4 x i32> @test_srem_odd_allones_
 define <4 x i32> @test_srem_odd_allones_ne(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_odd_allones_ne:
 ; CHECK-SSE2:       # %bb.0:
-; CHECK-SSE2-NEXT:    pxor %xmm1, %xmm1
-; CHECK-SSE2-NEXT:    pxor %xmm2, %xmm2
-; CHECK-SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
-; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [1717986919,1717986919,0,1717986919]
-; CHECK-SSE2-NEXT:    pand %xmm3, %xmm2
-; CHECK-SSE2-NEXT:    pmuludq %xmm0, %xmm3
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT:    pmuludq {{.*}}(%rip), %xmm4
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
-; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; CHECK-SSE2-NEXT:    psubd %xmm2, %xmm3
-; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = <0,u,4294967295,u>
-; CHECK-SSE2-NEXT:    pmuludq %xmm0, %xmm2
-; CHECK-SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT:    paddd %xmm3, %xmm2
-; CHECK-SSE2-NEXT:    movdqa %xmm2, %xmm3
-; CHECK-SSE2-NEXT:    psrad $1, %xmm3
-; CHECK-SSE2-NEXT:    movdqa %xmm2, %xmm4
-; CHECK-SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[3,0]
-; CHECK-SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0,2]
-; CHECK-SSE2-NEXT:    psrld $31, %xmm2
-; CHECK-SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT:    paddd %xmm3, %xmm2
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-SSE2-NEXT:    pmuludq {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT:    pmuludq {{.*}}(%rip), %xmm3
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; CHECK-SSE2-NEXT:    psubd %xmm2, %xmm0
-; CHECK-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
-; CHECK-SSE2-NEXT:    pandn {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837]
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm2
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT:    paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pxor {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pcmpgtd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    psrld $31, %xmm0
 ; CHECK-SSE2-NEXT:    retq
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_allones_ne:
 ; CHECK-SSE41:       # %bb.0:
-; CHECK-SSE41-NEXT:    movl $1717986919, %eax # imm = 0x66666667
-; CHECK-SSE41-NEXT:    movd %eax, %xmm1
-; CHECK-SSE41-NEXT:    pmuldq %xmm0, %xmm1
-; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT:    pmuldq {{.*}}(%rip), %xmm2
-; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [0,0,4294967295,0]
-; CHECK-SSE41-NEXT:    pmulld %xmm0, %xmm1
-; CHECK-SSE41-NEXT:    paddd %xmm2, %xmm1
-; CHECK-SSE41-NEXT:    movdqa %xmm1, %xmm2
-; CHECK-SSE41-NEXT:    psrad $1, %xmm2
-; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-SSE41-NEXT:    psrld $31, %xmm1
-; CHECK-SSE41-NEXT:    pxor %xmm3, %xmm3
-; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7]
-; CHECK-SSE41-NEXT:    paddd %xmm2, %xmm1
-; CHECK-SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm1
-; CHECK-SSE41-NEXT:    psubd %xmm1, %xmm0
-; CHECK-SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
+; CHECK-SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT:    paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [858993458,858993458,4294967295,858993458]
+; CHECK-SSE41-NEXT:    pminud %xmm0, %xmm1
+; CHECK-SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
 ; CHECK-SSE41-NEXT:    pandn {{.*}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT:    retq
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_allones_ne:
 ; CHECK-AVX1:       # %bb.0:
-; CHECK-AVX1-NEXT:    movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX1-NEXT:    vmovd %eax, %xmm1
-; CHECK-AVX1-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT:    vpmuldq {{.*}}(%rip), %xmm2, %xmm2
-; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX1-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpsrad $1, %xmm1, %xmm2
-; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT:    vpsrld $31, %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7]
-; CHECK-AVX1-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
-; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX1-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    vpandn {{.*}}(%rip), %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    retq
 ;
 ; CHECK-AVX2-LABEL: test_srem_odd_allones_ne:
 ; CHECK-AVX2:       # %bb.0:
-; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919]
-; CHECK-AVX2-NEXT:    vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX2-NEXT:    vmovd %eax, %xmm2
-; CHECK-AVX2-NEXT:    vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX2-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX2-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837]
+; CHECK-AVX2-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [429496729,429496729,429496729,429496729]
+; CHECK-AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
 ; CHECK-AVX2-NEXT:    vpandn %xmm1, %xmm0, %xmm0
 ; CHECK-AVX2-NEXT:    retq
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_allones_ne:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX512VL-NEXT:    vmovd %eax, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpandnd {{.*}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 5, i32 5, i32 4294967295, i32 5>
@@ -559,24 +394,11 @@ define <4 x i32> @test_srem_even_allones
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_even_allones_eq:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    movl $-1840700269, %eax # imm = 0x92492493
-; CHECK-AVX512VL-NEXT:    vmovd %eax, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprord $1, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 14, i32 14, i32 4294967295, i32 14>
@@ -705,24 +527,11 @@ define <4 x i32> @test_srem_even_allones
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_even_allones_ne:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    movl $-1840700269, %eax # imm = 0x92492493
-; CHECK-AVX512VL-NEXT:    vmovd %eax, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprord $1, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpandnd {{.*}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 14, i32 14, i32 4294967295, i32 14>
@@ -869,23 +678,11 @@ define <4 x i32> @test_srem_odd_even_all
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_eq:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1717986919,2454267027,0,1374389535]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 5, i32 14, i32 4294967295, i32 100>
@@ -1031,23 +828,11 @@ define <4 x i32> @test_srem_odd_even_all
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_ne:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1717986919,2454267027,0,1374389535]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpandnd {{.*}}(%rip){1to4}, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 5, i32 14, i32 4294967295, i32 100>
@@ -1168,20 +953,10 @@ define <4 x i32> @test_srem_odd_poweroft
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_poweroftwo:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmuldq {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
@@ -1283,19 +1058,10 @@ define <4 x i32> @test_srem_even_powerof
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_even_poweroftwo:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmuldq {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm0, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpsrad $3, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
@@ -1438,21 +1204,10 @@ define <4 x i32> @test_srem_odd_even_pow
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_even_poweroftwo:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1717986919,2454267027,2147483649,1374389535]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
@@ -1468,132 +1223,55 @@ define <4 x i32> @test_srem_odd_even_pow
 define <4 x i32> @test_srem_odd_one(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_odd_one:
 ; CHECK-SSE2:       # %bb.0:
-; CHECK-SSE2-NEXT:    pxor %xmm1, %xmm1
-; CHECK-SSE2-NEXT:    pxor %xmm2, %xmm2
-; CHECK-SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
-; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [1717986919,1717986919,0,1717986919]
-; CHECK-SSE2-NEXT:    pand %xmm3, %xmm2
-; CHECK-SSE2-NEXT:    pmuludq %xmm0, %xmm3
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT:    pmuludq {{.*}}(%rip), %xmm4
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
-; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; CHECK-SSE2-NEXT:    psubd %xmm2, %xmm3
-; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = <0,u,1,u>
-; CHECK-SSE2-NEXT:    pmuludq %xmm0, %xmm2
-; CHECK-SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT:    paddd %xmm3, %xmm2
-; CHECK-SSE2-NEXT:    movdqa %xmm2, %xmm3
-; CHECK-SSE2-NEXT:    psrad $1, %xmm3
-; CHECK-SSE2-NEXT:    movdqa %xmm2, %xmm4
-; CHECK-SSE2-NEXT:    shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[3,0]
-; CHECK-SSE2-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0,2]
-; CHECK-SSE2-NEXT:    psrld $31, %xmm2
-; CHECK-SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT:    paddd %xmm3, %xmm2
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-SSE2-NEXT:    pmuludq {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT:    pmuludq {{.*}}(%rip), %xmm3
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; CHECK-SSE2-NEXT:    psubd %xmm2, %xmm0
-; CHECK-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
-; CHECK-SSE2-NEXT:    psrld $31, %xmm0
+; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837]
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm2
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT:    paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pxor {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pcmpgtd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pandn {{.*}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT:    retq
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_one:
 ; CHECK-SSE41:       # %bb.0:
-; CHECK-SSE41-NEXT:    movl $1717986919, %eax # imm = 0x66666667
-; CHECK-SSE41-NEXT:    movd %eax, %xmm1
-; CHECK-SSE41-NEXT:    pmuldq %xmm0, %xmm1
-; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT:    pmuldq {{.*}}(%rip), %xmm2
-; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [0,0,1,0]
-; CHECK-SSE41-NEXT:    pmulld %xmm0, %xmm1
-; CHECK-SSE41-NEXT:    paddd %xmm2, %xmm1
-; CHECK-SSE41-NEXT:    movdqa %xmm1, %xmm2
-; CHECK-SSE41-NEXT:    psrad $1, %xmm2
-; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-SSE41-NEXT:    psrld $31, %xmm1
-; CHECK-SSE41-NEXT:    pxor %xmm3, %xmm3
-; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7]
-; CHECK-SSE41-NEXT:    paddd %xmm2, %xmm1
-; CHECK-SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm1
-; CHECK-SSE41-NEXT:    psubd %xmm1, %xmm0
-; CHECK-SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
+; CHECK-SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT:    paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [858993458,858993458,4294967295,858993458]
+; CHECK-SSE41-NEXT:    pminud %xmm0, %xmm1
+; CHECK-SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
 ; CHECK-SSE41-NEXT:    psrld $31, %xmm0
 ; CHECK-SSE41-NEXT:    retq
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_one:
 ; CHECK-AVX1:       # %bb.0:
-; CHECK-AVX1-NEXT:    movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX1-NEXT:    vmovd %eax, %xmm1
-; CHECK-AVX1-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT:    vpmuldq {{.*}}(%rip), %xmm2, %xmm2
-; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX1-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpsrad $1, %xmm1, %xmm2
-; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT:    vpsrld $31, %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7]
-; CHECK-AVX1-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
-; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX1-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    retq
 ;
 ; CHECK-AVX2-LABEL: test_srem_odd_one:
-; CHECK-AVX2:       # %bb.0:
-; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919]
-; CHECK-AVX2-NEXT:    vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX2-NEXT:    vmovd %eax, %xmm2
-; CHECK-AVX2-NEXT:    vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX2-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX2-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX2:       # %bb.0:
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837]
+; CHECK-AVX2-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [429496729,429496729,429496729,429496729]
+; CHECK-AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX2-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX2-NEXT:    retq
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_one:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX512VL-NEXT:    vmovd %eax, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 5, i32 5, i32 1, i32 5>
@@ -1711,23 +1389,11 @@ define <4 x i32> @test_srem_even_one(<4
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_even_one:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    movl $-1840700269, %eax # imm = 0x92492493
-; CHECK-AVX512VL-NEXT:    vmovd %eax, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm0, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprord $1, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 14, i32 14, i32 1, i32 14>
@@ -1874,23 +1540,11 @@ define <4 x i32> @test_srem_odd_even_one
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_even_one:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1717986919,2454267027,0,1374389535]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 5, i32 14, i32 1, i32 100>
@@ -2465,23 +2119,11 @@ define <4 x i32> @test_srem_odd_allones_
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_allones_and_poweroftwo:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1717986919,0,2147483649,1717986919]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 5, i32 4294967295, i32 16, i32 5>
@@ -2614,23 +2256,11 @@ define <4 x i32> @test_srem_even_allones
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_even_allones_and_poweroftwo:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [2454267027,0,2147483649,2454267027]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 14, i32 4294967295, i32 16, i32 14>
@@ -2776,23 +2406,11 @@ define <4 x i32> @test_srem_odd_even_all
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_and_poweroftwo:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1717986919,0,2147483649,1374389535]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 5, i32 4294967295, i32 16, i32 100>
@@ -2807,138 +2425,55 @@ define <4 x i32> @test_srem_odd_even_all
 define <4 x i32> @test_srem_odd_allones_and_one(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_odd_allones_and_one:
 ; CHECK-SSE2:       # %bb.0:
-; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [0,4294967295,1,0]
-; CHECK-SSE2-NEXT:    movdqa %xmm0, %xmm2
+; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837]
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm2
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT:    pmuludq %xmm3, %xmm1
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [1717986919,0,0,1717986919]
-; CHECK-SSE2-NEXT:    movdqa %xmm0, %xmm1
-; CHECK-SSE2-NEXT:    pmuludq %xmm4, %xmm1
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm1[1,3,2,3]
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[2,2,3,3]
-; CHECK-SSE2-NEXT:    pmuludq %xmm3, %xmm1
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
-; CHECK-SSE2-NEXT:    pxor %xmm1, %xmm1
-; CHECK-SSE2-NEXT:    pxor %xmm3, %xmm3
-; CHECK-SSE2-NEXT:    pcmpgtd %xmm0, %xmm3
-; CHECK-SSE2-NEXT:    pand %xmm4, %xmm3
-; CHECK-SSE2-NEXT:    psubd %xmm3, %xmm5
-; CHECK-SSE2-NEXT:    paddd %xmm2, %xmm5
-; CHECK-SSE2-NEXT:    movdqa %xmm5, %xmm2
-; CHECK-SSE2-NEXT:    psrad $1, %xmm2
-; CHECK-SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,3],xmm5[1,2]
-; CHECK-SSE2-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2,3,1]
-; CHECK-SSE2-NEXT:    psrld $31, %xmm5
-; CHECK-SSE2-NEXT:    pand {{.*}}(%rip), %xmm5
-; CHECK-SSE2-NEXT:    paddd %xmm2, %xmm5
-; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [5,4294967295,1,5]
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
-; CHECK-SSE2-NEXT:    pmuludq %xmm2, %xmm5
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[0,2,2,3]
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-SSE2-NEXT:    pmuludq %xmm3, %xmm2
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; CHECK-SSE2-NEXT:    psubd %xmm4, %xmm0
-; CHECK-SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
-; CHECK-SSE2-NEXT:    psrld $31, %xmm0
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT:    paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pxor {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pcmpgtd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pandn {{.*}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT:    retq
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_allones_and_one:
 ; CHECK-SSE41:       # %bb.0:
-; CHECK-SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [1717986919,0,0,1717986919]
-; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT:    pmuldq %xmm2, %xmm3
-; CHECK-SSE41-NEXT:    pmuldq %xmm0, %xmm1
-; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; CHECK-SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [0,4294967295,1,0]
-; CHECK-SSE41-NEXT:    pmulld %xmm0, %xmm2
-; CHECK-SSE41-NEXT:    paddd %xmm1, %xmm2
-; CHECK-SSE41-NEXT:    movdqa %xmm2, %xmm1
-; CHECK-SSE41-NEXT:    psrad $1, %xmm1
-; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5],xmm1[6,7]
-; CHECK-SSE41-NEXT:    psrld $31, %xmm2
-; CHECK-SSE41-NEXT:    pxor %xmm3, %xmm3
-; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3,4,5],xmm2[6,7]
-; CHECK-SSE41-NEXT:    paddd %xmm1, %xmm2
-; CHECK-SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm2
-; CHECK-SSE41-NEXT:    psubd %xmm2, %xmm0
-; CHECK-SSE41-NEXT:    pcmpeqd %xmm3, %xmm0
+; CHECK-SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT:    paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [858993458,4294967295,4294967295,858993458]
+; CHECK-SSE41-NEXT:    pminud %xmm0, %xmm1
+; CHECK-SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
 ; CHECK-SSE41-NEXT:    psrld $31, %xmm0
 ; CHECK-SSE41-NEXT:    retq
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_allones_and_one:
 ; CHECK-AVX1:       # %bb.0:
-; CHECK-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [1717986919,0,0,1717986919]
-; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX1-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX1-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpsrad $1, %xmm1, %xmm2
-; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3,4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT:    vpsrld $31, %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3,4,5],xmm1[6,7]
-; CHECK-AVX1-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
-; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX1-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    retq
 ;
 ; CHECK-AVX2-LABEL: test_srem_odd_allones_and_one:
 ; CHECK-AVX2:       # %bb.0:
-; CHECK-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [1717986919,0,0,1717986919]
-; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX2-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX2-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX2-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3]
-; CHECK-AVX2-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837]
+; CHECK-AVX2-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [429496729,429496729,429496729,429496729]
+; CHECK-AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX2-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX2-NEXT:    retq
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_allones_and_one:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1717986919,0,0,1717986919]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 5, i32 4294967295, i32 1, i32 5>
@@ -3070,23 +2605,11 @@ define <4 x i32> @test_srem_even_allones
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_even_allones_and_one:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [2454267027,0,0,2454267027]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprord $1, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 14, i32 4294967295, i32 1, i32 14>
@@ -3225,23 +2748,11 @@ define <4 x i32> @test_srem_odd_even_all
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_and_one:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1717986919,0,0,1374389535]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 5, i32 4294967295, i32 1, i32 100>
@@ -3385,23 +2896,11 @@ define <4 x i32> @test_srem_odd_poweroft
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_poweroftwo_and_one:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1717986919,2147483649,0,1717986919]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 5, i32 16, i32 1, i32 5>
@@ -3522,22 +3021,11 @@ define <4 x i32> @test_srem_even_powerof
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_even_poweroftwo_and_one:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [2454267027,2147483649,0,2454267027]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm0, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 14, i32 16, i32 1, i32 14>
@@ -3684,23 +3172,11 @@ define <4 x i32> @test_srem_odd_even_pow
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_even_poweroftwo_and_one:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1717986919,2147483649,0,1374389535]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 5, i32 16, i32 1, i32 100>
@@ -3821,18 +3297,11 @@ define <4 x i32> @test_srem_odd_allones_
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_allones_and_poweroftwo_and_one:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmuldq {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpsrlq $32, %xmm2, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 5, i32 4294967295, i32 16, i32 1>
@@ -3945,18 +3414,11 @@ define <4 x i32> @test_srem_even_allones
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_even_allones_and_poweroftwo_and_one:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmuldq {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpsrlq $32, %xmm2, %xmm2
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3]
-; CHECK-AVX512VL-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
   %srem = srem <4 x i32> %X, <i32 14, i32 4294967295, i32 16, i32 1>

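(For anyone cross-checking the new CHECK sequences above: vpmulld / vpaddd / vprord / vpminud / vpcmpeqd encode the Hacker's Delight 10-17 fold from the summary, i.e. `X s% D == 0` becomes a multiply by the inverse of the odd part of D, an add of a bias, a rotate by the power-of-two part, and an unsigned compare. The scalar sketch below is illustrative only and is not code from this patch; the odd-divisor constants match the ones visible in the checks (0xCCCCCCCD = 3435973837, 429496729, 858993458), while the even-divisor constants are recomputed here by the same recipe because the vector versions load them from memory.)

#include <cassert>
#include <cstdint>

// Scalar model of the fold for a positive odd divisor, here D = 5:
//   P = 5^-1 mod 2^32 = 0xCCCCCCCD, A = floor((2^31 - 1) / 5) = 429496729,
//   Q = 2 * A = 858993458 (the pminud constant in the checks above).
bool isDivisibleBy5(int32_t X) {
  uint32_t R = uint32_t(X) * 0xCCCCCCCDu + 429496729u; // mul by inverse, add bias
  return R <= 858993458u;                              // unsigned compare, no srem
}

// Even divisor D = 14 = 7 * 2^1: multiply by 7^-1 mod 2^32 instead, then
// rotate right by 1 (the vprord $1 above) before the compare.
bool isDivisibleBy14(int32_t X) {
  uint32_t R = uint32_t(X) * 0xB6DB6DB7u + 306783378u; // A = floor((2^31 - 1) / 7)
  R = (R >> 1) | (R << 31);                            // rotr(R, 1)
  return R <= 306783378u;                              // Q = floor(2 * A / 2^1)
}

int main() {
  // Exhaustive check over a window around zero, including negative dividends.
  for (int64_t X = -1000000; X <= 1000000; ++X) {
    assert(isDivisibleBy5(int32_t(X)) == (X % 5 == 0));
    assert(isDivisibleBy14(int32_t(X)) == (X % 14 == 0));
  }
}
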
Modified: llvm/trunk/test/CodeGen/X86/srem-seteq-vec-splat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/srem-seteq-vec-splat.ll?rev=368702&r1=368701&r2=368702&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/srem-seteq-vec-splat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/srem-seteq-vec-splat.ll Tue Aug 13 07:57:37 2019
@@ -9,105 +9,55 @@
 define <4 x i32> @test_srem_odd_25(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_odd_25:
 ; CHECK-SSE2:       # %bb.0:
-; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [1374389535,1374389535,1374389535,1374389535]
-; CHECK-SSE2-NEXT:    movdqa %xmm0, %xmm2
-; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm2
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm3
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; CHECK-SSE2-NEXT:    pxor %xmm3, %xmm3
-; CHECK-SSE2-NEXT:    pxor %xmm4, %xmm4
-; CHECK-SSE2-NEXT:    pcmpgtd %xmm0, %xmm4
-; CHECK-SSE2-NEXT:    pand %xmm1, %xmm4
-; CHECK-SSE2-NEXT:    psubd %xmm4, %xmm2
-; CHECK-SSE2-NEXT:    movdqa %xmm2, %xmm1
-; CHECK-SSE2-NEXT:    psrld $31, %xmm1
-; CHECK-SSE2-NEXT:    psrad $3, %xmm2
-; CHECK-SSE2-NEXT:    paddd %xmm1, %xmm2
-; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [25,25,25,25]
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
+; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145]
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm2
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm4
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[0,2,2,3]
-; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; CHECK-SSE2-NEXT:    psubd %xmm2, %xmm0
-; CHECK-SSE2-NEXT:    pcmpeqd %xmm3, %xmm0
-; CHECK-SSE2-NEXT:    psrld $31, %xmm0
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT:    paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pxor {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pcmpgtd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pandn {{.*}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT:    retq
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_25:
 ; CHECK-SSE41:       # %bb.0:
-; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535]
-; CHECK-SSE41-NEXT:    pmuldq %xmm2, %xmm1
-; CHECK-SSE41-NEXT:    pmuldq %xmm0, %xmm2
-; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
-; CHECK-SSE41-NEXT:    movdqa %xmm2, %xmm1
-; CHECK-SSE41-NEXT:    psrld $31, %xmm1
-; CHECK-SSE41-NEXT:    psrad $3, %xmm2
-; CHECK-SSE41-NEXT:    paddd %xmm1, %xmm2
-; CHECK-SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm2
-; CHECK-SSE41-NEXT:    psubd %xmm2, %xmm0
-; CHECK-SSE41-NEXT:    pxor %xmm1, %xmm1
+; CHECK-SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT:    paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [171798690,171798690,171798690,171798690]
+; CHECK-SSE41-NEXT:    pminud %xmm0, %xmm1
 ; CHECK-SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
 ; CHECK-SSE41-NEXT:    psrld $31, %xmm0
 ; CHECK-SSE41-NEXT:    retq
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_25:
 ; CHECK-AVX1:       # %bb.0:
-; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535]
-; CHECK-AVX1-NEXT:    vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
-; CHECK-AVX1-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX1-NEXT:    vpsrad $3, %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    retq
 ;
 ; CHECK-AVX2-LABEL: test_srem_odd_25:
 ; CHECK-AVX2:       # %bb.0:
-; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535]
-; CHECK-AVX2-NEXT:    vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX2-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX2-NEXT:    vpsrad $3, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [25,25,25,25]
-; CHECK-AVX2-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145]
+; CHECK-AVX2-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [85899345,85899345,85899345,85899345]
+; CHECK-AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [171798690,171798690,171798690,171798690]
+; CHECK-AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm1
 ; CHECK-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX2-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX2-NEXT:    retq
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_25:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpsrad $3, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip){1to4}, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip){1to4}, %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
@@ -208,18 +158,10 @@ define <4 x i32> @test_srem_even_100(<4
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_even_100:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpsrad $5, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip){1to4}, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprord $2, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip){1to4}, %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
@@ -235,113 +177,55 @@ define <4 x i32> @test_srem_even_100(<4
 define <4 x i32> @test_srem_odd_neg25(<4 x i32> %X) nounwind {
 ; CHECK-SSE2-LABEL: test_srem_odd_neg25:
 ; CHECK-SSE2:       # %bb.0:
-; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [1374389535,2920577761,2920577761,1374389535]
-; CHECK-SSE2-NEXT:    movdqa %xmm0, %xmm2
+; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145]
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm2
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,2,3,3]
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT:    pmuludq %xmm3, %xmm4
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,3,2,3]
-; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; CHECK-SSE2-NEXT:    pxor %xmm3, %xmm3
-; CHECK-SSE2-NEXT:    pxor %xmm4, %xmm4
-; CHECK-SSE2-NEXT:    pcmpgtd %xmm0, %xmm4
-; CHECK-SSE2-NEXT:    pand %xmm1, %xmm4
-; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [0,4294967295,4294967295,0]
-; CHECK-SSE2-NEXT:    pand %xmm0, %xmm1
-; CHECK-SSE2-NEXT:    paddd %xmm4, %xmm1
-; CHECK-SSE2-NEXT:    psubd %xmm1, %xmm2
-; CHECK-SSE2-NEXT:    movdqa %xmm2, %xmm1
-; CHECK-SSE2-NEXT:    psrld $31, %xmm1
-; CHECK-SSE2-NEXT:    psrad $3, %xmm2
-; CHECK-SSE2-NEXT:    paddd %xmm1, %xmm2
-; CHECK-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [25,4294967271,4294967271,25]
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
-; CHECK-SSE2-NEXT:    pmuludq %xmm1, %xmm2
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
-; CHECK-SSE2-NEXT:    pmuludq %xmm4, %xmm1
-; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; CHECK-SSE2-NEXT:    psubd %xmm2, %xmm0
-; CHECK-SSE2-NEXT:    pcmpeqd %xmm3, %xmm0
-; CHECK-SSE2-NEXT:    psrld $31, %xmm0
+; CHECK-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; CHECK-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT:    paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pxor {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pcmpgtd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT:    pandn {{.*}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT:    retq
 ;
 ; CHECK-SSE41-LABEL: test_srem_odd_neg25:
 ; CHECK-SSE41:       # %bb.0:
-; CHECK-SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [1374389535,2920577761,2920577761,1374389535]
-; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT:    pmuldq %xmm2, %xmm3
-; CHECK-SSE41-NEXT:    pmuldq %xmm0, %xmm1
-; CHECK-SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; CHECK-SSE41-NEXT:    movdqa %xmm1, %xmm2
-; CHECK-SSE41-NEXT:    psrld $31, %xmm2
-; CHECK-SSE41-NEXT:    psrad $3, %xmm1
-; CHECK-SSE41-NEXT:    paddd %xmm2, %xmm1
-; CHECK-SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm1
-; CHECK-SSE41-NEXT:    psubd %xmm1, %xmm0
-; CHECK-SSE41-NEXT:    pxor %xmm1, %xmm1
+; CHECK-SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT:    paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [171798690,171798690,171798690,171798690]
+; CHECK-SSE41-NEXT:    pminud %xmm0, %xmm1
 ; CHECK-SSE41-NEXT:    pcmpeqd %xmm1, %xmm0
 ; CHECK-SSE41-NEXT:    psrld $31, %xmm0
 ; CHECK-SSE41-NEXT:    retq
 ;
 ; CHECK-AVX1-LABEL: test_srem_odd_neg25:
 ; CHECK-AVX1:       # %bb.0:
-; CHECK-AVX1-NEXT:    vmovdqa {{.*#+}} xmm1 = [1374389535,2920577761,2920577761,1374389535]
-; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX1-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX1-NEXT:    vpsrad $3, %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm1
 ; CHECK-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX1-NEXT:    retq
 ;
 ; CHECK-AVX2-LABEL: test_srem_odd_neg25:
 ; CHECK-AVX2:       # %bb.0:
-; CHECK-AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [1374389535,2920577761,2920577761,1374389535]
-; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX2-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX2-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX2-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX2-NEXT:    vpsrad $3, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145]
+; CHECK-AVX2-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [85899345,85899345,85899345,85899345]
+; CHECK-AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [171798690,171798690,171798690,171798690]
+; CHECK-AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm1
 ; CHECK-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX2-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX2-NEXT:    retq
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_odd_neg25:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1374389535,2920577761,2920577761,1374389535]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpsrad $3, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip){1to4}, %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq
@@ -442,19 +326,10 @@ define <4 x i32> @test_srem_even_neg100(
 ;
 ; CHECK-AVX512VL-LABEL: test_srem_even_neg100:
 ; CHECK-AVX512VL:       # %bb.0:
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2920577761,2920577761,2920577761,2920577761]
-; CHECK-AVX512VL-NEXT:    vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT:    vpsrad $5, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT:    vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vprord $2, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT:    vpminud {{.*}}(%rip){1to4}, %xmm0, %xmm1
 ; CHECK-AVX512VL-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    vpsrld $31, %xmm0, %xmm0
 ; CHECK-AVX512VL-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/srem-seteq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/srem-seteq.ll?rev=368702&r1=368701&r2=368702&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/srem-seteq.ll (original)
+++ llvm/trunk/test/CodeGen/X86/srem-seteq.ll Tue Aug 13 07:57:37 2019
@@ -9,32 +9,20 @@
 define i32 @test_srem_odd(i32 %X) nounwind {
 ; X86-LABEL: test_srem_odd:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl $1717986919, %edx # imm = 0x66666667
-; X86-NEXT:    movl %ecx, %eax
-; X86-NEXT:    imull %edx
-; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    shrl $31, %eax
-; X86-NEXT:    sarl %edx
-; X86-NEXT:    addl %eax, %edx
-; X86-NEXT:    leal (%edx,%edx,4), %edx
+; X86-NEXT:    imull $-858993459, {{[0-9]+}}(%esp), %ecx # imm = 0xCCCCCCCD
+; X86-NEXT:    addl $429496729, %ecx # imm = 0x19999999
 ; X86-NEXT:    xorl %eax, %eax
-; X86-NEXT:    cmpl %edx, %ecx
-; X86-NEXT:    sete %al
+; X86-NEXT:    cmpl $858993459, %ecx # imm = 0x33333333
+; X86-NEXT:    setb %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_srem_odd:
 ; X64:       # %bb.0:
-; X64-NEXT:    movslq %edi, %rcx
-; X64-NEXT:    imulq $1717986919, %rcx, %rax # imm = 0x66666667
-; X64-NEXT:    movq %rax, %rdx
-; X64-NEXT:    shrq $63, %rdx
-; X64-NEXT:    sarq $33, %rax
-; X64-NEXT:    addl %edx, %eax
-; X64-NEXT:    leal (%rax,%rax,4), %edx
+; X64-NEXT:    imull $-858993459, %edi, %ecx # imm = 0xCCCCCCCD
+; X64-NEXT:    addl $429496729, %ecx # imm = 0x19999999
 ; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    cmpl %edx, %ecx
-; X64-NEXT:    sete %al
+; X64-NEXT:    cmpl $858993459, %ecx # imm = 0x33333333
+; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
   %srem = srem i32 %X, 5
   %cmp = icmp eq i32 %srem, 0
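 
A quick scalar sanity check of the constants above (illustrative sketch, not part of the patch; helper names are my own): 0xCCCCCCCD is the multiplicative inverse of 5 mod 2^32 (the Hacker's Delight 10-17 fold), 0x19999999 recentres the signed multiples of 5 at zero, and 0x33333333 = (2^32 - 1) / 5 counts how many multiples of 5 an i32 holds.

    #include <cassert>
    #include <cstdint>

    // X % 5 == 0 without computing the remainder.
    static bool srem5IsZero(int32_t X) {
      uint32_t Q = (uint32_t)X * 0xCCCCCCCDu + 0x19999999u; // imull; addl
      return Q < 0x33333333u;                               // cmpl; setb
    }

    int main() {
      // Sparse sweep of the i32 range; 9973 is coprime to 5, so every
      // residue class (including the multiples) gets exercised.
      for (int64_t X = INT32_MIN; X <= INT32_MAX; X += 9973)
        assert(srem5IsZero((int32_t)X) == ((int32_t)X % 5 == 0));
    }
 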
@@ -45,34 +33,20 @@ define i32 @test_srem_odd(i32 %X) nounwi
 define i32 @test_srem_odd_25(i32 %X) nounwind {
 ; X86-LABEL: test_srem_odd_25:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl $1374389535, %edx # imm = 0x51EB851F
-; X86-NEXT:    movl %ecx, %eax
-; X86-NEXT:    imull %edx
-; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    shrl $31, %eax
-; X86-NEXT:    sarl $3, %edx
-; X86-NEXT:    addl %eax, %edx
-; X86-NEXT:    leal (%edx,%edx,4), %eax
-; X86-NEXT:    leal (%eax,%eax,4), %edx
+; X86-NEXT:    imull $-1030792151, {{[0-9]+}}(%esp), %ecx # imm = 0xC28F5C29
+; X86-NEXT:    addl $85899345, %ecx # imm = 0x51EB851
 ; X86-NEXT:    xorl %eax, %eax
-; X86-NEXT:    cmpl %edx, %ecx
-; X86-NEXT:    sete %al
+; X86-NEXT:    cmpl $171798691, %ecx # imm = 0xA3D70A3
+; X86-NEXT:    setb %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_srem_odd_25:
 ; X64:       # %bb.0:
-; X64-NEXT:    movslq %edi, %rcx
-; X64-NEXT:    imulq $1374389535, %rcx, %rax # imm = 0x51EB851F
-; X64-NEXT:    movq %rax, %rdx
-; X64-NEXT:    shrq $63, %rdx
-; X64-NEXT:    sarq $35, %rax
-; X64-NEXT:    addl %edx, %eax
-; X64-NEXT:    leal (%rax,%rax,4), %eax
-; X64-NEXT:    leal (%rax,%rax,4), %edx
+; X64-NEXT:    imull $-1030792151, %edi, %ecx # imm = 0xC28F5C29
+; X64-NEXT:    addl $85899345, %ecx # imm = 0x51EB851
 ; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    cmpl %edx, %ecx
-; X64-NEXT:    sete %al
+; X64-NEXT:    cmpl $171798691, %ecx # imm = 0xA3D70A3
+; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
   %srem = srem i32 %X, 25
   %cmp = icmp eq i32 %srem, 0
@@ -84,34 +58,20 @@ define i32 @test_srem_odd_25(i32 %X) nou
 define i32 @test_srem_odd_bit30(i32 %X) nounwind {
 ; X86-LABEL: test_srem_odd_bit30:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl $536870911, %edx # imm = 0x1FFFFFFF
-; X86-NEXT:    movl %ecx, %eax
-; X86-NEXT:    imull %edx
-; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    shrl $31, %eax
-; X86-NEXT:    sarl $27, %edx
-; X86-NEXT:    addl %eax, %edx
-; X86-NEXT:    imull $1073741827, %edx, %edx # imm = 0x40000003
+; X86-NEXT:    imull $1789569707, {{[0-9]+}}(%esp), %ecx # imm = 0x6AAAAAAB
+; X86-NEXT:    incl %ecx
 ; X86-NEXT:    xorl %eax, %eax
-; X86-NEXT:    cmpl %edx, %ecx
-; X86-NEXT:    sete %al
+; X86-NEXT:    cmpl $3, %ecx
+; X86-NEXT:    setb %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_srem_odd_bit30:
 ; X64:       # %bb.0:
-; X64-NEXT:    movslq %edi, %rcx
-; X64-NEXT:    movq %rcx, %rax
-; X64-NEXT:    shlq $29, %rax
-; X64-NEXT:    subq %rcx, %rax
-; X64-NEXT:    movq %rax, %rdx
-; X64-NEXT:    shrq $63, %rdx
-; X64-NEXT:    sarq $59, %rax
-; X64-NEXT:    addl %edx, %eax
-; X64-NEXT:    imull $1073741827, %eax, %edx # imm = 0x40000003
+; X64-NEXT:    imull $1789569707, %edi, %ecx # imm = 0x6AAAAAAB
+; X64-NEXT:    incl %ecx
 ; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    cmpl %edx, %ecx
-; X64-NEXT:    sete %al
+; X64-NEXT:    cmpl $3, %ecx
+; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
   %srem = srem i32 %X, 1073741827
   %cmp = icmp eq i32 %srem, 0
@@ -123,35 +83,20 @@ define i32 @test_srem_odd_bit30(i32 %X)
 define i32 @test_srem_odd_bit31(i32 %X) nounwind {
 ; X86-LABEL: test_srem_odd_bit31:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl $-536870913, %edx # imm = 0xDFFFFFFF
-; X86-NEXT:    movl %ecx, %eax
-; X86-NEXT:    imull %edx
-; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    shrl $31, %eax
-; X86-NEXT:    sarl $28, %edx
-; X86-NEXT:    addl %eax, %edx
-; X86-NEXT:    imull $-2147483645, %edx, %edx # imm = 0x80000003
+; X86-NEXT:    imull $-715827883, {{[0-9]+}}(%esp), %ecx # imm = 0xD5555555
+; X86-NEXT:    incl %ecx
 ; X86-NEXT:    xorl %eax, %eax
-; X86-NEXT:    cmpl %edx, %ecx
-; X86-NEXT:    sete %al
+; X86-NEXT:    cmpl $3, %ecx
+; X86-NEXT:    setb %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_srem_odd_bit31:
 ; X64:       # %bb.0:
-; X64-NEXT:    movslq %edi, %rcx
-; X64-NEXT:    movq %rcx, %rax
-; X64-NEXT:    shlq $29, %rax
-; X64-NEXT:    addq %rcx, %rax
-; X64-NEXT:    negq %rax
-; X64-NEXT:    movq %rax, %rdx
-; X64-NEXT:    shrq $63, %rdx
-; X64-NEXT:    sarq $60, %rax
-; X64-NEXT:    addl %edx, %eax
-; X64-NEXT:    imull $-2147483645, %eax, %edx # imm = 0x80000003
+; X64-NEXT:    imull $-715827883, %edi, %ecx # imm = 0xD5555555
+; X64-NEXT:    incl %ecx
 ; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    cmpl %edx, %ecx
-; X64-NEXT:    sete %al
+; X64-NEXT:    cmpl $3, %ecx
+; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
   %srem = srem i32 %X, 2147483651
   %cmp = icmp eq i32 %srem, 0
@@ -166,37 +111,25 @@ define i32 @test_srem_odd_bit31(i32 %X)
 define i16 @test_srem_even(i16 %X) nounwind {
 ; X86-LABEL: test_srem_even:
 ; X86:       # %bb.0:
-; X86-NEXT:    movswl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    imull $18725, %ecx, %eax # imm = 0x4925
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    shrl $31, %edx
-; X86-NEXT:    sarl $18, %eax
-; X86-NEXT:    addl %edx, %eax
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    shll $4, %edx
-; X86-NEXT:    subl %eax, %edx
-; X86-NEXT:    subl %eax, %edx
+; X86-NEXT:    imull $28087, {{[0-9]+}}(%esp), %eax # imm = 0x6DB7
+; X86-NEXT:    addl $4680, %eax # imm = 0x1248
+; X86-NEXT:    rorw %ax
+; X86-NEXT:    movzwl %ax, %ecx
 ; X86-NEXT:    xorl %eax, %eax
-; X86-NEXT:    cmpw %dx, %cx
-; X86-NEXT:    setne %al
+; X86-NEXT:    cmpl $4680, %ecx # imm = 0x1248
+; X86-NEXT:    seta %al
 ; X86-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_srem_even:
 ; X64:       # %bb.0:
-; X64-NEXT:    movswl %di, %ecx
-; X64-NEXT:    imull $18725, %ecx, %eax # imm = 0x4925
-; X64-NEXT:    movl %eax, %edx
-; X64-NEXT:    shrl $31, %edx
-; X64-NEXT:    sarl $18, %eax
-; X64-NEXT:    addl %edx, %eax
-; X64-NEXT:    movl %eax, %edx
-; X64-NEXT:    shll $4, %edx
-; X64-NEXT:    subl %eax, %edx
-; X64-NEXT:    subl %eax, %edx
+; X64-NEXT:    imull $28087, %edi, %eax # imm = 0x6DB7
+; X64-NEXT:    addl $4680, %eax # imm = 0x1248
+; X64-NEXT:    rorw %ax
+; X64-NEXT:    movzwl %ax, %ecx
 ; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    cmpw %dx, %cx
-; X64-NEXT:    setne %al
+; X64-NEXT:    cmpl $4680, %ecx # imm = 0x1248
+; X64-NEXT:    seta %al
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
   %srem = srem i16 %X, 14
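 
The i16 flavour of the even fold, for reference (sketch, not from the patch): 14 = 7 * 2, 28087 is the inverse of 7 mod 2^16, and because this test uses icmp ne the final compare flips from setb to seta.

    #include <cassert>
    #include <cstdint>

    static bool srem14IsNonZero(int16_t X) {
      uint16_t Q = (uint16_t)((uint16_t)X * 28087u + 4680u); // X * inv(7) + bias
      Q = (uint16_t)((Q >> 1) | (Q << 15));                  // rorw
      return Q > 4680u;                                      // seta
    }

    int main() {
      for (int X = INT16_MIN; X <= INT16_MAX; ++X) // exhaustive at 16 bits
        assert(srem14IsNonZero((int16_t)X) == ((int16_t)X % 14 != 0));
    }
 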
@@ -208,32 +141,22 @@ define i16 @test_srem_even(i16 %X) nounw
 define i32 @test_srem_even_100(i32 %X) nounwind {
 ; X86-LABEL: test_srem_even_100:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl $1374389535, %edx # imm = 0x51EB851F
-; X86-NEXT:    movl %ecx, %eax
-; X86-NEXT:    imull %edx
-; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    shrl $31, %eax
-; X86-NEXT:    sarl $5, %edx
-; X86-NEXT:    addl %eax, %edx
-; X86-NEXT:    imull $100, %edx, %edx
+; X86-NEXT:    imull $-1030792151, {{[0-9]+}}(%esp), %ecx # imm = 0xC28F5C29
+; X86-NEXT:    addl $85899344, %ecx # imm = 0x51EB850
+; X86-NEXT:    rorl $2, %ecx
 ; X86-NEXT:    xorl %eax, %eax
-; X86-NEXT:    cmpl %edx, %ecx
-; X86-NEXT:    sete %al
+; X86-NEXT:    cmpl $42949673, %ecx # imm = 0x28F5C29
+; X86-NEXT:    setb %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_srem_even_100:
 ; X64:       # %bb.0:
-; X64-NEXT:    movslq %edi, %rcx
-; X64-NEXT:    imulq $1374389535, %rcx, %rax # imm = 0x51EB851F
-; X64-NEXT:    movq %rax, %rdx
-; X64-NEXT:    shrq $63, %rdx
-; X64-NEXT:    sarq $37, %rax
-; X64-NEXT:    addl %edx, %eax
-; X64-NEXT:    imull $100, %eax, %edx
+; X64-NEXT:    imull $-1030792151, %edi, %ecx # imm = 0xC28F5C29
+; X64-NEXT:    addl $85899344, %ecx # imm = 0x51EB850
+; X64-NEXT:    rorl $2, %ecx
 ; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    cmpl %edx, %ecx
-; X64-NEXT:    sete %al
+; X64-NEXT:    cmpl $42949673, %ecx # imm = 0x28F5C29
+; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
   %srem = srem i32 %X, 100
   %cmp = icmp eq i32 %srem, 0
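 
The even-divisor variant works the same way with one extra step: since 100 = 25 * 2^2, the sequence multiplies by the inverse of the odd factor 25, adds the bias, then rotates the two always-zero low bits away before the unsigned compare. A matching C++ sketch (again illustrative, not part of the patch):

    #include <cassert>
    #include <cstdint>

    static uint32_t ror32(uint32_t V, unsigned K) { // rorl $K, 0 < K < 32
      return (V >> K) | (V << (32 - K));
    }

    static bool srem100IsZero(int32_t X) {
      uint32_t Q = (uint32_t)X * 0xC28F5C29u + 0x51EB850u; // X * inv(25) + bias
      return ror32(Q, 2) < 0x28F5C29u;                     // cmpl; setb
    }

    int main() {
      for (int64_t X = INT32_MIN; X <= INT32_MAX; X += 9973)
        assert(srem100IsZero((int32_t)X) == ((int32_t)X % 100 == 0));
    }
 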
@@ -245,32 +168,22 @@ define i32 @test_srem_even_100(i32 %X) n
 define i32 @test_srem_even_bit30(i32 %X) nounwind {
 ; X86-LABEL: test_srem_even_bit30:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl $1073741721, %edx # imm = 0x3FFFFF99
-; X86-NEXT:    movl %ecx, %eax
-; X86-NEXT:    imull %edx
-; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    shrl $31, %eax
-; X86-NEXT:    sarl $28, %edx
-; X86-NEXT:    addl %eax, %edx
-; X86-NEXT:    imull $1073741928, %edx, %edx # imm = 0x40000068
+; X86-NEXT:    imull $-51622203, {{[0-9]+}}(%esp), %ecx # imm = 0xFCEC4EC5
+; X86-NEXT:    addl $8, %ecx
+; X86-NEXT:    rorl $3, %ecx
 ; X86-NEXT:    xorl %eax, %eax
-; X86-NEXT:    cmpl %edx, %ecx
-; X86-NEXT:    sete %al
+; X86-NEXT:    cmpl $3, %ecx
+; X86-NEXT:    setb %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_srem_even_bit30:
 ; X64:       # %bb.0:
-; X64-NEXT:    movslq %edi, %rcx
-; X64-NEXT:    imulq $1073741721, %rcx, %rax # imm = 0x3FFFFF99
-; X64-NEXT:    movq %rax, %rdx
-; X64-NEXT:    shrq $63, %rdx
-; X64-NEXT:    sarq $60, %rax
-; X64-NEXT:    addl %edx, %eax
-; X64-NEXT:    imull $1073741928, %eax, %edx # imm = 0x40000068
+; X64-NEXT:    imull $-51622203, %edi, %ecx # imm = 0xFCEC4EC5
+; X64-NEXT:    addl $8, %ecx
+; X64-NEXT:    rorl $3, %ecx
 ; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    cmpl %edx, %ecx
-; X64-NEXT:    sete %al
+; X64-NEXT:    cmpl $3, %ecx
+; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
   %srem = srem i32 %X, 1073741928
   %cmp = icmp eq i32 %srem, 0
@@ -282,35 +195,22 @@ define i32 @test_srem_even_bit30(i32 %X)
 define i32 @test_srem_even_bit31(i32 %X) nounwind {
 ; X86-LABEL: test_srem_even_bit31:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl $2147483545, %edx # imm = 0x7FFFFF99
-; X86-NEXT:    movl %ecx, %eax
-; X86-NEXT:    imull %edx
-; X86-NEXT:    subl %ecx, %edx
-; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    shrl $31, %eax
-; X86-NEXT:    sarl $30, %edx
-; X86-NEXT:    addl %eax, %edx
-; X86-NEXT:    imull $-2147483546, %edx, %edx # imm = 0x80000066
+; X86-NEXT:    imull $-989526779, {{[0-9]+}}(%esp), %ecx # imm = 0xC5050505
+; X86-NEXT:    addl $2, %ecx
+; X86-NEXT:    rorl %ecx
 ; X86-NEXT:    xorl %eax, %eax
-; X86-NEXT:    cmpl %edx, %ecx
-; X86-NEXT:    sete %al
+; X86-NEXT:    cmpl $3, %ecx
+; X86-NEXT:    setb %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_srem_even_bit31:
 ; X64:       # %bb.0:
-; X64-NEXT:    movslq %edi, %rcx
-; X64-NEXT:    imulq $2147483545, %rcx, %rax # imm = 0x7FFFFF99
-; X64-NEXT:    shrq $32, %rax
-; X64-NEXT:    subl %ecx, %eax
-; X64-NEXT:    movl %eax, %edx
-; X64-NEXT:    shrl $31, %edx
-; X64-NEXT:    sarl $30, %eax
-; X64-NEXT:    addl %edx, %eax
-; X64-NEXT:    imull $-2147483546, %eax, %edx # imm = 0x80000066
+; X64-NEXT:    imull $-989526779, %edi, %ecx # imm = 0xC5050505
+; X64-NEXT:    addl $2, %ecx
+; X64-NEXT:    rorl %ecx
 ; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    cmpl %edx, %ecx
-; X64-NEXT:    sete %al
+; X64-NEXT:    cmpl $3, %ecx
+; X64-NEXT:    setb %al
 ; X64-NEXT:    retq
   %srem = srem i32 %X, 2147483750
   %cmp = icmp eq i32 %srem, 0
@@ -326,32 +226,20 @@ define i32 @test_srem_even_bit31(i32 %X)
 define i32 @test_srem_odd_setne(i32 %X) nounwind {
 ; X86-LABEL: test_srem_odd_setne:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl $1717986919, %edx # imm = 0x66666667
-; X86-NEXT:    movl %ecx, %eax
-; X86-NEXT:    imull %edx
-; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    shrl $31, %eax
-; X86-NEXT:    sarl %edx
-; X86-NEXT:    addl %eax, %edx
-; X86-NEXT:    leal (%edx,%edx,4), %edx
+; X86-NEXT:    imull $-858993459, {{[0-9]+}}(%esp), %ecx # imm = 0xCCCCCCCD
+; X86-NEXT:    addl $429496729, %ecx # imm = 0x19999999
 ; X86-NEXT:    xorl %eax, %eax
-; X86-NEXT:    cmpl %edx, %ecx
-; X86-NEXT:    setne %al
+; X86-NEXT:    cmpl $858993458, %ecx # imm = 0x33333332
+; X86-NEXT:    seta %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_srem_odd_setne:
 ; X64:       # %bb.0:
-; X64-NEXT:    movslq %edi, %rcx
-; X64-NEXT:    imulq $1717986919, %rcx, %rax # imm = 0x66666667
-; X64-NEXT:    movq %rax, %rdx
-; X64-NEXT:    shrq $63, %rdx
-; X64-NEXT:    sarq $33, %rax
-; X64-NEXT:    addl %edx, %eax
-; X64-NEXT:    leal (%rax,%rax,4), %edx
+; X64-NEXT:    imull $-858993459, %edi, %ecx # imm = 0xCCCCCCCD
+; X64-NEXT:    addl $429496729, %ecx # imm = 0x19999999
 ; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    cmpl %edx, %ecx
-; X64-NEXT:    setne %al
+; X64-NEXT:    cmpl $858993458, %ecx # imm = 0x33333332
+; X64-NEXT:    seta %al
 ; X64-NEXT:    retq
   %srem = srem i32 %X, 5
   %cmp = icmp ne i32 %srem, 0
@@ -363,32 +251,20 @@ define i32 @test_srem_odd_setne(i32 %X)
 define i32 @test_srem_negative_odd(i32 %X) nounwind {
 ; X86-LABEL: test_srem_negative_odd:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl $-1717986919, %edx # imm = 0x99999999
-; X86-NEXT:    movl %ecx, %eax
-; X86-NEXT:    imull %edx
-; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    shrl $31, %eax
-; X86-NEXT:    sarl %edx
-; X86-NEXT:    addl %eax, %edx
-; X86-NEXT:    leal (%edx,%edx,4), %edx
+; X86-NEXT:    imull $-858993459, {{[0-9]+}}(%esp), %ecx # imm = 0xCCCCCCCD
+; X86-NEXT:    addl $429496729, %ecx # imm = 0x19999999
 ; X86-NEXT:    xorl %eax, %eax
-; X86-NEXT:    addl %ecx, %edx
-; X86-NEXT:    setne %al
+; X86-NEXT:    cmpl $858993458, %ecx # imm = 0x33333332
+; X86-NEXT:    seta %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_srem_negative_odd:
 ; X64:       # %bb.0:
-; X64-NEXT:    movslq %edi, %rcx
-; X64-NEXT:    imulq $-1717986919, %rcx, %rax # imm = 0x99999999
-; X64-NEXT:    movq %rax, %rdx
-; X64-NEXT:    shrq $63, %rdx
-; X64-NEXT:    sarq $33, %rax
-; X64-NEXT:    addl %edx, %eax
-; X64-NEXT:    leal (%rax,%rax,4), %edx
+; X64-NEXT:    imull $-858993459, %edi, %ecx # imm = 0xCCCCCCCD
+; X64-NEXT:    addl $429496729, %ecx # imm = 0x19999999
 ; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    addl %edx, %ecx
-; X64-NEXT:    setne %al
+; X64-NEXT:    cmpl $858993458, %ecx # imm = 0x33333332
+; X64-NEXT:    seta %al
 ; X64-NEXT:    retq
   %srem = srem i32 %X, -5
   %cmp = icmp ne i32 %srem, 0
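 
Note how the negative odd divisor costs nothing extra: -5 is negated to 5 up front, so this matches the test_srem_odd_setne sequence exactly. In C++ terms (sketch, not from the patch):

    #include <cassert>
    #include <cstdint>

    static bool sremNeg5IsNonZero(int32_t X) {
      uint32_t Q = (uint32_t)X * 0xCCCCCCCDu + 0x19999999u; // divisor -5 -> 5
      return Q > 0x33333332u;                               // seta
    }

    int main() {
      for (int64_t X = INT32_MIN; X <= INT32_MAX; X += 9973)
        assert(sremNeg5IsNonZero((int32_t)X) == ((int32_t)X % -5 != 0));
    }
 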
@@ -398,35 +274,22 @@ define i32 @test_srem_negative_odd(i32 %
 define i32 @test_srem_negative_even(i32 %X) nounwind {
 ; X86-LABEL: test_srem_negative_even:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl $1840700269, %edx # imm = 0x6DB6DB6D
-; X86-NEXT:    movl %ecx, %eax
-; X86-NEXT:    imull %edx
-; X86-NEXT:    subl %ecx, %edx
-; X86-NEXT:    movl %edx, %eax
-; X86-NEXT:    shrl $31, %eax
-; X86-NEXT:    sarl $3, %edx
-; X86-NEXT:    addl %eax, %edx
-; X86-NEXT:    imull $-14, %edx, %edx
+; X86-NEXT:    imull $-1227133513, {{[0-9]+}}(%esp), %ecx # imm = 0xB6DB6DB7
+; X86-NEXT:    addl $306783378, %ecx # imm = 0x12492492
+; X86-NEXT:    rorl %ecx
 ; X86-NEXT:    xorl %eax, %eax
-; X86-NEXT:    cmpl %edx, %ecx
-; X86-NEXT:    setne %al
+; X86-NEXT:    cmpl $306783378, %ecx # imm = 0x12492492
+; X86-NEXT:    seta %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_srem_negative_even:
 ; X64:       # %bb.0:
-; X64-NEXT:    movslq %edi, %rcx
-; X64-NEXT:    imulq $1840700269, %rcx, %rax # imm = 0x6DB6DB6D
-; X64-NEXT:    shrq $32, %rax
-; X64-NEXT:    subl %ecx, %eax
-; X64-NEXT:    movl %eax, %edx
-; X64-NEXT:    shrl $31, %edx
-; X64-NEXT:    sarl $3, %eax
-; X64-NEXT:    addl %edx, %eax
-; X64-NEXT:    imull $-14, %eax, %edx
+; X64-NEXT:    imull $-1227133513, %edi, %ecx # imm = 0xB6DB6DB7
+; X64-NEXT:    addl $306783378, %ecx # imm = 0x12492492
+; X64-NEXT:    rorl %ecx
 ; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    cmpl %edx, %ecx
-; X64-NEXT:    setne %al
+; X64-NEXT:    cmpl $306783378, %ecx # imm = 0x12492492
+; X64-NEXT:    seta %al
 ; X64-NEXT:    retq
   %srem = srem i32 %X, -14
   %cmp = icmp ne i32 %srem, 0

Modified: llvm/trunk/test/CodeGen/X86/vselect-avx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vselect-avx.ll?rev=368702&r1=368701&r2=368702&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vselect-avx.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vselect-avx.ll Tue Aug 13 07:57:37 2019
@@ -84,21 +84,14 @@ bb:
 define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17,  <4 x i16> %tmp3, <4 x i16> %tmp12) {
 ; AVX1-LABEL: test3:
 ; AVX1:       ## %bb.0:
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [1431655766,1431655766,1431655766,1431655766]
-; AVX1-NEXT:    vpmuldq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpmuldq %xmm4, %xmm0, %xmm4
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
-; AVX1-NEXT:    vpsrld $31, %xmm3, %xmm4
-; AVX1-NEXT:    vpaddd %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm3, %xmm3
-; AVX1-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpminud {{.*}}(%rip), %xmm0, %xmm3
 ; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vpackssdw %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    vpblendvb %xmm0, %xmm1, %xmm2, %xmm1
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpblendvb %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX1-NEXT:    vmovq %xmm0, (%rdi)
 ; AVX1-NEXT:    vmovq %xmm1, (%rsi)
@@ -106,22 +99,17 @@ define void @test3(<4 x i32> %induction3
 ;
 ; AVX2-LABEL: test3:
 ; AVX2:       ## %bb.0:
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [1431655766,1431655766,1431655766,1431655766]
-; AVX2-NEXT:    vpmuldq %xmm4, %xmm3, %xmm3
-; AVX2-NEXT:    vpmuldq %xmm4, %xmm0, %xmm4
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2],xmm3[3]
-; AVX2-NEXT:    vpsrld $31, %xmm3, %xmm4
-; AVX2-NEXT:    vpaddd %xmm4, %xmm3, %xmm3
-; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [3,3,3,3]
-; AVX2-NEXT:    vpmulld %xmm4, %xmm3, %xmm3
-; AVX2-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [2863311531,2863311531,2863311531,2863311531]
+; AVX2-NEXT:    vpmulld %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [715827882,715827882,715827882,715827882]
+; AVX2-NEXT:    vpaddd %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1431655764,1431655764,1431655764,1431655764]
+; AVX2-NEXT:    vpminud %xmm3, %xmm0, %xmm3
 ; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
 ; AVX2-NEXT:    vpackssdw %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT:    vpblendvb %xmm0, %xmm1, %xmm2, %xmm1
 ; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX2-NEXT:    vpblendvb %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX2-NEXT:    vmovq %xmm0, (%rdi)
 ; AVX2-NEXT:    vmovq %xmm1, (%rsi)
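 
The vselect-avx change is the same fold with divisor 3 (the broadcast 2863311531 = 0xAAAAAAAB is inv(3) mod 2^32): per lane Q = X * 0xAAAAAAAB + 0x2AAAAAAA, and vpminud against 0x55555554 tests Q <= 0x55555554, i.e. X % 3 == 0. An illustrative per-lane model in C++ (not part of the patch):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    static bool laneSrem3IsZero(int32_t X) {
      uint32_t Q = (uint32_t)X * 2863311531u + 715827882u; // inv(3); bias
      return std::min(Q, 1431655764u) == Q;                // vpminud; vpcmpeqd
    }

    int main() {
      for (int64_t X = INT32_MIN; X <= INT32_MAX; X += 9973)
        assert(laneSrem3IsZero((int32_t)X) == ((int32_t)X % 3 == 0));
    }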
