[llvm] 5be64d4 - Revert "[x86] fold vector (X > -1) & Y to shift+andn"

Hans Wennborg via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 15 03:36:06 PST 2021


Author: Hans Wennborg
Date: 2021-11-15T12:35:49+01:00
New Revision: 5be64d416481c59dba5faae5e8b8a6fecb578082

URL: https://github.com/llvm/llvm-project/commit/5be64d416481c59dba5faae5e8b8a6fecb578082
DIFF: https://github.com/llvm/llvm-project/commit/5be64d416481c59dba5faae5e8b8a6fecb578082.diff

LOG: Revert "[x86] fold vector (X > -1) & Y to shift+andn"

This caused assertion failures:

  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp:9446:
  void llvm::SelectionDAG::ReplaceAllUsesWith(llvm::SDNode *, llvm::SDNode *):
  Assertion `(!From->hasAnyUseOfValue(i) || From->getValueType(i) == To->getValueType(i))
  && "Cannot use this version of ReplaceAllUsesWith!"' failed.

See comment on the code review.

(Had to update some expectations in test/CodeGen/X86/vselect-zero.ll
 manually due to other changes having landed after the reverted one.)

> and (pcmpgt X, -1), Y --> pandn (vsrai X, BitWidth-1), Y
>
> This avoids the -1 constant vector in favor of an arithmetic shift
> instruction if it exists (the ISA is still not complete after all
> these years...).
>
> We catch this pattern late in combining by matching PCMPGT, so it
> should not interfere with more general folds.
>
> Differential Revision: https://reviews.llvm.org/D113603

This reverts commit bf5748a1af0d2f6f9396d9dc6ac89d15de41eee7.
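
For reference, the reverted fold targeted the "is positive" signbit-mask
pattern exercised by the tests below; for <4 x i32> the IR looks like:

  %cmp  = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  %mask = sext <4 x i1> %cmp to <4 x i32>
  %and  = and <4 x i32> %mask, %y

With the fold, SSE codegen used an arithmetic shift plus andn instead of
materializing the all-ones compare operand:

  psrad $31, %xmm0
  pandn %xmm1, %xmm0

and this revert restores the compare-based sequence:

  pcmpeqd %xmm2, %xmm2
  pcmpgtd %xmm2, %xmm0
  pand    %xmm1, %xmm0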

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/vector-pcmp.ll
    llvm/test/CodeGen/X86/vselect-zero.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 62a332939f6f..d723970d665b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -45868,31 +45868,6 @@ static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
   if (VT != Op1.getValueType() || !VT.isSimple() || !VT.isInteger())
     return SDValue();
 
-  // Try to convert an "is positive" signbit masking operation into arithmetic
-  // shift and "andn". This saves a materialization of a -1 vector constant.
-  // The "is negative" variant should be handled more generally because it only
-  // requires "and" rather than "andn":
-  // and (pcmpgt X, -1), Y --> pandn (vsrai X, BitWidth - 1), Y
-  if (supportedVectorShiftWithImm(VT.getSimpleVT(), Subtarget, ISD::SRA)) {
-    SDValue X, Y;
-    if (Op1.hasOneUse() && Op1.getOpcode() == X86ISD::PCMPGT &&
-        isAllOnesOrAllOnesSplat(Op1.getOperand(1))) {
-      X = Op1.getOperand(0);
-      Y = Op0;
-    } else if (Op0.hasOneUse() && Op0.getOpcode() == X86ISD::PCMPGT &&
-               isAllOnesOrAllOnesSplat(Op0.getOperand(1))) {
-      X = Op0.getOperand(0);
-      Y = Op1;
-    }
-    if (X && Y) {
-      SDLoc DL(N);
-      SDValue Sra =
-          getTargetVShiftByConstNode(X86ISD::VSRAI, DL, VT.getSimpleVT(), X,
-                                     VT.getScalarSizeInBits() - 1, DAG);
-      return DAG.getNode(X86ISD::ANDNP, DL, VT, Sra, Y);
-    }
-  }
-
   APInt SplatVal;
   if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
       !SplatVal.isMask())

diff  --git a/llvm/test/CodeGen/X86/vector-pcmp.ll b/llvm/test/CodeGen/X86/vector-pcmp.ll
index 683267a69b8f..fbfac84f6332 100644
--- a/llvm/test/CodeGen/X86/vector-pcmp.ll
+++ b/llvm/test/CodeGen/X86/vector-pcmp.ll
@@ -640,25 +640,12 @@ define <2 x i64> @not_signbit_mask_v2i64(<2 x i64> %x, <2 x i64> %y) {
 ; SSE42-NEXT:    pand %xmm1, %xmm0
 ; SSE42-NEXT:    retq
 ;
-; AVX1-LABEL: not_signbit_mask_v2i64:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: not_signbit_mask_v2i64:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    retq
-;
-; AVX512-LABEL: not_signbit_mask_v2i64:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsraq $63, %xmm0, %xmm0
-; AVX512-NEXT:    vpandn %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    retq
+; AVX-LABEL: not_signbit_mask_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
   %sh = ashr <2 x i64> %x, <i64 63, i64 63>
   %not = xor <2 x i64> %sh, <i64 -1, i64 -1>
   %and = and <2 x i64> %y, %not
@@ -668,14 +655,16 @@ define <2 x i64> @not_signbit_mask_v2i64(<2 x i64> %x, <2 x i64> %y) {
 define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: not_signbit_mask_v4i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psrad $31, %xmm0
-; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    pcmpgtd %xmm2, %xmm0
+; SSE-NEXT:    pand %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: not_signbit_mask_v4i32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtd %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %sh = ashr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
   %not = xor <4 x i32> %sh, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -686,14 +675,16 @@ define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %x, <4 x i32> %y) {
 define <8 x i16> @not_signbit_mask_v8i16(<8 x i16> %x, <8 x i16> %y) {
 ; SSE-LABEL: not_signbit_mask_v8i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psraw $15, %xmm0
-; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    pcmpgtw %xmm2, %xmm0
+; SSE-NEXT:    pand %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: not_signbit_mask_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsraw $15, %xmm0, %xmm0
-; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtw %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %sh = ashr <8 x i16> %x, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
   %not = xor <8 x i16> %sh, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -761,8 +752,9 @@ define <4 x i64> @not_signbit_mask_v4i64(<4 x i64> %x, <4 x i64> %y) {
 ;
 ; AVX512-LABEL: not_signbit_mask_v4i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsraq $63, %ymm0, %ymm0
-; AVX512-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpcmpgtq %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpand %ymm0, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %sh = ashr <4 x i64> %x, <i64 63, i64 63, i64 63, i64 63>
   %not = xor <4 x i64> %sh, <i64 -1, i64 -1, i64 -1, i64 -1>
@@ -773,10 +765,11 @@ define <4 x i64> @not_signbit_mask_v4i64(<4 x i64> %x, <4 x i64> %y) {
 define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %x, <8 x i32> %y) {
 ; SSE-LABEL: not_signbit_mask_v8i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psrad $31, %xmm0
-; SSE-NEXT:    pandn %xmm2, %xmm0
-; SSE-NEXT:    psrad $31, %xmm1
-; SSE-NEXT:    pandn %xmm3, %xmm1
+; SSE-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE-NEXT:    pcmpgtd %xmm4, %xmm1
+; SSE-NEXT:    pcmpgtd %xmm4, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    pand %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: not_signbit_mask_v8i32:
@@ -790,14 +783,16 @@ define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %x, <8 x i32> %y) {
 ;
 ; AVX2-LABEL: not_signbit_mask_v8i32:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpcmpgtd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: not_signbit_mask_v8i32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsrad $31, %ymm0, %ymm0
-; AVX512-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpcmpgtd %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %sh = ashr <8 x i32> %x, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
   %not = xor <8 x i32> %sh, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -808,10 +803,11 @@ define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %x, <8 x i32> %y) {
 define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %x, <16 x i16> %y) {
 ; SSE-LABEL: not_signbit_mask_v16i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psraw $15, %xmm0
-; SSE-NEXT:    pandn %xmm2, %xmm0
-; SSE-NEXT:    psraw $15, %xmm1
-; SSE-NEXT:    pandn %xmm3, %xmm1
+; SSE-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE-NEXT:    pcmpgtw %xmm4, %xmm1
+; SSE-NEXT:    pcmpgtw %xmm4, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    pand %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: not_signbit_mask_v16i16:
@@ -825,14 +821,16 @@ define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %x, <16 x i16> %y) {
 ;
 ; AVX2-LABEL: not_signbit_mask_v16i16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsraw $15, %ymm0, %ymm0
-; AVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: not_signbit_mask_v16i16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsraw $15, %ymm0, %ymm0
-; AVX512-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpand %ymm0, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %sh = ashr <16 x i16> %x, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
   %not = xor <16 x i16> %sh, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -895,25 +893,12 @@ define <2 x i64> @ispositive_mask_v2i64(<2 x i64> %x, <2 x i64> %y) {
 ; SSE42-NEXT:    pand %xmm1, %xmm0
 ; SSE42-NEXT:    retq
 ;
-; AVX1-LABEL: ispositive_mask_v2i64:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: ispositive_mask_v2i64:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    retq
-;
-; AVX512-LABEL: ispositive_mask_v2i64:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsraq $63, %xmm0, %xmm0
-; AVX512-NEXT:    vpandn %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    retq
+; AVX-LABEL: ispositive_mask_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %cmp = icmp sgt <2 x i64> %x, <i64 -1, i64 -1>
   %mask = sext <2 x i1> %cmp to <2 x i64>
   %and = and <2 x i64> %mask, %y
@@ -923,14 +908,16 @@ define <2 x i64> @ispositive_mask_v2i64(<2 x i64> %x, <2 x i64> %y) {
 define <4 x i32> @is_positive_mask_v4i32(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: is_positive_mask_v4i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psrad $31, %xmm0
-; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    pcmpgtd %xmm2, %xmm0
+; SSE-NEXT:    pand %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: is_positive_mask_v4i32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtd %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %cmp = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
   %mask = sext <4 x i1> %cmp to <4 x i32>
@@ -941,14 +928,16 @@ define <4 x i32> @is_positive_mask_v4i32(<4 x i32> %x, <4 x i32> %y) {
 define <8 x i16> @is_positive_mask_v8i16(<8 x i16> %x, <8 x i16> %y) {
 ; SSE-LABEL: is_positive_mask_v8i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psraw $15, %xmm0
-; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    pcmpgtw %xmm2, %xmm0
+; SSE-NEXT:    pand %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: is_positive_mask_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsraw $15, %xmm0, %xmm0
-; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtw %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cmp = icmp sgt <8 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %mask = sext <8 x i1> %cmp to <8 x i16>
@@ -1016,8 +1005,9 @@ define <4 x i64> @is_positive_mask_v4i64(<4 x i64> %x, <4 x i64> %y) {
 ;
 ; AVX512-LABEL: is_positive_mask_v4i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsraq $63, %ymm0, %ymm0
-; AVX512-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpcmpgtq %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %cmp = icmp sgt <4 x i64> %x, <i64 -1, i64 -1, i64 -1, i64 -1>
   %mask = sext <4 x i1> %cmp to <4 x i64>
@@ -1028,10 +1018,11 @@ define <4 x i64> @is_positive_mask_v4i64(<4 x i64> %x, <4 x i64> %y) {
 define <8 x i32> @is_positive_mask_v8i32(<8 x i32> %x, <8 x i32> %y) {
 ; SSE-LABEL: is_positive_mask_v8i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psrad $31, %xmm0
-; SSE-NEXT:    pandn %xmm2, %xmm0
-; SSE-NEXT:    psrad $31, %xmm1
-; SSE-NEXT:    pandn %xmm3, %xmm1
+; SSE-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE-NEXT:    pcmpgtd %xmm4, %xmm1
+; SSE-NEXT:    pcmpgtd %xmm4, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    pand %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: is_positive_mask_v8i32:
@@ -1046,14 +1037,16 @@ define <8 x i32> @is_positive_mask_v8i32(<8 x i32> %x, <8 x i32> %y) {
 ;
 ; AVX2-LABEL: is_positive_mask_v8i32:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpcmpgtd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: is_positive_mask_v8i32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsrad $31, %ymm0, %ymm0
-; AVX512-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpcmpgtd %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpand %ymm0, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %cmp = icmp sgt <8 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
   %mask = sext <8 x i1> %cmp to <8 x i32>
@@ -1064,10 +1057,11 @@ define <8 x i32> @is_positive_mask_v8i32(<8 x i32> %x, <8 x i32> %y) {
 define <16 x i16> @is_positive_mask_v16i16(<16 x i16> %x, <16 x i16> %y) {
 ; SSE-LABEL: is_positive_mask_v16i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psraw $15, %xmm0
-; SSE-NEXT:    pandn %xmm2, %xmm0
-; SSE-NEXT:    psraw $15, %xmm1
-; SSE-NEXT:    pandn %xmm3, %xmm1
+; SSE-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE-NEXT:    pcmpgtw %xmm4, %xmm1
+; SSE-NEXT:    pcmpgtw %xmm4, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    pand %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: is_positive_mask_v16i16:
@@ -1082,14 +1076,16 @@ define <16 x i16> @is_positive_mask_v16i16(<16 x i16> %x, <16 x i16> %y) {
 ;
 ; AVX2-LABEL: is_positive_mask_v16i16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsraw $15, %ymm0, %ymm0
-; AVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: is_positive_mask_v16i16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsraw $15, %ymm0, %ymm0
-; AVX512-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %cmp = icmp sgt <16 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %mask = sext <16 x i1> %cmp to <16 x i16>
@@ -1152,25 +1148,12 @@ define <2 x i64> @ispositive_mask_load_v2i64(<2 x i64> %x, <2 x i64>* %p) {
 ; SSE42-NEXT:    pand (%rdi), %xmm0
 ; SSE42-NEXT:    retq
 ;
-; AVX1-LABEL: ispositive_mask_load_v2i64:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpand (%rdi), %xmm0, %xmm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: ispositive_mask_load_v2i64:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpand (%rdi), %xmm0, %xmm0
-; AVX2-NEXT:    retq
-;
-; AVX512-LABEL: ispositive_mask_load_v2i64:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsraq $63, %xmm0, %xmm0
-; AVX512-NEXT:    vpandn (%rdi), %xmm0, %xmm0
-; AVX512-NEXT:    retq
+; AVX-LABEL: ispositive_mask_load_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpand (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    retq
   %cmp = icmp sgt <2 x i64> %x, <i64 -1, i64 -1>
   %mask = sext <2 x i1> %cmp to <2 x i64>
   %y = load <2 x i64>, <2 x i64>* %p
@@ -1181,14 +1164,16 @@ define <2 x i64> @ispositive_mask_load_v2i64(<2 x i64> %x, <2 x i64>* %p) {
 define <4 x i32> @is_positive_mask_load_v4i32(<4 x i32> %x, <4 x i32>* %p) {
 ; SSE-LABEL: is_positive_mask_load_v4i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psrad $31, %xmm0
-; SSE-NEXT:    pandn (%rdi), %xmm0
+; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE-NEXT:    pand (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: is_positive_mask_load_v4i32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX-NEXT:    vpandn (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpand (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cmp = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
   %mask = sext <4 x i1> %cmp to <4 x i32>
@@ -1200,14 +1185,16 @@ define <4 x i32> @is_positive_mask_load_v4i32(<4 x i32> %x, <4 x i32>* %p) {
 define <8 x i16> @is_positive_mask_load_v8i16(<8 x i16> %x, <8 x i16>* %p) {
 ; SSE-LABEL: is_positive_mask_load_v8i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psraw $15, %xmm0
-; SSE-NEXT:    pandn (%rdi), %xmm0
+; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE-NEXT:    pcmpgtw %xmm1, %xmm0
+; SSE-NEXT:    pand (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: is_positive_mask_load_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsraw $15, %xmm0, %xmm0
-; AVX-NEXT:    vpandn (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpand (%rdi), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cmp = icmp sgt <8 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %mask = sext <8 x i1> %cmp to <8 x i16>
@@ -1277,8 +1264,9 @@ define <4 x i64> @is_positive_mask_load_v4i64(<4 x i64> %x, <4 x i64>* %p) {
 ;
 ; AVX512-LABEL: is_positive_mask_load_v4i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsraq $63, %ymm0, %ymm0
-; AVX512-NEXT:    vpandn (%rdi), %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpand (%rdi), %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %cmp = icmp sgt <4 x i64> %x, <i64 -1, i64 -1, i64 -1, i64 -1>
   %mask = sext <4 x i1> %cmp to <4 x i64>
@@ -1290,10 +1278,11 @@ define <4 x i64> @is_positive_mask_load_v4i64(<4 x i64> %x, <4 x i64>* %p) {
 define <8 x i32> @is_positive_mask_load_v8i32(<8 x i32> %x, <8 x i32>* %p) {
 ; SSE-LABEL: is_positive_mask_load_v8i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psrad $31, %xmm0
-; SSE-NEXT:    pandn (%rdi), %xmm0
-; SSE-NEXT:    psrad $31, %xmm1
-; SSE-NEXT:    pandn 16(%rdi), %xmm1
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    pcmpgtd %xmm2, %xmm1
+; SSE-NEXT:    pcmpgtd %xmm2, %xmm0
+; SSE-NEXT:    pand (%rdi), %xmm0
+; SSE-NEXT:    pand 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: is_positive_mask_load_v8i32:
@@ -1308,14 +1297,16 @@ define <8 x i32> @is_positive_mask_load_v8i32(<8 x i32> %x, <8 x i32>* %p) {
 ;
 ; AVX2-LABEL: is_positive_mask_load_v8i32:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpandn (%rdi), %ymm0, %ymm0
+; AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpand (%rdi), %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: is_positive_mask_load_v8i32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsrad $31, %ymm0, %ymm0
-; AVX512-NEXT:    vpandn (%rdi), %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpand (%rdi), %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %cmp = icmp sgt <8 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
   %mask = sext <8 x i1> %cmp to <8 x i32>
@@ -1327,10 +1318,11 @@ define <8 x i32> @is_positive_mask_load_v8i32(<8 x i32> %x, <8 x i32>* %p) {
 define <16 x i16> @is_positive_mask_load_v16i16(<16 x i16> %x, <16 x i16>* %p) {
 ; SSE-LABEL: is_positive_mask_load_v16i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psraw $15, %xmm0
-; SSE-NEXT:    pandn (%rdi), %xmm0
-; SSE-NEXT:    psraw $15, %xmm1
-; SSE-NEXT:    pandn 16(%rdi), %xmm1
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    pcmpgtw %xmm2, %xmm1
+; SSE-NEXT:    pcmpgtw %xmm2, %xmm0
+; SSE-NEXT:    pand (%rdi), %xmm0
+; SSE-NEXT:    pand 16(%rdi), %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: is_positive_mask_load_v16i16:
@@ -1345,14 +1337,16 @@ define <16 x i16> @is_positive_mask_load_v16i16(<16 x i16> %x, <16 x i16>* %p) {
 ;
 ; AVX2-LABEL: is_positive_mask_load_v16i16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsraw $15, %ymm0, %ymm0
-; AVX2-NEXT:    vpandn (%rdi), %ymm0, %ymm0
+; AVX2-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpand (%rdi), %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: is_positive_mask_load_v16i16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsraw $15, %ymm0, %ymm0
-; AVX512-NEXT:    vpandn (%rdi), %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpand (%rdi), %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %cmp = icmp sgt <16 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %mask = sext <16 x i1> %cmp to <16 x i16>
@@ -1457,20 +1451,23 @@ define <2 x i1> @ispositive_mask_v2i64_v2i1(<2 x i64> %x, <2 x i1> %y) {
 define <4 x i1> @is_positive_mask_v4i32_v4i1(<4 x i32> %x, <4 x i1> %y) {
 ; SSE-LABEL: is_positive_mask_v4i32_v4i1:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psrad $31, %xmm0
-; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    pcmpgtd %xmm2, %xmm0
+; SSE-NEXT:    pand %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: is_positive_mask_v4i32_v4i1:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX1-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: is_positive_mask_v4i32_v4i1:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX2-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpcmpgtd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpand %xmm0, %xmm1, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: is_positive_mask_v4i32_v4i1:
@@ -1498,26 +1495,30 @@ define <4 x i1> @is_positive_mask_v4i32_v4i1(<4 x i32> %x, <4 x i1> %y) {
 define <8 x i1> @is_positive_mask_v8i16_v8i1(<8 x i16> %x, <8 x i1> %y) {
 ; SSE-LABEL: is_positive_mask_v8i16_v8i1:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psraw $15, %xmm0
-; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    pcmpgtw %xmm2, %xmm0
+; SSE-NEXT:    pand %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: is_positive_mask_v8i16_v8i1:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpsraw $15, %xmm0, %xmm0
-; AVX1-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpgtw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: is_positive_mask_v8i16_v8i1:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsraw $15, %xmm0, %xmm0
-; AVX2-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpcmpgtw %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512F-LABEL: is_positive_mask_v8i16_v8i1:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vpsraw $15, %xmm0, %xmm0
-; AVX512F-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpcmpgtw %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512DQBW-LABEL: is_positive_mask_v8i16_v8i1:
@@ -1580,8 +1581,9 @@ define <4 x i1> @is_positive_mask_v4i64_v4i1(<4 x i64> %x, <4 x i1> %y) {
 ; SSE2-LABEL: is_positive_mask_v4i64_v4i1:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; SSE2-NEXT:    psrad $31, %xmm0
-; SSE2-NEXT:    pandn %xmm2, %xmm0
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT:    pand %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: is_positive_mask_v4i64_v4i1:

diff  --git a/llvm/test/CodeGen/X86/vselect-zero.ll b/llvm/test/CodeGen/X86/vselect-zero.ll
index e00f06cc5912..f1e67d937878 100644
--- a/llvm/test/CodeGen/X86/vselect-zero.ll
+++ b/llvm/test/CodeGen/X86/vselect-zero.ll
@@ -945,20 +945,23 @@ define <16 x i8> @not_signbit_mask_v16i8(<16 x i8> %a, <16 x i8> %b) {
 define <8 x i16> @not_signbit_mask_v8i16(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: not_signbit_mask_v8i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psraw $15, %xmm0
-; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    pcmpgtw %xmm2, %xmm0
+; SSE-NEXT:    pand %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: not_signbit_mask_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsraw $15, %xmm0, %xmm0
-; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtw %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: not_signbit_mask_v8i16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsraw $15, %xmm0, %xmm0
-; AVX512-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpcmpgtw %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %cond = icmp sgt <8 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %r = select <8 x i1> %cond, <8 x i16> %b, <8 x i16> zeroinitializer
@@ -970,20 +973,23 @@ define <8 x i16> @not_signbit_mask_v8i16(<8 x i16> %a, <8 x i16> %b) {
 define <8 x i16> @not_signbit_mask_swap_v8i16(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: not_signbit_mask_swap_v8i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psraw $15, %xmm0
-; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    pcmpgtw %xmm2, %xmm0
+; SSE-NEXT:    pand    %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: not_signbit_mask_swap_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsraw $15, %xmm0, %xmm0
-; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd        %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtw        %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpand   %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: not_signbit_mask_swap_v8i16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsraw $15, %xmm0, %xmm0
-; AVX512-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpeqd        %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpcmpgtw        %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpand   %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %cond = icmp slt <8 x i16> %a, zeroinitializer
   %r = select <8 x i1> %cond, <8 x i16> zeroinitializer, <8 x i16> %b
@@ -993,20 +999,23 @@ define <8 x i16> @not_signbit_mask_swap_v8i16(<8 x i16> %a, <8 x i16> %b) {
 define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: not_signbit_mask_v4i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psrad $31, %xmm0
-; SSE-NEXT:    pandn %xmm1, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    pcmpgtd %xmm2, %xmm0
+; SSE-NEXT:    pand %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: not_signbit_mask_v4i32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtd %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: not_signbit_mask_v4i32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX512-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpcmpgtd %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %cond = icmp sgt <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
   %r = select <4 x i1> %cond, <4 x i32> %b, <4 x i32> zeroinitializer
@@ -1038,8 +1047,9 @@ define <2 x i64> @not_signbit_mask_v2i64(<2 x i64> %a, <2 x i64> %b) {
 ;
 ; AVX512-LABEL: not_signbit_mask_v2i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsraq $63, %xmm0, %xmm0
-; AVX512-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %cond = icmp sgt <2 x i64> %a, <i64 -1, i64 -1>
   %r = select <2 x i1> %cond, <2 x i64> %b, <2 x i64> zeroinitializer
@@ -1087,10 +1097,11 @@ define <32 x i8> @not_signbit_mask_v32i8(<32 x i8> %a, <32 x i8> %b) {
 define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %a, <16 x i16> %b) {
 ; SSE-LABEL: not_signbit_mask_v16i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psraw $15, %xmm0
-; SSE-NEXT:    pandn %xmm2, %xmm0
-; SSE-NEXT:    psraw $15, %xmm1
-; SSE-NEXT:    pandn %xmm3, %xmm1
+; SSE-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE-NEXT:    pcmpgtw %xmm4, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    pcmpgtw %xmm4, %xmm1
+; SSE-NEXT:    pand %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: not_signbit_mask_v16i16:
@@ -1104,14 +1115,16 @@ define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %a, <16 x i16> %b) {
 ;
 ; AVX2-LABEL: not_signbit_mask_v16i16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsraw $15, %ymm0, %ymm0
-; AVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: not_signbit_mask_v16i16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsraw $15, %ymm0, %ymm0
-; AVX512-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %cond = icmp sgt <16 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %r = select <16 x i1> %cond, <16 x i16> %b, <16 x i16> zeroinitializer
@@ -1121,10 +1134,11 @@ define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %a, <16 x i16> %b) {
 define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %a, <8 x i32> %b) {
 ; SSE-LABEL: not_signbit_mask_v8i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psrad $31, %xmm0
-; SSE-NEXT:    pandn %xmm2, %xmm0
-; SSE-NEXT:    psrad $31, %xmm1
-; SSE-NEXT:    pandn %xmm3, %xmm1
+; SSE-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE-NEXT:    pcmpgtd %xmm4, %xmm0
+; SSE-NEXT:    pand %xmm2, %xmm0
+; SSE-NEXT:    pcmpgtd %xmm4, %xmm1
+; SSE-NEXT:    pand %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: not_signbit_mask_v8i32:
@@ -1138,14 +1152,16 @@ define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %a, <8 x i32> %b) {
 ;
 ; AVX2-LABEL: not_signbit_mask_v8i32:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpcmpgtd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: not_signbit_mask_v8i32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsrad $31, %ymm0, %ymm0
-; AVX512-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpcmpgtd %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %cond = icmp sgt <8 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
   %r = select <8 x i1> %cond, <8 x i32> %b, <8 x i32> zeroinitializer
@@ -1157,10 +1173,11 @@ define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %a, <8 x i32> %b) {
 define <8 x i32> @not_signbit_mask_swap_v8i32(<8 x i32> %a, <8 x i32> %b) {
 ; SSE-LABEL: not_signbit_mask_swap_v8i32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psrad $31, %xmm0
-; SSE-NEXT:    pandn %xmm2, %xmm0
-; SSE-NEXT:    psrad $31, %xmm1
-; SSE-NEXT:    pandn %xmm3, %xmm1
+; SSE-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE-NEXT:    pcmpgtd %xmm4, %xmm0
+; SSE-NEXT:    pand    %xmm2, %xmm0
+; SSE-NEXT:    pcmpgtd %xmm4, %xmm1
+; SSE-NEXT:    pand    %xmm3, %xmm1
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: not_signbit_mask_swap_v8i32:
@@ -1174,14 +1191,16 @@ define <8 x i32> @not_signbit_mask_swap_v8i32(<8 x i32> %a, <8 x i32> %b) {
 ;
 ; AVX2-LABEL: not_signbit_mask_swap_v8i32:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
-; AVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpcmpeqd        %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpcmpgtd        %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpand   %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: not_signbit_mask_swap_v8i32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsrad $31, %ymm0, %ymm0
-; AVX512-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqd        %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpcmpgtd        %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpand   %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %cond = icmp slt <8 x i32> %a, zeroinitializer
   %r = select <8 x i1> %cond, <8 x i32> zeroinitializer, <8 x i32> %b
@@ -1228,8 +1247,9 @@ define <4 x i64> @not_signbit_mask_v4i64(<4 x i64> %a, <4 x i64> %b) {
 ;
 ; AVX512-LABEL: not_signbit_mask_v4i64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsraq $63, %ymm0, %ymm0
-; AVX512-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpcmpgtq %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpand %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %cond = icmp sgt <4 x i64> %a, <i64 -1, i64 -1, i64 -1, i64 -1>
   %r = select <4 x i1> %cond, <4 x i64> %b, <4 x i64> zeroinitializer


        

