[llvm] bf5748a - [x86] fold vector (X > -1) & Y to shift+andn
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Fri Nov 12 05:17:51 PST 2021
Author: Sanjay Patel
Date: 2021-11-12T08:17:46-05:00
New Revision: bf5748a1af0d2f6f9396d9dc6ac89d15de41eee7
URL: https://github.com/llvm/llvm-project/commit/bf5748a1af0d2f6f9396d9dc6ac89d15de41eee7
DIFF: https://github.com/llvm/llvm-project/commit/bf5748a1af0d2f6f9396d9dc6ac89d15de41eee7.diff
LOG: [x86] fold vector (X > -1) & Y to shift+andn
and (pcmpgt X, -1), Y --> pandn (vsrai X, BitWidth-1), Y
This avoids materializing the -1 constant vector in favor of an
arithmetic shift instruction, if one exists for the element type (the
ISA is still not complete after all these years...).
We catch this pattern late in combining by matching PCMPGT, so it
should not interfere with more general folds.
Differential Revision: https://reviews.llvm.org/D113603
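
For illustration only (not part of this patch), the scalar identity that the
fold relies on can be sketched in standalone C++ for 32-bit elements; the
function name and test values below are hypothetical:

// Illustrative sketch: "is positive" mask vs. sign-bit shift + andn.
// x86 PANDN computes (~dst) & src, matching ~sra_mask & y below.
#include <cassert>
#include <cstdint>

uint32_t is_positive_mask(int32_t x, uint32_t y) {
  // pcmpgt X, -1: all-ones when x > -1 (x is non-negative), else zero.
  uint32_t cmp_mask = (x > -1) ? 0xFFFFFFFFu : 0u;
  // vsrai X, 31: splat the sign bit across the element (shifting a negative
  // value right is implementation-defined before C++20, arithmetic on
  // typical targets).
  uint32_t sra_mask = static_cast<uint32_t>(x >> 31);
  // pand cmp_mask, y gives the same result as pandn sra_mask, y.
  assert((cmp_mask & y) == (~sra_mask & y));
  return ~sra_mask & y;
}

int main() {
  for (int32_t x : {INT32_MIN, -2, -1, 0, 1, 42, INT32_MAX})
    (void)is_positive_mask(x, 0xDEADBEEFu);
  return 0;
}

Compiling with assertions enabled and running the loop exercises the boundary
values; the vector fold in this patch is the lane-wise version of the same
identity.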
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/vector-pcmp.ll
llvm/test/CodeGen/X86/vselect-zero.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 60daca24e728..1ab05231ba52 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -45809,6 +45809,31 @@ static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
if (VT != Op1.getValueType() || !VT.isSimple() || !VT.isInteger())
return SDValue();
+ // Try to convert an "is positive" signbit masking operation into arithmetic
+ // shift and "andn". This saves a materialization of a -1 vector constant.
+ // The "is negative" variant should be handled more generally because it only
+ // requires "and" rather than "andn":
+ // and (pcmpgt X, -1), Y --> pandn (vsrai X, BitWidth - 1), Y
+ if (supportedVectorShiftWithImm(VT.getSimpleVT(), Subtarget, ISD::SRA)) {
+ SDValue X, Y;
+ if (Op1.hasOneUse() && Op1.getOpcode() == X86ISD::PCMPGT &&
+ isAllOnesOrAllOnesSplat(Op1.getOperand(1))) {
+ X = Op1.getOperand(0);
+ Y = Op0;
+ } else if (Op0.hasOneUse() && Op0.getOpcode() == X86ISD::PCMPGT &&
+ isAllOnesOrAllOnesSplat(Op0.getOperand(1))) {
+ X = Op0.getOperand(0);
+ Y = Op1;
+ }
+ if (X && Y) {
+ SDLoc DL(N);
+ SDValue Sra =
+ getTargetVShiftByConstNode(X86ISD::VSRAI, DL, VT.getSimpleVT(), X,
+ VT.getScalarSizeInBits() - 1, DAG);
+ return DAG.getNode(X86ISD::ANDNP, DL, VT, Sra, Y);
+ }
+ }
+
APInt SplatVal;
if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
!SplatVal.isMask())
diff --git a/llvm/test/CodeGen/X86/vector-pcmp.ll b/llvm/test/CodeGen/X86/vector-pcmp.ll
index fbfac84f6332..683267a69b8f 100644
--- a/llvm/test/CodeGen/X86/vector-pcmp.ll
+++ b/llvm/test/CodeGen/X86/vector-pcmp.ll
@@ -640,12 +640,25 @@ define <2 x i64> @not_signbit_mask_v2i64(<2 x i64> %x, <2 x i64> %y) {
; SSE42-NEXT: pand %xmm1, %xmm0
; SSE42-NEXT: retq
;
-; AVX-LABEL: not_signbit_mask_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: not_signbit_mask_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: not_signbit_mask_v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: not_signbit_mask_v2i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsraq $63, %xmm0, %xmm0
+; AVX512-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
%sh = ashr <2 x i64> %x, <i64 63, i64 63>
%not = xor <2 x i64> %sh, <i64 -1, i64 -1>
%and = and <2 x i64> %y, %not
@@ -655,16 +668,14 @@ define <2 x i64> @not_signbit_mask_v2i64(<2 x i64> %x, <2 x i64> %y) {
define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: not_signbit_mask_v4i32:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE-NEXT: pcmpgtd %xmm2, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: not_signbit_mask_v4i32:
; AVX: # %bb.0:
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%sh = ashr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
%not = xor <4 x i32> %sh, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -675,16 +686,14 @@ define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %x, <4 x i32> %y) {
define <8 x i16> @not_signbit_mask_v8i16(<8 x i16> %x, <8 x i16> %y) {
; SSE-LABEL: not_signbit_mask_v8i16:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE-NEXT: pcmpgtw %xmm2, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: psraw $15, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: not_signbit_mask_v8i16:
; AVX: # %bb.0:
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsraw $15, %xmm0, %xmm0
+; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%sh = ashr <8 x i16> %x, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
%not = xor <8 x i16> %sh, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -752,9 +761,8 @@ define <4 x i64> @not_signbit_mask_v4i64(<4 x i64> %x, <4 x i64> %y) {
;
; AVX512-LABEL: not_signbit_mask_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsraq $63, %ymm0, %ymm0
+; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%sh = ashr <4 x i64> %x, <i64 63, i64 63, i64 63, i64 63>
%not = xor <4 x i64> %sh, <i64 -1, i64 -1, i64 -1, i64 -1>
@@ -765,11 +773,10 @@ define <4 x i64> @not_signbit_mask_v4i64(<4 x i64> %x, <4 x i64> %y) {
define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %x, <8 x i32> %y) {
; SSE-LABEL: not_signbit_mask_v8i32:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE-NEXT: pcmpgtd %xmm4, %xmm1
-; SSE-NEXT: pcmpgtd %xmm4, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pandn %xmm2, %xmm0
+; SSE-NEXT: psrad $31, %xmm1
+; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: not_signbit_mask_v8i32:
@@ -783,16 +790,14 @@ define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %x, <8 x i32> %y) {
;
; AVX2-LABEL: not_signbit_mask_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
+; AVX2-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: not_signbit_mask_v8i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrad $31, %ymm0, %ymm0
+; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%sh = ashr <8 x i32> %x, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
%not = xor <8 x i32> %sh, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -803,11 +808,10 @@ define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %x, <8 x i32> %y) {
define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %x, <16 x i16> %y) {
; SSE-LABEL: not_signbit_mask_v16i16:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE-NEXT: pcmpgtw %xmm4, %xmm1
-; SSE-NEXT: pcmpgtw %xmm4, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: psraw $15, %xmm0
+; SSE-NEXT: pandn %xmm2, %xmm0
+; SSE-NEXT: psraw $15, %xmm1
+; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: not_signbit_mask_v16i16:
@@ -821,16 +825,14 @@ define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %x, <16 x i16> %y) {
;
; AVX2-LABEL: not_signbit_mask_v16i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0
+; AVX2-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: not_signbit_mask_v16i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsraw $15, %ymm0, %ymm0
+; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%sh = ashr <16 x i16> %x, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
%not = xor <16 x i16> %sh, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
@@ -893,12 +895,25 @@ define <2 x i64> @ispositive_mask_v2i64(<2 x i64> %x, <2 x i64> %y) {
; SSE42-NEXT: pand %xmm1, %xmm0
; SSE42-NEXT: retq
;
-; AVX-LABEL: ispositive_mask_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: ispositive_mask_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ispositive_mask_v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ispositive_mask_v2i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsraq $63, %xmm0, %xmm0
+; AVX512-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
%cmp = icmp sgt <2 x i64> %x, <i64 -1, i64 -1>
%mask = sext <2 x i1> %cmp to <2 x i64>
%and = and <2 x i64> %mask, %y
@@ -908,16 +923,14 @@ define <2 x i64> @ispositive_mask_v2i64(<2 x i64> %x, <2 x i64> %y) {
define <4 x i32> @is_positive_mask_v4i32(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: is_positive_mask_v4i32:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE-NEXT: pcmpgtd %xmm2, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: is_positive_mask_v4i32:
; AVX: # %bb.0:
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%cmp = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
%mask = sext <4 x i1> %cmp to <4 x i32>
@@ -928,16 +941,14 @@ define <4 x i32> @is_positive_mask_v4i32(<4 x i32> %x, <4 x i32> %y) {
define <8 x i16> @is_positive_mask_v8i16(<8 x i16> %x, <8 x i16> %y) {
; SSE-LABEL: is_positive_mask_v8i16:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE-NEXT: pcmpgtw %xmm2, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: psraw $15, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: is_positive_mask_v8i16:
; AVX: # %bb.0:
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsraw $15, %xmm0, %xmm0
+; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%cmp = icmp sgt <8 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
%mask = sext <8 x i1> %cmp to <8 x i16>
@@ -1005,9 +1016,8 @@ define <4 x i64> @is_positive_mask_v4i64(<4 x i64> %x, <4 x i64> %y) {
;
; AVX512-LABEL: is_positive_mask_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsraq $63, %ymm0, %ymm0
+; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cmp = icmp sgt <4 x i64> %x, <i64 -1, i64 -1, i64 -1, i64 -1>
%mask = sext <4 x i1> %cmp to <4 x i64>
@@ -1018,11 +1028,10 @@ define <4 x i64> @is_positive_mask_v4i64(<4 x i64> %x, <4 x i64> %y) {
define <8 x i32> @is_positive_mask_v8i32(<8 x i32> %x, <8 x i32> %y) {
; SSE-LABEL: is_positive_mask_v8i32:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE-NEXT: pcmpgtd %xmm4, %xmm1
-; SSE-NEXT: pcmpgtd %xmm4, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pandn %xmm2, %xmm0
+; SSE-NEXT: psrad $31, %xmm1
+; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: is_positive_mask_v8i32:
@@ -1037,16 +1046,14 @@ define <8 x i32> @is_positive_mask_v8i32(<8 x i32> %x, <8 x i32> %y) {
;
; AVX2-LABEL: is_positive_mask_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
+; AVX2-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: is_positive_mask_v8i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpsrad $31, %ymm0, %ymm0
+; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cmp = icmp sgt <8 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%mask = sext <8 x i1> %cmp to <8 x i32>
@@ -1057,11 +1064,10 @@ define <8 x i32> @is_positive_mask_v8i32(<8 x i32> %x, <8 x i32> %y) {
define <16 x i16> @is_positive_mask_v16i16(<16 x i16> %x, <16 x i16> %y) {
; SSE-LABEL: is_positive_mask_v16i16:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE-NEXT: pcmpgtw %xmm4, %xmm1
-; SSE-NEXT: pcmpgtw %xmm4, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: psraw $15, %xmm0
+; SSE-NEXT: pandn %xmm2, %xmm0
+; SSE-NEXT: psraw $15, %xmm1
+; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: is_positive_mask_v16i16:
@@ -1076,16 +1082,14 @@ define <16 x i16> @is_positive_mask_v16i16(<16 x i16> %x, <16 x i16> %y) {
;
; AVX2-LABEL: is_positive_mask_v16i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0
+; AVX2-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: is_positive_mask_v16i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsraw $15, %ymm0, %ymm0
+; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cmp = icmp sgt <16 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
%mask = sext <16 x i1> %cmp to <16 x i16>
@@ -1148,12 +1152,25 @@ define <2 x i64> @ispositive_mask_load_v2i64(<2 x i64> %x, <2 x i64>* %p) {
; SSE42-NEXT: pand (%rdi), %xmm0
; SSE42-NEXT: retq
;
-; AVX-LABEL: ispositive_mask_load_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpand (%rdi), %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: ispositive_mask_load_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand (%rdi), %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: ispositive_mask_load_v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpand (%rdi), %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: ispositive_mask_load_v2i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsraq $63, %xmm0, %xmm0
+; AVX512-NEXT: vpandn (%rdi), %xmm0, %xmm0
+; AVX512-NEXT: retq
%cmp = icmp sgt <2 x i64> %x, <i64 -1, i64 -1>
%mask = sext <2 x i1> %cmp to <2 x i64>
%y = load <2 x i64>, <2 x i64>* %p
@@ -1164,16 +1181,14 @@ define <2 x i64> @ispositive_mask_load_v2i64(<2 x i64> %x, <2 x i64>* %p) {
define <4 x i32> @is_positive_mask_load_v4i32(<4 x i32> %x, <4 x i32>* %p) {
; SSE-LABEL: is_positive_mask_load_v4i32:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE-NEXT: pcmpgtd %xmm1, %xmm0
-; SSE-NEXT: pand (%rdi), %xmm0
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pandn (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: is_positive_mask_load_v4i32:
; AVX: # %bb.0:
-; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpand (%rdi), %xmm0, %xmm0
+; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT: vpandn (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%cmp = icmp sgt <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
%mask = sext <4 x i1> %cmp to <4 x i32>
@@ -1185,16 +1200,14 @@ define <4 x i32> @is_positive_mask_load_v4i32(<4 x i32> %x, <4 x i32>* %p) {
define <8 x i16> @is_positive_mask_load_v8i16(<8 x i16> %x, <8 x i16>* %p) {
; SSE-LABEL: is_positive_mask_load_v8i16:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE-NEXT: pcmpgtw %xmm1, %xmm0
-; SSE-NEXT: pand (%rdi), %xmm0
+; SSE-NEXT: psraw $15, %xmm0
+; SSE-NEXT: pandn (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: is_positive_mask_load_v8i16:
; AVX: # %bb.0:
-; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpand (%rdi), %xmm0, %xmm0
+; AVX-NEXT: vpsraw $15, %xmm0, %xmm0
+; AVX-NEXT: vpandn (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
%cmp = icmp sgt <8 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
%mask = sext <8 x i1> %cmp to <8 x i16>
@@ -1264,9 +1277,8 @@ define <4 x i64> @is_positive_mask_load_v4i64(<4 x i64> %x, <4 x i64>* %p) {
;
; AVX512-LABEL: is_positive_mask_load_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpand (%rdi), %ymm0, %ymm0
+; AVX512-NEXT: vpsraq $63, %ymm0, %ymm0
+; AVX512-NEXT: vpandn (%rdi), %ymm0, %ymm0
; AVX512-NEXT: retq
%cmp = icmp sgt <4 x i64> %x, <i64 -1, i64 -1, i64 -1, i64 -1>
%mask = sext <4 x i1> %cmp to <4 x i64>
@@ -1278,11 +1290,10 @@ define <4 x i64> @is_positive_mask_load_v4i64(<4 x i64> %x, <4 x i64>* %p) {
define <8 x i32> @is_positive_mask_load_v8i32(<8 x i32> %x, <8 x i32>* %p) {
; SSE-LABEL: is_positive_mask_load_v8i32:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE-NEXT: pcmpgtd %xmm2, %xmm1
-; SSE-NEXT: pcmpgtd %xmm2, %xmm0
-; SSE-NEXT: pand (%rdi), %xmm0
-; SSE-NEXT: pand 16(%rdi), %xmm1
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pandn (%rdi), %xmm0
+; SSE-NEXT: psrad $31, %xmm1
+; SSE-NEXT: pandn 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: is_positive_mask_load_v8i32:
@@ -1297,16 +1308,14 @@ define <8 x i32> @is_positive_mask_load_v8i32(<8 x i32> %x, <8 x i32>* %p) {
;
; AVX2-LABEL: is_positive_mask_load_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpand (%rdi), %ymm0, %ymm0
+; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
+; AVX2-NEXT: vpandn (%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: is_positive_mask_load_v8i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpand (%rdi), %ymm0, %ymm0
+; AVX512-NEXT: vpsrad $31, %ymm0, %ymm0
+; AVX512-NEXT: vpandn (%rdi), %ymm0, %ymm0
; AVX512-NEXT: retq
%cmp = icmp sgt <8 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%mask = sext <8 x i1> %cmp to <8 x i32>
@@ -1318,11 +1327,10 @@ define <8 x i32> @is_positive_mask_load_v8i32(<8 x i32> %x, <8 x i32>* %p) {
define <16 x i16> @is_positive_mask_load_v16i16(<16 x i16> %x, <16 x i16>* %p) {
; SSE-LABEL: is_positive_mask_load_v16i16:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE-NEXT: pcmpgtw %xmm2, %xmm1
-; SSE-NEXT: pcmpgtw %xmm2, %xmm0
-; SSE-NEXT: pand (%rdi), %xmm0
-; SSE-NEXT: pand 16(%rdi), %xmm1
+; SSE-NEXT: psraw $15, %xmm0
+; SSE-NEXT: pandn (%rdi), %xmm0
+; SSE-NEXT: psraw $15, %xmm1
+; SSE-NEXT: pandn 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: is_positive_mask_load_v16i16:
@@ -1337,16 +1345,14 @@ define <16 x i16> @is_positive_mask_load_v16i16(<16 x i16> %x, <16 x i16>* %p) {
;
; AVX2-LABEL: is_positive_mask_load_v16i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpand (%rdi), %ymm0, %ymm0
+; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0
+; AVX2-NEXT: vpandn (%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: is_positive_mask_load_v16i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpand (%rdi), %ymm0, %ymm0
+; AVX512-NEXT: vpsraw $15, %ymm0, %ymm0
+; AVX512-NEXT: vpandn (%rdi), %ymm0, %ymm0
; AVX512-NEXT: retq
%cmp = icmp sgt <16 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
%mask = sext <16 x i1> %cmp to <16 x i16>
@@ -1451,23 +1457,20 @@ define <2 x i1> @ispositive_mask_v2i64_v2i1(<2 x i64> %x, <2 x i1> %y) {
define <4 x i1> @is_positive_mask_v4i32_v4i1(<4 x i32> %x, <4 x i1> %y) {
; SSE-LABEL: is_positive_mask_v4i32_v4i1:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE-NEXT: pcmpgtd %xmm2, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: is_positive_mask_v4i32_v4i1:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX1-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: is_positive_mask_v4i32_v4i1:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX2-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: is_positive_mask_v4i32_v4i1:
@@ -1495,30 +1498,26 @@ define <4 x i1> @is_positive_mask_v4i32_v4i1(<4 x i32> %x, <4 x i1> %y) {
define <8 x i1> @is_positive_mask_v8i16_v8i1(<8 x i16> %x, <8 x i1> %y) {
; SSE-LABEL: is_positive_mask_v8i16_v8i1:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE-NEXT: pcmpgtw %xmm2, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: psraw $15, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: is_positive_mask_v8i16_v8i1:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
+; AVX1-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: is_positive_mask_v8i16_v8i1:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsraw $15, %xmm0, %xmm0
+; AVX2-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: is_positive_mask_v8i16_v8i1:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512F-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT: vpsraw $15, %xmm0, %xmm0
+; AVX512F-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; AVX512DQBW-LABEL: is_positive_mask_v8i16_v8i1:
@@ -1581,9 +1580,8 @@ define <4 x i1> @is_positive_mask_v4i64_v4i1(<4 x i64> %x, <4 x i1> %y) {
; SSE2-LABEL: is_positive_mask_v4i64_v4i1:
; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: pandn %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE42-LABEL: is_positive_mask_v4i64_v4i1:
diff --git a/llvm/test/CodeGen/X86/vselect-zero.ll b/llvm/test/CodeGen/X86/vselect-zero.ll
index 60107667845c..0eb56d829db0 100644
--- a/llvm/test/CodeGen/X86/vselect-zero.ll
+++ b/llvm/test/CodeGen/X86/vselect-zero.ll
@@ -793,23 +793,20 @@ define <16 x i8> @not_signbit_mask_v16i8(<16 x i8> %a, <16 x i8> %b) {
define <8 x i16> @not_signbit_mask_v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE-LABEL: not_signbit_mask_v8i16:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE-NEXT: pcmpgtw %xmm2, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: psraw $15, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: not_signbit_mask_v8i16:
; AVX: # %bb.0:
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsraw $15, %xmm0, %xmm0
+; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: not_signbit_mask_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsraw $15, %xmm0, %xmm0
+; AVX512-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp sgt <8 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
%r = select <8 x i1> %cond, <8 x i16> %b, <8 x i16> zeroinitializer
@@ -819,23 +816,20 @@ define <8 x i16> @not_signbit_mask_v8i16(<8 x i16> %a, <8 x i16> %b) {
define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE-LABEL: not_signbit_mask_v4i32:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE-NEXT: pcmpgtd %xmm2, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: not_signbit_mask_v4i32:
; AVX: # %bb.0:
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: not_signbit_mask_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX512-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp sgt <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
%r = select <4 x i1> %cond, <4 x i32> %b, <4 x i32> zeroinitializer
@@ -867,9 +861,8 @@ define <2 x i64> @not_signbit_mask_v2i64(<2 x i64> %a, <2 x i64> %b) {
;
; AVX512-LABEL: not_signbit_mask_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsraq $63, %xmm0, %xmm0
+; AVX512-NEXT: vpandn %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp sgt <2 x i64> %a, <i64 -1, i64 -1>
%r = select <2 x i1> %cond, <2 x i64> %b, <2 x i64> zeroinitializer
@@ -917,11 +910,10 @@ define <32 x i8> @not_signbit_mask_v32i8(<32 x i8> %a, <32 x i8> %b) {
define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: not_signbit_mask_v16i16:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE-NEXT: pcmpgtw %xmm4, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pcmpgtw %xmm4, %xmm1
-; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: psraw $15, %xmm0
+; SSE-NEXT: pandn %xmm2, %xmm0
+; SSE-NEXT: psraw $15, %xmm1
+; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: not_signbit_mask_v16i16:
@@ -935,16 +927,14 @@ define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %a, <16 x i16> %b) {
;
; AVX2-LABEL: not_signbit_mask_v16i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0
+; AVX2-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: not_signbit_mask_v16i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsraw $15, %ymm0, %ymm0
+; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp sgt <16 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
%r = select <16 x i1> %cond, <16 x i16> %b, <16 x i16> zeroinitializer
@@ -954,11 +944,10 @@ define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %a, <16 x i16> %b) {
define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: not_signbit_mask_v8i32:
; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE-NEXT: pcmpgtd %xmm4, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pcmpgtd %xmm4, %xmm1
-; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pandn %xmm2, %xmm0
+; SSE-NEXT: psrad $31, %xmm1
+; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: not_signbit_mask_v8i32:
@@ -972,16 +961,14 @@ define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %a, <8 x i32> %b) {
;
; AVX2-LABEL: not_signbit_mask_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
+; AVX2-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: not_signbit_mask_v8i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrad $31, %ymm0, %ymm0
+; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp sgt <8 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%r = select <8 x i1> %cond, <8 x i32> %b, <8 x i32> zeroinitializer
@@ -1028,9 +1015,8 @@ define <4 x i64> @not_signbit_mask_v4i64(<4 x i64> %a, <4 x i64> %b) {
;
; AVX512-LABEL: not_signbit_mask_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsraq $63, %ymm0, %ymm0
+; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp sgt <4 x i64> %a, <i64 -1, i64 -1, i64 -1, i64 -1>
%r = select <4 x i1> %cond, <4 x i64> %b, <4 x i64> zeroinitializer