[llvm] 11522cf - [DAGCombiner] add fold for vselect based on mask of signbit, part 3
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Thu Nov 11 07:34:13 PST 2021
Author: Sanjay Patel
Date: 2021-11-11T10:27:37-05:00
New Revision: 11522cfcad6ba38b0b61a727a711adeef98571c9
URL: https://github.com/llvm/llvm-project/commit/11522cfcad6ba38b0b61a727a711adeef98571c9
DIFF: https://github.com/llvm/llvm-project/commit/11522cfcad6ba38b0b61a727a711adeef98571c9.diff
LOG: [DAGCombiner] add fold for vselect based on mask of signbit, part 3
(Cond0 s> -1) ? N1 : 0 --> ~(Cond0 s>> BW-1) & N1
https://alive2.llvm.org/ce/z/mGCBrd
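For intuition, the same equivalence can be checked per lane at the scalar level.
This is only an illustrative sketch (not part of the patch, and the helper names
are made up); it assumes 32-bit lanes and relies on '>>' of a negative signed
value behaving as an arithmetic shift, which matches the SRA node used below:

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    // (Cond0 s> -1) ? N1 : 0 on one 32-bit lane.
    uint32_t select_form(int32_t cond0, uint32_t n1) {
      return cond0 > -1 ? n1 : 0;
    }

    // ~(Cond0 s>> 31) & N1: splat the sign bit, invert, then mask.
    uint32_t masked_form(int32_t cond0, uint32_t n1) {
      // Arithmetic shift assumed: all-ones iff cond0 is negative.
      uint32_t sign_splat = (uint32_t)(cond0 >> 31);
      return ~sign_splat & n1;
    }

    int main() {
      for (int32_t c : {INT32_MIN, -7, -1, 0, 1, 42, INT32_MAX})
        for (uint32_t n : {0u, 1u, 0x80000000u, 0xffffffffu})
          assert(select_form(c, n) == masked_form(c, n));
      return 0;
    }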
This was suggested as a potential enhancement in D113212 (also 7e30404c3b6c).
There's an improvement for AArch64 that could be generalized (X > -1 --> X >= 0).
For x86, there is a counteracting fold that turns the shift+not back into a
setcc in most cases, so a workaround is needed to get more cases to use "pandn":
D113603
Note that this pattern (and a previous one) is not currently a canonical form
in IR:
https://alive2.llvm.org/ce/z/e4o96b
Adding swapped variants is left as a TODO item here, but is planned as
a near-term follow-up patch.
Differential Revision: https://reviews.llvm.org/D113426
Added:
Modified:
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/test/CodeGen/AArch64/vselect-constants.ll
llvm/test/CodeGen/X86/vselect-zero.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 60c5328d3e90..0c7db2966724 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -9560,6 +9560,9 @@ static SDValue foldVSelectToSignBitSplatMask(SDNode *N, SelectionDAG &DAG) {
if (VT != Cond0.getValueType())
return SDValue();
+ // TODO: Check for the swapped variants of the following patterns. We can't
+ // be sure what form is chosen as canonical in IR.
+
// (Cond0 s< 0) ? N1 : 0 --> (Cond0 s>> BW-1) & N1
if (CC == ISD::SETLT && isNullOrNullSplat(Cond1) && isNullOrNullSplat(N2)) {
SDLoc DL(N);
@@ -9577,6 +9580,24 @@ static SDValue foldVSelectToSignBitSplatMask(SDNode *N, SelectionDAG &DAG) {
return DAG.getNode(ISD::OR, DL, VT, Sra, N2);
}
+ // If the comparison is testing for a positive value, we have to invert
+ // the sign bit mask, so only do that transform if the target has a bitwise
+ // 'and not' instruction (the invert is free).
+ // (Cond0 s> -1) ? N1 : 0 --> ~(Cond0 s>> BW-1) & N1
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (CC == ISD::SETGT && isAllOnesOrAllOnesSplat(Cond1) &&
+ isNullOrNullSplat(N2) && TLI.hasAndNot(N2)) {
+ SDLoc DL(N);
+ SDValue ShiftAmt = DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT);
+ SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Cond0, ShiftAmt);
+ SDValue Not = DAG.getNOT(DL, Sra, VT);
+ return DAG.getNode(ISD::AND, DL, VT, Not, N1);
+ }
+
+ // TODO: There's another pattern in this family, but it may require
+ // implementing hasOrNot() to check for profitability:
+ // (Cond0 s> -1) ? -1 : N2 --> ~(Cond0 s>> BW-1) | N2
+
return SDValue();
}
diff --git a/llvm/test/CodeGen/AArch64/vselect-constants.ll b/llvm/test/CodeGen/AArch64/vselect-constants.ll
index b3bd1e6aa536..6dc9ecfd0689 100644
--- a/llvm/test/CodeGen/AArch64/vselect-constants.ll
+++ b/llvm/test/CodeGen/AArch64/vselect-constants.ll
@@ -284,9 +284,8 @@ define <2 x i64> @signbit_setmask_v2i64(<2 x i64> %a, <2 x i64> %b) {
define <16 x i8> @not_signbit_mask_v16i8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: not_signbit_mask_v16i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.2d, #0xffffffffffffffff
-; CHECK-NEXT: cmgt v0.16b, v0.16b, v2.16b
-; CHECK-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-NEXT: cmge v0.16b, v0.16b, #0
+; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ret
%cond = icmp sgt <16 x i8> %a, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
%r = select <16 x i1> %cond, <16 x i8> %b, <16 x i8> zeroinitializer
@@ -296,9 +295,8 @@ define <16 x i8> @not_signbit_mask_v16i8(<16 x i8> %a, <16 x i8> %b) {
define <8 x i16> @not_signbit_mask_v8i16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: not_signbit_mask_v8i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.2d, #0xffffffffffffffff
-; CHECK-NEXT: cmgt v0.8h, v0.8h, v2.8h
-; CHECK-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-NEXT: cmge v0.8h, v0.8h, #0
+; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ret
%cond = icmp sgt <8 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
%r = select <8 x i1> %cond, <8 x i16> %b, <8 x i16> zeroinitializer
@@ -308,9 +306,8 @@ define <8 x i16> @not_signbit_mask_v8i16(<8 x i16> %a, <8 x i16> %b) {
define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: not_signbit_mask_v4i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.2d, #0xffffffffffffffff
-; CHECK-NEXT: cmgt v0.4s, v0.4s, v2.4s
-; CHECK-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-NEXT: cmge v0.4s, v0.4s, #0
+; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ret
%cond = icmp sgt <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
%r = select <4 x i1> %cond, <4 x i32> %b, <4 x i32> zeroinitializer
@@ -320,9 +317,8 @@ define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %a, <4 x i32> %b) {
define <2 x i64> @not_signbit_mask_v2i64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: not_signbit_mask_v2i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.2d, #0xffffffffffffffff
-; CHECK-NEXT: cmgt v0.2d, v0.2d, v2.2d
-; CHECK-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-NEXT: cmge v0.2d, v0.2d, #0
+; CHECK-NEXT: and v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ret
%cond = icmp sgt <2 x i64> %a, <i64 -1, i64 -1>
%r = select <2 x i1> %cond, <2 x i64> %b, <2 x i64> zeroinitializer
diff --git a/llvm/test/CodeGen/X86/vselect-zero.ll b/llvm/test/CodeGen/X86/vselect-zero.ll
index d156bdb5a99d..60107667845c 100644
--- a/llvm/test/CodeGen/X86/vselect-zero.ll
+++ b/llvm/test/CodeGen/X86/vselect-zero.ll
@@ -779,19 +779,12 @@ define <16 x i8> @not_signbit_mask_v16i8(<16 x i8> %a, <16 x i8> %b) {
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
-; AVX512F-LABEL: not_signbit_mask_v16i8:
-; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512F-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT: retq
-;
-; AVX512DQBW-LABEL: not_signbit_mask_v16i8:
-; AVX512DQBW: # %bb.0:
-; AVX512DQBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512DQBW-NEXT: vpcmpgtb %xmm2, %xmm0, %k1
-; AVX512DQBW-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1} {z}
-; AVX512DQBW-NEXT: retq
+; AVX512-LABEL: not_signbit_mask_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
%cond = icmp sgt <16 x i8> %a, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
%r = select <16 x i1> %cond, <16 x i8> %b, <16 x i8> zeroinitializer
ret <16 x i8> %r
@@ -812,19 +805,12 @@ define <8 x i16> @not_signbit_mask_v8i16(<8 x i16> %a, <8 x i16> %b) {
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
-; AVX512F-LABEL: not_signbit_mask_v8i16:
-; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512F-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT: retq
-;
-; AVX512DQBW-LABEL: not_signbit_mask_v8i16:
-; AVX512DQBW: # %bb.0:
-; AVX512DQBW-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512DQBW-NEXT: vpcmpgtw %xmm2, %xmm0, %k1
-; AVX512DQBW-NEXT: vmovdqu16 %xmm1, %xmm0 {%k1} {z}
-; AVX512DQBW-NEXT: retq
+; AVX512-LABEL: not_signbit_mask_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
%cond = icmp sgt <8 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
%r = select <8 x i1> %cond, <8 x i16> %b, <8 x i16> zeroinitializer
ret <8 x i16> %r
@@ -848,8 +834,8 @@ define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %a, <4 x i32> %b) {
; AVX512-LABEL: not_signbit_mask_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512-NEXT: vpcmpgtd %xmm2, %xmm0, %k1
-; AVX512-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp sgt <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
%r = select <4 x i1> %cond, <4 x i32> %b, <4 x i32> zeroinitializer
@@ -882,8 +868,8 @@ define <2 x i64> @not_signbit_mask_v2i64(<2 x i64> %a, <2 x i64> %b) {
; AVX512-LABEL: not_signbit_mask_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX512-NEXT: vpcmpgtq %xmm2, %xmm0, %k1
-; AVX512-NEXT: vmovdqa64 %xmm1, %xmm0 {%k1} {z}
+; AVX512-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%cond = icmp sgt <2 x i64> %a, <i64 -1, i64 -1>
%r = select <2 x i1> %cond, <2 x i64> %b, <2 x i64> zeroinitializer
@@ -894,20 +880,20 @@ define <32 x i8> @not_signbit_mask_v32i8(<32 x i8> %a, <32 x i8> %b) {
; SSE-LABEL: not_signbit_mask_v32i8:
; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE-NEXT: pcmpgtb %xmm4, %xmm1
; SSE-NEXT: pcmpgtb %xmm4, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pcmpgtb %xmm4, %xmm1
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: not_signbit_mask_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtb %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vandnps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: not_signbit_mask_v32i8:
@@ -917,19 +903,12 @@ define <32 x i8> @not_signbit_mask_v32i8(<32 x i8> %a, <32 x i8> %b) {
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512F-LABEL: not_signbit_mask_v32i8:
-; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT: retq
-;
-; AVX512DQBW-LABEL: not_signbit_mask_v32i8:
-; AVX512DQBW: # %bb.0:
-; AVX512DQBW-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512DQBW-NEXT: vpcmpgtb %ymm2, %ymm0, %k1
-; AVX512DQBW-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1} {z}
-; AVX512DQBW-NEXT: retq
+; AVX512-LABEL: not_signbit_mask_v32i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%cond = icmp sgt <32 x i8> %a, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
%r = select <32 x i1> %cond, <32 x i8> %b, <32 x i8> zeroinitializer
ret <32 x i8> %r
@@ -939,20 +918,19 @@ define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE-LABEL: not_signbit_mask_v16i16:
; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE-NEXT: pcmpgtw %xmm4, %xmm1
; SSE-NEXT: pcmpgtw %xmm4, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pcmpgtw %xmm4, %xmm1
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: not_signbit_mask_v16i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpcmpgtw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsraw $15, %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vandnps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: not_signbit_mask_v16i16:
@@ -962,19 +940,12 @@ define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
-; AVX512F-LABEL: not_signbit_mask_v16i16:
-; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT: retq
-;
-; AVX512DQBW-LABEL: not_signbit_mask_v16i16:
-; AVX512DQBW: # %bb.0:
-; AVX512DQBW-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512DQBW-NEXT: vpcmpgtw %ymm2, %ymm0, %k1
-; AVX512DQBW-NEXT: vmovdqu16 %ymm1, %ymm0 {%k1} {z}
-; AVX512DQBW-NEXT: retq
+; AVX512-LABEL: not_signbit_mask_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%cond = icmp sgt <16 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
%r = select <16 x i1> %cond, <16 x i16> %b, <16 x i16> zeroinitializer
ret <16 x i16> %r
@@ -984,20 +955,19 @@ define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: not_signbit_mask_v8i32:
; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE-NEXT: pcmpgtd %xmm4, %xmm1
; SSE-NEXT: pcmpgtd %xmm4, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pcmpgtd %xmm4, %xmm1
; SSE-NEXT: pand %xmm3, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: not_signbit_mask_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpcmpgtd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtd %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vandnps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: not_signbit_mask_v8i32:
@@ -1010,8 +980,8 @@ define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX512-LABEL: not_signbit_mask_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpcmpgtd %ymm2, %ymm0, %k1
-; AVX512-NEXT: vmovdqa32 %ymm1, %ymm0 {%k1} {z}
+; AVX512-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp sgt <8 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%r = select <8 x i1> %cond, <8 x i32> %b, <8 x i32> zeroinitializer
@@ -1021,32 +991,32 @@ define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %a, <8 x i32> %b) {
define <4 x i64> @not_signbit_mask_v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: not_signbit_mask_v4i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
; SSE2-NEXT: pcmpgtd %xmm4, %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm1
; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSE42-LABEL: not_signbit_mask_v4i64:
; SSE42: # %bb.0:
; SSE42-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE42-NEXT: pcmpgtq %xmm4, %xmm1
; SSE42-NEXT: pcmpgtq %xmm4, %xmm0
; SSE42-NEXT: pand %xmm2, %xmm0
+; SSE42-NEXT: pcmpgtq %xmm4, %xmm1
; SSE42-NEXT: pand %xmm3, %xmm1
; SSE42-NEXT: retq
;
; AVX1-LABEL: not_signbit_mask_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vandnps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: not_signbit_mask_v4i64:
@@ -1059,8 +1029,8 @@ define <4 x i64> @not_signbit_mask_v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX512-LABEL: not_signbit_mask_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpcmpgtq %ymm2, %ymm0, %k1
-; AVX512-NEXT: vmovdqa64 %ymm1, %ymm0 {%k1} {z}
+; AVX512-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%cond = icmp sgt <4 x i64> %a, <i64 -1, i64 -1, i64 -1, i64 -1>
%r = select <4 x i1> %cond, <4 x i64> %b, <4 x i64> zeroinitializer