[llvm] 6c32dd4 - [AArch64][x86] add tests for swapped cmp+vselect patterns; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 12 12:50:15 PST 2021


Author: Sanjay Patel
Date: 2021-11-12T15:49:46-05:00
New Revision: 6c32dd4dfafe01d01e4b189a4eeda5c4497c71ec

URL: https://github.com/llvm/llvm-project/commit/6c32dd4dfafe01d01e4b189a4eeda5c4497c71ec
DIFF: https://github.com/llvm/llvm-project/commit/6c32dd4dfafe01d01e4b189a4eeda5c4497c71ec.diff

LOG: [AArch64][x86] add tests for swapped cmp+vselect patterns; NFC

These patterns were noted in the recent D113212 and its follow-ups.
I did not duplicate every test because a smaller sample is enough to
show whether we recognize the swapped forms; the original patterns
already have complete test coverage.
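
For readers skimming the diff, here is a minimal IR sketch of the
equivalence being exercised. The swapped form is copied from the v2i64
test added below; the unswapped form follows the shape of the existing
signbit_mask tests (function names here are illustrative):

  ; Returns %b in lanes where %a is negative, zero elsewhere.
  define <2 x i64> @signbit_mask(<2 x i64> %a, <2 x i64> %b) {
    %cond = icmp slt <2 x i64> %a, zeroinitializer
    %r = select <2 x i1> %cond, <2 x i64> %b, <2 x i64> zeroinitializer
    ret <2 x i64> %r
  }

  ; Same result: the predicate is inverted (sgt -1 means "not negative")
  ; and the select arms are swapped to compensate.
  define <2 x i64> @signbit_mask_swap(<2 x i64> %a, <2 x i64> %b) {
    %cond = icmp sgt <2 x i64> %a, <i64 -1, i64 -1>
    %r = select <2 x i1> %cond, <2 x i64> zeroinitializer, <2 x i64> %b
    ret <2 x i64> %r
  }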

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/vselect-constants.ll
    llvm/test/CodeGen/X86/vselect-zero.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/vselect-constants.ll b/llvm/test/CodeGen/AArch64/vselect-constants.ll
index 6dc9ecfd0689..f95b34ecf39c 100644
--- a/llvm/test/CodeGen/AArch64/vselect-constants.ll
+++ b/llvm/test/CodeGen/AArch64/vselect-constants.ll
@@ -204,6 +204,20 @@ define <16 x i8> @signbit_mask_v16i8(<16 x i8> %a, <16 x i8> %b) {
   ret <16 x i8> %r
 }
 
+; Swap cmp pred and select ops. This is logically equivalent to the above test.
+
+define <16 x i8> @signbit_mask_swap_v16i8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: signbit_mask_swap_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v2.2d, #0xffffffffffffffff
+; CHECK-NEXT:    cmgt v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    bic v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    ret
+  %cond = icmp sgt <16 x i8> %a, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+  %r = select <16 x i1> %cond, <16 x i8> zeroinitializer, <16 x i8> %b
+  ret <16 x i8> %r
+}
+
 define <8 x i16> @signbit_mask_v8i16(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-LABEL: signbit_mask_v8i16:
 ; CHECK:       // %bb.0:
@@ -259,6 +273,21 @@ define <8 x i16> @signbit_setmask_v8i16(<8 x i16> %a, <8 x i16> %b) {
   ret <8 x i16> %r
 }
 
+; Swap cmp pred and select ops. This is logically equivalent to the above test.
+
+define <8 x i16> @signbit_setmask_swap_v8i16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: signbit_setmask_swap_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v2.2d, #0xffffffffffffffff
+; CHECK-NEXT:    cmgt v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    and v1.16b, v1.16b, v0.16b
+; CHECK-NEXT:    orn v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    ret
+  %cond = icmp sgt <8 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+  %r = select <8 x i1> %cond, <8 x i16> %b, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+  ret <8 x i16> %r
+}
+
 define <4 x i32> @signbit_setmask_v4i32(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: signbit_setmask_v4i32:
 ; CHECK:       // %bb.0:
@@ -314,6 +343,19 @@ define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %a, <4 x i32> %b) {
   ret <4 x i32> %r
 }
 
+; Swap cmp pred and select ops. This is logically equivalent to the above test.
+
+define <4 x i32> @not_signbit_mask_swap_v4i32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: not_signbit_mask_swap_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-NEXT:    bic v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    ret
+  %cond = icmp slt <4 x i32> %a, zeroinitializer
+  %r = select <4 x i1> %cond, <4 x i32> zeroinitializer, <4 x i32> %b
+  ret <4 x i32> %r
+}
+
 define <2 x i64> @not_signbit_mask_v2i64(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: not_signbit_mask_v2i64:
 ; CHECK:       // %bb.0:

diff --git a/llvm/test/CodeGen/X86/vselect-zero.ll b/llvm/test/CodeGen/X86/vselect-zero.ll
index 0eb56d829db0..290561faee9a 100644
--- a/llvm/test/CodeGen/X86/vselect-zero.ll
+++ b/llvm/test/CodeGen/X86/vselect-zero.ll
@@ -340,6 +340,49 @@ define <2 x i64> @signbit_mask_v2i64(<2 x i64> %a, <2 x i64> %b) {
   ret <2 x i64> %r
 }
 
+; Swap cmp pred and select ops. This is logically equivalent to the above test.
+
+define <2 x i64> @signbit_mask_swap_v2i64(<2 x i64> %a, <2 x i64> %b) {
+; SSE2-LABEL: signbit_mask_swap_v2i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [18446744071562067967,18446744071562067967]
+; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    pcmpgtd %xmm2, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm2, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pand %xmm4, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    pandn %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE42-LABEL: signbit_mask_swap_v2i64:
+; SSE42:       # %bb.0:
+; SSE42-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE42-NEXT:    pcmpgtq %xmm2, %xmm0
+; SSE42-NEXT:    pandn %xmm1, %xmm0
+; SSE42-NEXT:    retq
+;
+; AVX-LABEL: signbit_mask_swap_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512-LABEL: signbit_mask_swap_v2i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpcmpleq %xmm2, %xmm0, %k1
+; AVX512-NEXT:    vmovdqa64 %xmm1, %xmm0 {%k1} {z}
+; AVX512-NEXT:    retq
+  %cond = icmp sgt <2 x i64> %a, <i64 -1, i64 -1>
+  %r = select <2 x i1> %cond, <2 x i64> zeroinitializer, <2 x i64> %b
+  ret <2 x i64> %r
+}
+
 define <32 x i8> @signbit_mask_v32i8(<32 x i8> %a, <32 x i8> %b) {
 ; SSE-LABEL: signbit_mask_v32i8:
 ; SSE:       # %bb.0:
@@ -449,6 +492,46 @@ define <8 x i32> @signbit_mask_v8i32(<8 x i32> %a, <8 x i32> %b) {
   ret <8 x i32> %r
 }
 
+; Swap cmp pred and select ops. This is logically equivalent to the above test.
+
+define <8 x i32> @signbit_mask_swap_v8i32(<8 x i32> %a, <8 x i32> %b) {
+; SSE-LABEL: signbit_mask_swap_v8i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE-NEXT:    pcmpgtd %xmm4, %xmm0
+; SSE-NEXT:    pandn %xmm2, %xmm0
+; SSE-NEXT:    pcmpgtd %xmm4, %xmm1
+; SSE-NEXT:    pandn %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: signbit_mask_swap_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpgtd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vandnps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: signbit_mask_swap_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpcmpgtd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: signbit_mask_swap_v8i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpcmpled %ymm2, %ymm0, %k1
+; AVX512-NEXT:    vmovdqa32 %ymm1, %ymm0 {%k1} {z}
+; AVX512-NEXT:    retq
+  %cond = icmp sgt <8 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+  %r = select <8 x i1> %cond, <8 x i32> zeroinitializer, <8 x i32> %b
+  ret <8 x i32> %r
+}
+
 define <4 x i64> @signbit_mask_v4i64(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: signbit_mask_v4i64:
 ; SSE2:       # %bb.0:
@@ -526,6 +609,44 @@ define <16 x i8> @signbit_setmask_v16i8(<16 x i8> %a, <16 x i8> %b) {
   ret <16 x i8> %r
 }
 
+; Swap cmp pred and select ops. This is logically equivalent to the above test.
+
+define <16 x i8> @signbit_setmask_swap_v16i8(<16 x i8> %a, <16 x i8> %b) {
+; SSE-LABEL: signbit_setmask_swap_v16i8:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    pcmpgtb %xmm2, %xmm0
+; SSE-NEXT:    pxor %xmm2, %xmm0
+; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: signbit_setmask_swap_v16i8:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtb %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: signbit_setmask_swap_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpcmpgtb %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT:    vpternlogq $222, %xmm2, %xmm1, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512DQBW-LABEL: signbit_setmask_swap_v16i8:
+; AVX512DQBW:       # %bb.0:
+; AVX512DQBW-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512DQBW-NEXT:    vpcmpgtb %xmm2, %xmm0, %k1
+; AVX512DQBW-NEXT:    vmovdqu8 %xmm1, %xmm2 {%k1}
+; AVX512DQBW-NEXT:    vmovdqa %xmm2, %xmm0
+; AVX512DQBW-NEXT:    retq
+  %cond = icmp sgt <16 x i8> %a, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+  %r = select <16 x i1> %cond, <16 x i8> %b, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+  ret <16 x i8> %r
+}
+
 define <8 x i16> @signbit_setmask_v8i16(<8 x i16> %a, <8 x i16> %b) {
 ; SSE-LABEL: signbit_setmask_v8i16:
 ; SSE:       # %bb.0:
@@ -764,6 +885,81 @@ define <4 x i64> @signbit_setmask_v4i64(<4 x i64> %a, <4 x i64> %b) {
   ret <4 x i64> %r
 }
 
+; Swap cmp pred and select ops. This is logically equivalent to the above test.
+
+define <4 x i64> @signbit_setmask_swap_v4i64(<4 x i64> %a, <4 x i64> %b) {
+; SSE2-LABEL: signbit_setmask_swap_v4i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
+; SSE2-NEXT:    pxor %xmm8, %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [18446744071562067967,18446744071562067967]
+; SSE2-NEXT:    movdqa %xmm0, %xmm6
+; SSE2-NEXT:    pcmpgtd %xmm5, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm5, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pand %xmm7, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE2-NEXT:    por %xmm4, %xmm0
+; SSE2-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT:    pxor %xmm4, %xmm0
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    pxor %xmm8, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm5, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm5, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pand %xmm6, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE2-NEXT:    por %xmm5, %xmm1
+; SSE2-NEXT:    pxor %xmm4, %xmm1
+; SSE2-NEXT:    por %xmm3, %xmm1
+; SSE2-NEXT:    retq
+;
+; SSE42-LABEL: signbit_setmask_swap_v4i64:
+; SSE42:       # %bb.0:
+; SSE42-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE42-NEXT:    pcmpgtq %xmm4, %xmm0
+; SSE42-NEXT:    pxor %xmm4, %xmm0
+; SSE42-NEXT:    por %xmm2, %xmm0
+; SSE42-NEXT:    pcmpgtq %xmm4, %xmm1
+; SSE42-NEXT:    pxor %xmm4, %xmm1
+; SSE42-NEXT:    por %xmm3, %xmm1
+; SSE42-NEXT:    retq
+;
+; AVX1-LABEL: signbit_setmask_swap_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpxor %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpxor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: signbit_setmask_swap_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpcmpgtq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: signbit_setmask_swap_v4i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpcmpgtq %ymm2, %ymm0, %k1
+; AVX512-NEXT:    vmovdqa64 %ymm1, %ymm2 {%k1}
+; AVX512-NEXT:    vmovdqa %ymm2, %ymm0
+; AVX512-NEXT:    retq
+  %cond = icmp sgt <4 x i64> %a, <i64 -1, i64 -1, i64 -1, i64 -1>
+  %r = select <4 x i1> %cond, <4 x i64> %b, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>
+  ret <4 x i64> %r
+}
+
 define <16 x i8> @not_signbit_mask_v16i8(<16 x i8> %a, <16 x i8> %b) {
 ; SSE-LABEL: not_signbit_mask_v16i8:
 ; SSE:       # %bb.0:
@@ -813,6 +1009,42 @@ define <8 x i16> @not_signbit_mask_v8i16(<8 x i16> %a, <8 x i16> %b) {
   ret <8 x i16> %r
 }
 
+; Swap cmp pred and select ops. This is logically equivalent to the above test.
+
+define <8 x i16> @not_signbit_mask_swap_v8i16(<8 x i16> %a, <8 x i16> %b) {
+; SSE-LABEL: not_signbit_mask_swap_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pxor %xmm2, %xmm2
+; SSE-NEXT:    pcmpgtw %xmm0, %xmm2
+; SSE-NEXT:    pandn %xmm1, %xmm2
+; SSE-NEXT:    movdqa %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: not_signbit_mask_swap_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtw %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+;
+; AVX512F-LABEL: not_signbit_mask_swap_v8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpcmpgtw %xmm0, %xmm2, %xmm0
+; AVX512F-NEXT:    vpandn %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512DQBW-LABEL: not_signbit_mask_swap_v8i16:
+; AVX512DQBW:       # %bb.0:
+; AVX512DQBW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512DQBW-NEXT:    vpcmpnltw %xmm2, %xmm0, %k1
+; AVX512DQBW-NEXT:    vmovdqu16 %xmm1, %xmm0 {%k1} {z}
+; AVX512DQBW-NEXT:    retq
+  %cond = icmp slt <8 x i16> %a, zeroinitializer
+  %r = select <8 x i1> %cond, <8 x i16> zeroinitializer, <8 x i16> %b
+  ret <8 x i16> %r
+}
+
 define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %a, <4 x i32> %b) {
 ; SSE-LABEL: not_signbit_mask_v4i32:
 ; SSE:       # %bb.0:
@@ -975,6 +1207,49 @@ define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %a, <8 x i32> %b) {
   ret <8 x i32> %r
 }
 
+; Swap cmp pred and select ops. This is logically equivalent to the above test.
+
+define <8 x i32> @not_signbit_mask_swap_v8i32(<8 x i32> %a, <8 x i32> %b) {
+; SSE-LABEL: not_signbit_mask_swap_v8i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pxor %xmm4, %xmm4
+; SSE-NEXT:    pxor %xmm5, %xmm5
+; SSE-NEXT:    pcmpgtd %xmm0, %xmm5
+; SSE-NEXT:    pandn %xmm2, %xmm5
+; SSE-NEXT:    pcmpgtd %xmm1, %xmm4
+; SSE-NEXT:    pandn %xmm3, %xmm4
+; SSE-NEXT:    movdqa %xmm5, %xmm0
+; SSE-NEXT:    movdqa %xmm4, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: not_signbit_mask_swap_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpcmpgtd %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vandnps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: not_signbit_mask_swap_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpcmpgtd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    vpandn %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: not_signbit_mask_swap_v8i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpcmpnltd %ymm2, %ymm0, %k1
+; AVX512-NEXT:    vmovdqa32 %ymm1, %ymm0 {%k1} {z}
+; AVX512-NEXT:    retq
+  %cond = icmp slt <8 x i32> %a, zeroinitializer
+  %r = select <8 x i1> %cond, <8 x i32> zeroinitializer, <8 x i32> %b
+  ret <8 x i32> %r
+}
+
 define <4 x i64> @not_signbit_mask_v4i64(<4 x i64> %a, <4 x i64> %b) {
 ; SSE2-LABEL: not_signbit_mask_v4i64:
 ; SSE2:       # %bb.0:

