[llvm] r359385 - [X86] Add vector boolean reduction tests (PR38840)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sat Apr 27 09:49:54 PDT 2019
Author: rksimon
Date: Sat Apr 27 09:49:54 2019
New Revision: 359385
URL: http://llvm.org/viewvc/llvm-project?rev=359385&view=rev
Log:
[X86] Add vector boolean reduction tests (PR38840)
AND/OR/XOR tests for the @llvm.experimental.vector.reduce intrinsics.
AND/OR codegen is pretty good (pre-AVX512); XOR (less common, but used for parity reduction) is still pretty bad.
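For reference, the XOR case reduces a boolean vector to a single parity bit. A minimal sketch of the kind of pattern these tests exercise (the function name and vector type here are illustrative, not copied from the new tests; the xor variant mirrors the reduce.and calls in the file below):

  define i1 @parity_v16i8(<16 x i8> %v) {
    ; truncate each element to its low bit, then XOR-reduce to get parity
    %b = trunc <16 x i8> %v to <16 x i1>
    %p = call i1 @llvm.experimental.vector.reduce.xor.v16i1(<16 x i1> %b)
    ret i1 %p
  }
  declare i1 @llvm.experimental.vector.reduce.xor.v16i1(<16 x i1>)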
Added:
llvm/trunk/test/CodeGen/X86/vector-reduce-and-bool.ll
llvm/trunk/test/CodeGen/X86/vector-reduce-or-bool.ll
llvm/trunk/test/CodeGen/X86/vector-reduce-xor-bool.ll
Added: llvm/trunk/test/CodeGen/X86/vector-reduce-and-bool.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-and-bool.ll?rev=359385&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-and-bool.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-and-bool.ll Sat Apr 27 09:49:54 2019
@@ -0,0 +1,1846 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
+
+;
+; Truncate
+;
+
+define i1 @trunc_v2i64_v2i1(<2 x i64>) {
+; SSE-LABEL: trunc_v2i64_v2i1:
+; SSE: # %bb.0:
+; SSE-NEXT: psllq $63, %xmm0
+; SSE-NEXT: movmskpd %xmm0, %eax
+; SSE-NEXT: cmpb $3, %al
+; SSE-NEXT: sete %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: trunc_v2i64_v2i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX-NEXT: vmovmskpd %xmm0, %eax
+; AVX-NEXT: cmpb $3, %al
+; AVX-NEXT: sete %al
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v2i64_v2i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v2i64_v2i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmq %xmm0, %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512VL-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmq %xmm0, %xmm0, %k0 {%k1}
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = trunc <2 x i64> %0 to <2 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.and.v2i1(<2 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v4i32_v4i1(<4 x i32>) {
+; SSE-LABEL: trunc_v4i32_v4i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pslld $31, %xmm0
+; SSE-NEXT: movmskps %xmm0, %eax
+; SSE-NEXT: cmpb $15, %al
+; SSE-NEXT: sete %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: trunc_v4i32_v4i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX-NEXT: vmovmskps %xmm0, %eax
+; AVX-NEXT: cmpb $15, %al
+; AVX-NEXT: sete %al
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v4i32_v4i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k1 {%k1}
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v4i32_v4i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k1 {%k1}
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k0 {%k1}
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = trunc <4 x i32> %0 to <4 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.and.v4i1(<4 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v8i16_v8i1(<8 x i8>) {
+; SSE-LABEL: trunc_v8i16_v8i1:
+; SSE: # %bb.0:
+; SSE-NEXT: psllw $15, %xmm0
+; SSE-NEXT: packsswb %xmm0, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: cmpb $-1, %al
+; SSE-NEXT: sete %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: trunc_v8i16_v8i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: cmpb $-1, %al
+; AVX-NEXT: sete %al
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v8i16_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX512BW-NEXT: vpmovw2m %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1 {%k1}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1 {%k1}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v8i16_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX512VL-NEXT: vpmovw2m %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k1 {%k1}
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k1 {%k1}
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0 {%k1}
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <8 x i8> %0 to <8 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.and.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v16i8_v16i1(<16 x i8>) {
+; SSE-LABEL: trunc_v16i8_v16i1:
+; SSE: # %bb.0:
+; SSE-NEXT: psllw $7, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: sete %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: trunc_v16i8_v16i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: cmpw $-1, %ax
+; AVX-NEXT: sete %al
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v16i8_v16i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: kshiftrw $8, %k0, %k1
+; AVX512BW-NEXT: vpcmpgtb %zmm0, %zmm1, %k0 {%k1}
+; AVX512BW-NEXT: kshiftrw $4, %k0, %k1
+; AVX512BW-NEXT: kandw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $2, %k0, %k1
+; AVX512BW-NEXT: kandw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $1, %k0, %k1
+; AVX512BW-NEXT: kandw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v16i8_v16i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX512VL-NEXT: vpmovb2m %xmm0, %k0
+; AVX512VL-NEXT: kshiftrw $8, %k0, %k1
+; AVX512VL-NEXT: kandw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $4, %k0, %k1
+; AVX512VL-NEXT: kandw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $2, %k0, %k1
+; AVX512VL-NEXT: kandw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $1, %k0, %k1
+; AVX512VL-NEXT: kandw %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = trunc <16 x i8> %0 to <16 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.and.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v4i64_v4i1(<4 x i64>) {
+; SSE-LABEL: trunc_v4i64_v4i1:
+; SSE: # %bb.0:
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: pslld $31, %xmm0
+; SSE-NEXT: movmskps %xmm0, %eax
+; SSE-NEXT: cmpb $15, %al
+; SSE-NEXT: sete %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: trunc_v4i64_v4i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX-NEXT: vmovmskps %xmm0, %eax
+; AVX-NEXT: cmpb $15, %al
+; AVX-NEXT: sete %al
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v4i64_v4i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllq $63, %ymm0, %ymm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k1 {%k1}
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v4i64_v4i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllq $63, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmq %ymm0, %ymm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k1 {%k1}
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k0 {%k1}
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <4 x i64> %0 to <4 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.and.v4i1(<4 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v8i32_v8i1(<8 x i32>) {
+; SSE2-LABEL: trunc_v8i32_v8i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pslld $16, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: pslld $16, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm1, %xmm0
+; SSE2-NEXT: psllw $15, %xmm0
+; SSE2-NEXT: packsswb %xmm0, %xmm0
+; SSE2-NEXT: pmovmskb %xmm0, %eax
+; SSE2-NEXT: cmpb $-1, %al
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v8i32_v8i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE41-NEXT: pshufb %xmm2, %xmm1
+; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: psllw $15, %xmm0
+; SSE41-NEXT: packsswb %xmm0, %xmm0
+; SSE41-NEXT: pmovmskb %xmm0, %eax
+; SSE41-NEXT: cmpb $-1, %al
+; SSE41-NEXT: sete %al
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v8i32_v8i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: cmpb $-1, %al
+; AVX1-NEXT: sete %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v8i32_v8i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: cmpb $-1, %al
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v8i32_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1 {%k1}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1 {%k1}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v8i32_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k1 {%k1}
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k1 {%k1}
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0 {%k1}
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <8 x i32> %0 to <8 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.and.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v16i16_v16i1(<16 x i16>) {
+; SSE2-LABEL: trunc_v16i16_v16i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: psllw $7, %xmm0
+; SSE2-NEXT: pmovmskb %xmm0, %eax
+; SSE2-NEXT: cmpw $-1, %ax
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v16i16_v16i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm2, %xmm1
+; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: psllw $7, %xmm0
+; SSE41-NEXT: pmovmskb %xmm0, %eax
+; SSE41-NEXT: cmpw $-1, %ax
+; SSE41-NEXT: sete %al
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v16i16_v16i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: sete %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v16i16_v16i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: cmpw $-1, %ax
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v16i16_v16i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllw $15, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: kshiftrw $8, %k0, %k1
+; AVX512BW-NEXT: vpcmpgtw %zmm0, %zmm1, %k0 {%k1}
+; AVX512BW-NEXT: kshiftrw $4, %k0, %k1
+; AVX512BW-NEXT: kandw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $2, %k0, %k1
+; AVX512BW-NEXT: kandw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $1, %k0, %k1
+; AVX512BW-NEXT: kandw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v16i16_v16i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllw $15, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovw2m %ymm0, %k0
+; AVX512VL-NEXT: kshiftrw $8, %k0, %k1
+; AVX512VL-NEXT: kandw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $4, %k0, %k1
+; AVX512VL-NEXT: kandw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $2, %k0, %k1
+; AVX512VL-NEXT: kandw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $1, %k0, %k1
+; AVX512VL-NEXT: kandw %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <16 x i16> %0 to <16 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.and.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v32i8_v32i1(<32 x i8>) {
+; SSE-LABEL: trunc_v32i8_v32i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: psllw $7, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: sete %al
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_v32i8_v32i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: sete %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v32i8_v32i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: cmpl $-1, %eax
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v32i8_v32i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: kshiftrd $16, %k0, %k1
+; AVX512BW-NEXT: vpcmpgtb %zmm0, %zmm1, %k0 {%k1}
+; AVX512BW-NEXT: kshiftrd $8, %k0, %k1
+; AVX512BW-NEXT: kandd %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $4, %k0, %k1
+; AVX512BW-NEXT: kandd %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $2, %k0, %k1
+; AVX512BW-NEXT: kandd %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $1, %k0, %k1
+; AVX512BW-NEXT: kandd %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v32i8_v32i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovb2m %ymm0, %k0
+; AVX512VL-NEXT: kshiftrd $16, %k0, %k1
+; AVX512VL-NEXT: kandd %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $8, %k0, %k1
+; AVX512VL-NEXT: kandd %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $4, %k0, %k1
+; AVX512VL-NEXT: kandd %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $2, %k0, %k1
+; AVX512VL-NEXT: kandd %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $1, %k0, %k1
+; AVX512VL-NEXT: kandd %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <32 x i8> %0 to <32 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.and.v32i1(<32 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v8i64_v8i1(<8 x i64>) {
+; SSE2-LABEL: trunc_v8i64_v8i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSE2-NEXT: psllw $15, %xmm2
+; SSE2-NEXT: packsswb %xmm0, %xmm2
+; SSE2-NEXT: pmovmskb %xmm2, %eax
+; SSE2-NEXT: cmpb $-1, %al
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v8i64_v8i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
+; SSE41-NEXT: packusdw %xmm3, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: packusdw %xmm2, %xmm0
+; SSE41-NEXT: psllw $15, %xmm0
+; SSE41-NEXT: packsswb %xmm0, %xmm0
+; SSE41-NEXT: pmovmskb %xmm0, %eax
+; SSE41-NEXT: cmpb $-1, %al
+; SSE41-NEXT: sete %al
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v8i64_v8i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [65535,65535,65535,65535]
+; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: cmpb $-1, %al
+; AVX1-NEXT: sete %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v8i64_v8i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: cmpb $-1, %al
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v8i64_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1 {%k1}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1 {%k1}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v8i64_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k1 {%k1}
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k1 {%k1}
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0 {%k1}
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <8 x i64> %0 to <8 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.and.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v16i32_v16i1(<16 x i32>) {
+; SSE2-LABEL: trunc_v16i32_v16i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: psllw $7, %xmm0
+; SSE2-NEXT: pmovmskb %xmm0, %eax
+; SSE2-NEXT: cmpw $-1, %ax
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v16i32_v16i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE41-NEXT: pand %xmm4, %xmm3
+; SSE41-NEXT: pand %xmm4, %xmm2
+; SSE41-NEXT: packusdw %xmm3, %xmm2
+; SSE41-NEXT: pand %xmm4, %xmm1
+; SSE41-NEXT: pand %xmm4, %xmm0
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: packuswb %xmm2, %xmm0
+; SSE41-NEXT: psllw $7, %xmm0
+; SSE41-NEXT: pmovmskb %xmm0, %eax
+; SSE41-NEXT: cmpw $-1, %ax
+; SSE41-NEXT: sete %al
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v16i32_v16i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: sete %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v16i32_v16i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; AVX2-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: cmpw $-1, %ax
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_v16i32_v16i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kshiftrw $8, %k0, %k1
+; AVX512-NEXT: kandw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $4, %k0, %k1
+; AVX512-NEXT: kandw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $2, %k0, %k1
+; AVX512-NEXT: kandw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $1, %k0, %k1
+; AVX512-NEXT: kandw %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = trunc <16 x i32> %0 to <16 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.and.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v32i16_v32i1(<32 x i16>) {
+; SSE2-LABEL: trunc_v32i16_v32i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: psllw $7, %xmm0
+; SSE2-NEXT: pmovmskb %xmm0, %eax
+; SSE2-NEXT: cmpw $-1, %ax
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v32i16_v32i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm4, %xmm3
+; SSE41-NEXT: pshufb %xmm4, %xmm2
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE41-NEXT: pshufb %xmm4, %xmm1
+; SSE41-NEXT: pshufb %xmm4, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: psllw $7, %xmm0
+; SSE41-NEXT: pmovmskb %xmm0, %eax
+; SSE41-NEXT: cmpw $-1, %ax
+; SSE41-NEXT: sete %al
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v32i16_v32i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: sete %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v32i16_v32i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: cmpl $-1, %eax
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_v32i16_v32i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllw $15, %zmm0, %zmm0
+; AVX512-NEXT: vpmovw2m %zmm0, %k0
+; AVX512-NEXT: kshiftrd $16, %k0, %k1
+; AVX512-NEXT: kandd %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $8, %k0, %k1
+; AVX512-NEXT: kandd %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $4, %k0, %k1
+; AVX512-NEXT: kandd %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $2, %k0, %k1
+; AVX512-NEXT: kandd %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $1, %k0, %k1
+; AVX512-NEXT: kandd %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = trunc <32 x i16> %0 to <32 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.and.v32i1(<32 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v64i8_v64i1(<64 x i8>) {
+; SSE2-LABEL: trunc_v64i8_v64i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrlw $8, %xmm1
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v64i8_v64i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pand %xmm3, %xmm1
+; SSE41-NEXT: pand %xmm2, %xmm1
+; SSE41-NEXT: pand %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE41-NEXT: pand %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrlw $8, %xmm1
+; SSE41-NEXT: pand %xmm0, %xmm1
+; SSE41-NEXT: pextrb $0, %xmm1, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v64i8_v64i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v64i8_v64i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_v64i8_v64i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllw $7, %zmm0, %zmm0
+; AVX512-NEXT: vpmovb2m %zmm0, %k0
+; AVX512-NEXT: kshiftrq $32, %k0, %k1
+; AVX512-NEXT: kandq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $16, %k0, %k1
+; AVX512-NEXT: kandq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $8, %k0, %k1
+; AVX512-NEXT: kandq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $4, %k0, %k1
+; AVX512-NEXT: kandq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $2, %k0, %k1
+; AVX512-NEXT: kandq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $1, %k0, %k1
+; AVX512-NEXT: kandq %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = trunc <64 x i8> %0 to <64 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.and.v64i1(<64 x i1> %a)
+ ret i1 %b
+}
+
+;
+; Comparison
+;
+
+define i1 @icmp_v2i64_v2i1(<2 x i64>) {
+; SSE2-LABEL: icmp_v2i64_v2i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: movmskpd %xmm0, %eax
+; SSE2-NEXT: cmpb $3, %al
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v2i64_v2i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pcmpeqq %xmm0, %xmm1
+; SSE41-NEXT: movmskpd %xmm1, %eax
+; SSE41-NEXT: cmpb $3, %al
+; SSE41-NEXT: sete %al
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: icmp_v2i64_v2i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovmskpd %xmm0, %eax
+; AVX-NEXT: cmpb $3, %al
+; AVX-NEXT: sete %al
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v2i64_v2i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vptestnmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v2i64_v2i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmq %xmm0, %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512VL-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmq %xmm0, %xmm0, %k0 {%k1}
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = icmp eq <2 x i64> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.and.v2i1(<2 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v4i32_v4i1(<4 x i32>) {
+; SSE-LABEL: icmp_v4i32_v4i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE-NEXT: movmskps %xmm1, %eax
+; SSE-NEXT: cmpb $15, %al
+; SSE-NEXT: sete %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: icmp_v4i32_v4i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovmskps %xmm0, %eax
+; AVX-NEXT: cmpb $15, %al
+; AVX-NEXT: sete %al
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v4i32_v4i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vptestnmd %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k1 {%k1}
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v4i32_v4i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmd %xmm0, %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k1 {%k1}
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k0 {%k1}
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = icmp eq <4 x i32> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.and.v4i1(<4 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v8i16_v8i1(<8 x i8>) {
+; SSE-LABEL: icmp_v8i16_v8i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pcmpeqw %xmm0, %xmm1
+; SSE-NEXT: packsswb %xmm0, %xmm1
+; SSE-NEXT: pmovmskb %xmm1, %eax
+; SSE-NEXT: cmpb $-1, %al
+; SSE-NEXT: sete %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: icmp_v8i16_v8i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: cmpb $-1, %al
+; AVX-NEXT: sete %al
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v8i16_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX512BW-NEXT: vptestnmw %zmm1, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1 {%k1}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1 {%k1}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v8i16_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmw {{.*}}(%rip), %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k1 {%k1}
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k1 {%k1}
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0 {%k1}
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <8 x i8> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.and.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v16i8_v16i1(<16 x i8>) {
+; SSE-LABEL: icmp_v16i8_v16i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE-NEXT: pmovmskb %xmm1, %eax
+; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: sete %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: icmp_v16i8_v16i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: cmpw $-1, %ax
+; AVX-NEXT: sete %al
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v16i8_v16i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vptestnmb %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: kshiftrw $8, %k0, %k1
+; AVX512BW-NEXT: vpcmpeqb %zmm1, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: kshiftrw $4, %k0, %k1
+; AVX512BW-NEXT: kandw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $2, %k0, %k1
+; AVX512BW-NEXT: kandw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $1, %k0, %k1
+; AVX512BW-NEXT: kandw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v16i8_v16i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmb %xmm0, %xmm0, %k0
+; AVX512VL-NEXT: kshiftrw $8, %k0, %k1
+; AVX512VL-NEXT: kandw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $4, %k0, %k1
+; AVX512VL-NEXT: kandw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $2, %k0, %k1
+; AVX512VL-NEXT: kandw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $1, %k0, %k1
+; AVX512VL-NEXT: kandw %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = icmp eq <16 x i8> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.and.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v4i64_v4i1(<4 x i64>) {
+; SSE2-LABEL: icmp_v4i64_v4i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,0,3,2]
+; SSE2-NEXT: pand %xmm1, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: packssdw %xmm3, %xmm1
+; SSE2-NEXT: movmskps %xmm1, %eax
+; SSE2-NEXT: cmpb $15, %al
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v4i64_v4i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pcmpeqq %xmm2, %xmm1
+; SSE41-NEXT: pcmpeqq %xmm2, %xmm0
+; SSE41-NEXT: packssdw %xmm1, %xmm0
+; SSE41-NEXT: movmskps %xmm0, %eax
+; SSE41-NEXT: cmpb $15, %al
+; SSE41-NEXT: sete %al
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: icmp_v4i64_v4i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovmskpd %ymm0, %eax
+; AVX1-NEXT: cmpb $15, %al
+; AVX1-NEXT: sete %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v4i64_v4i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovmskpd %ymm0, %eax
+; AVX2-NEXT: cmpb $15, %al
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v4i64_v4i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vptestnmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k1 {%k1}
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v4i64_v4i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmq %ymm0, %ymm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k1 {%k1}
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k0 {%k1}
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <4 x i64> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.and.v4i1(<4 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v8i32_v8i1(<8 x i32>) {
+; SSE-LABEL: icmp_v8i32_v8i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: pcmpeqd %xmm2, %xmm1
+; SSE-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: packsswb %xmm0, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: cmpb $-1, %al
+; SSE-NEXT: sete %al
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: icmp_v8i32_v8i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovmskps %ymm0, %eax
+; AVX1-NEXT: cmpb $-1, %al
+; AVX1-NEXT: sete %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v8i32_v8i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovmskps %ymm0, %eax
+; AVX2-NEXT: cmpb $-1, %al
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v8i32_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vptestnmd %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1 {%k1}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1 {%k1}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v8i32_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmd %ymm0, %ymm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k1 {%k1}
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k1 {%k1}
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0 {%k1}
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <8 x i32> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.and.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v16i16_v16i1(<16 x i16>) {
+; SSE-LABEL: icmp_v16i16_v16i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: pcmpeqw %xmm2, %xmm1
+; SSE-NEXT: pcmpeqw %xmm2, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: sete %al
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: icmp_v16i16_v16i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: sete %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v16i16_v16i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: cmpw $-1, %ax
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v16i16_v16i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vptestnmw %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: kshiftrw $8, %k0, %k1
+; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: kshiftrw $4, %k0, %k1
+; AVX512BW-NEXT: kandw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $2, %k0, %k1
+; AVX512BW-NEXT: kandw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $1, %k0, %k1
+; AVX512BW-NEXT: kandw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v16i16_v16i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmw %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: kshiftrw $8, %k0, %k1
+; AVX512VL-NEXT: kandw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $4, %k0, %k1
+; AVX512VL-NEXT: kandw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $2, %k0, %k1
+; AVX512VL-NEXT: kandw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $1, %k0, %k1
+; AVX512VL-NEXT: kandw %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <16 x i16> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.and.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v32i8_v32i1(<32 x i8>) {
+; SSE-LABEL: icmp_v32i8_v32i1:
+; SSE: # %bb.0:
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE-NEXT: pmovmskb %xmm1, %eax
+; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: sete %al
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: icmp_v32i8_v32i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: sete %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v32i8_v32i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: cmpl $-1, %eax
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v32i8_v32i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vptestnmb %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: kshiftrd $16, %k0, %k1
+; AVX512BW-NEXT: vpcmpeqb %zmm1, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: kshiftrd $8, %k0, %k1
+; AVX512BW-NEXT: kandd %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $4, %k0, %k1
+; AVX512BW-NEXT: kandd %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $2, %k0, %k1
+; AVX512BW-NEXT: kandd %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $1, %k0, %k1
+; AVX512BW-NEXT: kandd %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v32i8_v32i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmb %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: kshiftrd $16, %k0, %k1
+; AVX512VL-NEXT: kandd %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $8, %k0, %k1
+; AVX512VL-NEXT: kandd %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $4, %k0, %k1
+; AVX512VL-NEXT: kandd %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $2, %k0, %k1
+; AVX512VL-NEXT: kandd %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $1, %k0, %k1
+; AVX512VL-NEXT: kandd %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <32 x i8> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.and.v32i1(<32 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v8i64_v8i1(<8 x i64>) {
+; SSE2-LABEL: icmp_v8i64_v8i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,0,3,2]
+; SSE2-NEXT: pand %xmm3, %xmm5
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,0,3,2]
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: packssdw %xmm5, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,0,3,2]
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: packssdw %xmm2, %xmm1
+; SSE2-NEXT: packssdw %xmm3, %xmm1
+; SSE2-NEXT: packsswb %xmm0, %xmm1
+; SSE2-NEXT: pmovmskb %xmm1, %eax
+; SSE2-NEXT: cmpb $-1, %al
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v8i64_v8i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pcmpeqq %xmm4, %xmm3
+; SSE41-NEXT: pcmpeqq %xmm4, %xmm2
+; SSE41-NEXT: packssdw %xmm3, %xmm2
+; SSE41-NEXT: pcmpeqq %xmm4, %xmm1
+; SSE41-NEXT: pcmpeqq %xmm4, %xmm0
+; SSE41-NEXT: packssdw %xmm1, %xmm0
+; SSE41-NEXT: packssdw %xmm2, %xmm0
+; SSE41-NEXT: packsswb %xmm0, %xmm0
+; SSE41-NEXT: pmovmskb %xmm0, %eax
+; SSE41-NEXT: cmpb $-1, %al
+; SSE41-NEXT: sete %al
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: icmp_v8i64_v8i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovmskps %ymm0, %eax
+; AVX1-NEXT: cmpb $-1, %al
+; AVX1-NEXT: sete %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v8i64_v8i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpeqq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vmovmskps %ymm0, %eax
+; AVX2-NEXT: cmpb $-1, %al
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v8i64_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vptestnmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1 {%k1}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1 {%k1}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v8i64_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmq %zmm0, %zmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k1 {%k1}
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k1 {%k1}
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0 {%k1}
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <8 x i64> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.and.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v16i32_v16i1(<16 x i32>) {
+; SSE-LABEL: icmp_v16i32_v16i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm4, %xmm4
+; SSE-NEXT: pcmpeqd %xmm4, %xmm3
+; SSE-NEXT: pcmpeqd %xmm4, %xmm2
+; SSE-NEXT: packssdw %xmm3, %xmm2
+; SSE-NEXT: pcmpeqd %xmm4, %xmm1
+; SSE-NEXT: pcmpeqd %xmm4, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: packsswb %xmm2, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: sete %al
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: icmp_v16i32_v16i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: sete %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v16i32_v16i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: cmpw $-1, %ax
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: icmp_v16i32_v16i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vptestnmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kshiftrw $8, %k0, %k1
+; AVX512-NEXT: kandw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $4, %k0, %k1
+; AVX512-NEXT: kandw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $2, %k0, %k1
+; AVX512-NEXT: kandw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $1, %k0, %k1
+; AVX512-NEXT: kandw %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = icmp eq <16 x i32> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.and.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v32i16_v32i1(<32 x i16>) {
+; SSE-LABEL: icmp_v32i16_v32i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm4, %xmm4
+; SSE-NEXT: pcmpeqw %xmm4, %xmm1
+; SSE-NEXT: pcmpeqw %xmm4, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: pcmpeqw %xmm4, %xmm3
+; SSE-NEXT: pcmpeqw %xmm4, %xmm2
+; SSE-NEXT: packsswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm0, %xmm2
+; SSE-NEXT: pmovmskb %xmm2, %eax
+; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: sete %al
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: icmp_v32i16_v32i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: cmpw $-1, %ax
+; AVX1-NEXT: sete %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v32i16_v32i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpeqw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: cmpl $-1, %eax
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: icmp_v32i16_v32i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vptestnmw %zmm0, %zmm0, %k0
+; AVX512-NEXT: kshiftrd $16, %k0, %k1
+; AVX512-NEXT: kandd %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $8, %k0, %k1
+; AVX512-NEXT: kandd %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $4, %k0, %k1
+; AVX512-NEXT: kandd %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $2, %k0, %k1
+; AVX512-NEXT: kandd %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $1, %k0, %k1
+; AVX512-NEXT: kandd %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = icmp eq <32 x i16> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.and.v32i1(<32 x i1> %a)
+ ret i1 %b
+}
+
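+; For v64i8 the pre-AVX512 lowerings below OR the input vectors together before
+; a single compare, since per lane (a == 0) & (b == 0) is equivalent to (a | b) == 0.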
+define i1 @icmp_v64i8_v64i1(<64 x i8>) {
+; SSE-LABEL: icmp_v64i8_v64i1:
+; SSE: # %bb.0:
+; SSE-NEXT: por %xmm3, %xmm1
+; SSE-NEXT: pxor %xmm3, %xmm3
+; SSE-NEXT: por %xmm2, %xmm1
+; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: pcmpeqb %xmm3, %xmm1
+; SSE-NEXT: pmovmskb %xmm1, %eax
+; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; SSE-NEXT: sete %al
+; SSE-NEXT: negb %al
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: icmp_v64i8_v64i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; AVX1-NEXT: sete %al
+; AVX1-NEXT: negb %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v64i8_v64i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: cmpl $-1, %eax
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: negb %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: icmp_v64i8_v64i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vptestnmb %zmm0, %zmm0, %k0
+; AVX512-NEXT: kshiftrq $32, %k0, %k1
+; AVX512-NEXT: kandq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $16, %k0, %k1
+; AVX512-NEXT: kandq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $8, %k0, %k1
+; AVX512-NEXT: kandq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $4, %k0, %k1
+; AVX512-NEXT: kandq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $2, %k0, %k1
+; AVX512-NEXT: kandq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $1, %k0, %k1
+; AVX512-NEXT: kandq %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = icmp eq <64 x i8> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.and.v64i1(<64 x i1> %a)
+ ret i1 %b
+}
+
+declare i1 @llvm.experimental.vector.reduce.and.v2i1(<2 x i1>)
+declare i1 @llvm.experimental.vector.reduce.and.v4i1(<4 x i1>)
+declare i1 @llvm.experimental.vector.reduce.and.v8i1(<8 x i1>)
+declare i1 @llvm.experimental.vector.reduce.and.v16i1(<16 x i1>)
+declare i1 @llvm.experimental.vector.reduce.and.v32i1(<32 x i1>)
+declare i1 @llvm.experimental.vector.reduce.and.v64i1(<64 x i1>)
Added: llvm/trunk/test/CodeGen/X86/vector-reduce-or-bool.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-or-bool.ll?rev=359385&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-or-bool.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-or-bool.ll Sat Apr 27 09:49:54 2019
@@ -0,0 +1,1902 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
+
+;
+; Truncate
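+; Truncation keeps only bit 0 of each element; the OR reduction is true if any
+; of those low bits is set.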
+;
+
+define i1 @trunc_v2i64_v2i1(<2 x i64>) {
+; SSE-LABEL: trunc_v2i64_v2i1:
+; SSE: # %bb.0:
+; SSE-NEXT: psllq $63, %xmm0
+; SSE-NEXT: movmskpd %xmm0, %eax
+; SSE-NEXT: testb %al, %al
+; SSE-NEXT: setne %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: trunc_v2i64_v2i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX-NEXT: vmovmskpd %xmm0, %eax
+; AVX-NEXT: testb %al, %al
+; AVX-NEXT: setne %al
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v2i64_v2i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v2i64_v2i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmq %xmm0, %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512VL-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmq %xmm0, %xmm0, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = trunc <2 x i64> %0 to <2 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.or.v2i1(<2 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v4i32_v4i1(<4 x i32>) {
+; SSE-LABEL: trunc_v4i32_v4i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pslld $31, %xmm0
+; SSE-NEXT: movmskps %xmm0, %eax
+; SSE-NEXT: testb %al, %al
+; SSE-NEXT: setne %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: trunc_v4i32_v4i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX-NEXT: vmovmskps %xmm0, %eax
+; AVX-NEXT: testb %al, %al
+; AVX-NEXT: setne %al
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v4i32_v4i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v4i32_v4i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vmovdqa32 %xmm1, %xmm2 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %xmm2, %xmm2
+; AVX512VL-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k1
+; AVX512VL-NEXT: vmovdqa32 %xmm1, %xmm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512VL-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = trunc <4 x i32> %0 to <4 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.or.v4i1(<4 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v8i16_v8i1(<8 x i8>) {
+; SSE-LABEL: trunc_v8i16_v8i1:
+; SSE: # %bb.0:
+; SSE-NEXT: psllw $15, %xmm0
+; SSE-NEXT: packsswb %xmm0, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: testb %al, %al
+; SSE-NEXT: setne %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: trunc_v8i16_v8i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: testb %al, %al
+; AVX-NEXT: setne %al
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v8i16_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX512BW-NEXT: vpmovw2m %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v8i16_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX512VL-NEXT: vpmovw2m %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <8 x i8> %0 to <8 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.or.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v16i8_v16i1(<16 x i8>) {
+; SSE-LABEL: trunc_v16i8_v16i1:
+; SSE: # %bb.0:
+; SSE-NEXT: psllw $7, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: testw %ax, %ax
+; SSE-NEXT: setne %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: trunc_v16i8_v16i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: testw %ax, %ax
+; AVX-NEXT: setne %al
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v16i8_v16i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
+; AVX512BW-NEXT: kshiftrw $8, %k0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $4, %k0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $2, %k0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $1, %k0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v16i8_v16i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX512VL-NEXT: vpmovb2m %xmm0, %k0
+; AVX512VL-NEXT: kshiftrw $8, %k0, %k1
+; AVX512VL-NEXT: korw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $4, %k0, %k1
+; AVX512VL-NEXT: korw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $2, %k0, %k1
+; AVX512VL-NEXT: korw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $1, %k0, %k1
+; AVX512VL-NEXT: korw %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = trunc <16 x i8> %0 to <16 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.or.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v4i64_v4i1(<4 x i64>) {
+; SSE-LABEL: trunc_v4i64_v4i1:
+; SSE: # %bb.0:
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: pslld $31, %xmm0
+; SSE-NEXT: movmskps %xmm0, %eax
+; SSE-NEXT: testb %al, %al
+; SSE-NEXT: setne %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: trunc_v4i64_v4i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX-NEXT: vmovmskps %xmm0, %eax
+; AVX-NEXT: testb %al, %al
+; AVX-NEXT: setne %al
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v4i64_v4i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllq $63, %ymm0, %ymm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v4i64_v4i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllq $63, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmq %ymm0, %ymm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <4 x i64> %0 to <4 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.or.v4i1(<4 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v8i32_v8i1(<8 x i32>) {
+; SSE2-LABEL: trunc_v8i32_v8i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pslld $16, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: pslld $16, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm1, %xmm0
+; SSE2-NEXT: psllw $15, %xmm0
+; SSE2-NEXT: packsswb %xmm0, %xmm0
+; SSE2-NEXT: pmovmskb %xmm0, %eax
+; SSE2-NEXT: testb %al, %al
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v8i32_v8i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE41-NEXT: pshufb %xmm2, %xmm1
+; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: psllw $15, %xmm0
+; SSE41-NEXT: packsswb %xmm0, %xmm0
+; SSE41-NEXT: pmovmskb %xmm0, %eax
+; SSE41-NEXT: testb %al, %al
+; SSE41-NEXT: setne %al
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v8i32_v8i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: testb %al, %al
+; AVX1-NEXT: setne %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v8i32_v8i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: testb %al, %al
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v8i32_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v8i32_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT: vmovdqa32 %ymm1, %ymm2 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX512VL-NEXT: vpslld $31, %ymm2, %ymm2
+; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm1, %ymm2 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm2, %ymm2
+; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm1, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <8 x i32> %0 to <8 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.or.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v16i16_v16i1(<16 x i16>) {
+; SSE2-LABEL: trunc_v16i16_v16i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: psllw $7, %xmm0
+; SSE2-NEXT: pmovmskb %xmm0, %eax
+; SSE2-NEXT: testw %ax, %ax
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v16i16_v16i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm2, %xmm1
+; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: psllw $7, %xmm0
+; SSE41-NEXT: pmovmskb %xmm0, %eax
+; SSE41-NEXT: testw %ax, %ax
+; SSE41-NEXT: setne %al
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v16i16_v16i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: testw %ax, %ax
+; AVX1-NEXT: setne %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v16i16_v16i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: testw %ax, %ax
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v16i16_v16i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllw $15, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
+; AVX512BW-NEXT: kshiftrw $8, %k0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $4, %k0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $2, %k0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $1, %k0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v16i16_v16i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllw $15, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovw2m %ymm0, %k0
+; AVX512VL-NEXT: kshiftrw $8, %k0, %k1
+; AVX512VL-NEXT: korw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $4, %k0, %k1
+; AVX512VL-NEXT: korw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $2, %k0, %k1
+; AVX512VL-NEXT: korw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $1, %k0, %k1
+; AVX512VL-NEXT: korw %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <16 x i16> %0 to <16 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.or.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v32i8_v32i1(<32 x i8>) {
+; SSE-LABEL: trunc_v32i8_v32i1:
+; SSE: # %bb.0:
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: psllw $7, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: testw %ax, %ax
+; SSE-NEXT: setne %al
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_v32i8_v32i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: testw %ax, %ax
+; AVX1-NEXT: setne %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v32i8_v32i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: testl %eax, %eax
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v32i8_v32i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
+; AVX512BW-NEXT: kshiftrd $16, %k0, %k1
+; AVX512BW-NEXT: kord %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $8, %k0, %k1
+; AVX512BW-NEXT: kord %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $4, %k0, %k1
+; AVX512BW-NEXT: kord %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $2, %k0, %k1
+; AVX512BW-NEXT: kord %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $1, %k0, %k1
+; AVX512BW-NEXT: kord %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v32i8_v32i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovb2m %ymm0, %k0
+; AVX512VL-NEXT: kshiftrd $16, %k0, %k1
+; AVX512VL-NEXT: kord %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $8, %k0, %k1
+; AVX512VL-NEXT: kord %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $4, %k0, %k1
+; AVX512VL-NEXT: kord %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $2, %k0, %k1
+; AVX512VL-NEXT: kord %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $1, %k0, %k1
+; AVX512VL-NEXT: kord %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <32 x i8> %0 to <32 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.or.v32i1(<32 x i1> %a)
+ ret i1 %b
+}
+
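+; 512-bit truncate cases: v8i64, v16i32, v32i16 and v64i8 sources.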
+define i1 @trunc_v8i64_v8i1(<8 x i64>) {
+; SSE2-LABEL: trunc_v8i64_v8i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSE2-NEXT: psllw $15, %xmm2
+; SSE2-NEXT: packsswb %xmm0, %xmm2
+; SSE2-NEXT: pmovmskb %xmm2, %eax
+; SSE2-NEXT: testb %al, %al
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v8i64_v8i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
+; SSE41-NEXT: packusdw %xmm3, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: packusdw %xmm2, %xmm0
+; SSE41-NEXT: psllw $15, %xmm0
+; SSE41-NEXT: packsswb %xmm0, %xmm0
+; SSE41-NEXT: pmovmskb %xmm0, %eax
+; SSE41-NEXT: testb %al, %al
+; SSE41-NEXT: setne %al
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v8i64_v8i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [65535,65535,65535,65535]
+; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: testb %al, %al
+; AVX1-NEXT: setne %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v8i64_v8i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: testb %al, %al
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v8i64_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v8i64_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <8 x i64> %0 to <8 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.or.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v16i32_v16i1(<16 x i32>) {
+; SSE2-LABEL: trunc_v16i32_v16i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: psllw $7, %xmm0
+; SSE2-NEXT: pmovmskb %xmm0, %eax
+; SSE2-NEXT: testw %ax, %ax
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v16i32_v16i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE41-NEXT: pand %xmm4, %xmm3
+; SSE41-NEXT: pand %xmm4, %xmm2
+; SSE41-NEXT: packusdw %xmm3, %xmm2
+; SSE41-NEXT: pand %xmm4, %xmm1
+; SSE41-NEXT: pand %xmm4, %xmm0
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: packuswb %xmm2, %xmm0
+; SSE41-NEXT: psllw $7, %xmm0
+; SSE41-NEXT: pmovmskb %xmm0, %eax
+; SSE41-NEXT: testw %ax, %ax
+; SSE41-NEXT: setne %al
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v16i32_v16i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: testw %ax, %ax
+; AVX1-NEXT: setne %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v16i32_v16i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; AVX2-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: testw %ax, %ax
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_v16i32_v16i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kshiftrw $8, %k0, %k1
+; AVX512-NEXT: korw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $4, %k0, %k1
+; AVX512-NEXT: korw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $2, %k0, %k1
+; AVX512-NEXT: korw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $1, %k0, %k1
+; AVX512-NEXT: korw %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = trunc <16 x i32> %0 to <16 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.or.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v32i16_v32i1(<32 x i16>) {
+; SSE2-LABEL: trunc_v32i16_v32i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: psllw $7, %xmm0
+; SSE2-NEXT: pmovmskb %xmm0, %eax
+; SSE2-NEXT: testw %ax, %ax
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v32i16_v32i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm4, %xmm3
+; SSE41-NEXT: pshufb %xmm4, %xmm2
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE41-NEXT: pshufb %xmm4, %xmm1
+; SSE41-NEXT: pshufb %xmm4, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: por %xmm2, %xmm0
+; SSE41-NEXT: psllw $7, %xmm0
+; SSE41-NEXT: pmovmskb %xmm0, %eax
+; SSE41-NEXT: testw %ax, %ax
+; SSE41-NEXT: setne %al
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v32i16_v32i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: testw %ax, %ax
+; AVX1-NEXT: setne %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v32i16_v32i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: testl %eax, %eax
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_v32i16_v32i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllw $15, %zmm0, %zmm0
+; AVX512-NEXT: vpmovw2m %zmm0, %k0
+; AVX512-NEXT: kshiftrd $16, %k0, %k1
+; AVX512-NEXT: kord %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $8, %k0, %k1
+; AVX512-NEXT: kord %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $4, %k0, %k1
+; AVX512-NEXT: kord %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $2, %k0, %k1
+; AVX512-NEXT: kord %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $1, %k0, %k1
+; AVX512-NEXT: kord %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = trunc <32 x i16> %0 to <32 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.or.v32i1(<32 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v64i8_v64i1(<64 x i8>) {
+; SSE2-LABEL: trunc_v64i8_v64i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: por %xmm3, %xmm1
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrlw $8, %xmm1
+; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v64i8_v64i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: por %xmm3, %xmm1
+; SSE41-NEXT: por %xmm2, %xmm1
+; SSE41-NEXT: por %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE41-NEXT: por %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE41-NEXT: por %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: por %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrlw $8, %xmm1
+; SSE41-NEXT: por %xmm0, %xmm1
+; SSE41-NEXT: pextrb $0, %xmm1, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v64i8_v64i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v64i8_v64i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_v64i8_v64i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllw $7, %zmm0, %zmm0
+; AVX512-NEXT: vpmovb2m %zmm0, %k0
+; AVX512-NEXT: kshiftrq $32, %k0, %k1
+; AVX512-NEXT: korq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $16, %k0, %k1
+; AVX512-NEXT: korq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $8, %k0, %k1
+; AVX512-NEXT: korq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $4, %k0, %k1
+; AVX512-NEXT: korq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $2, %k0, %k1
+; AVX512-NEXT: korq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $1, %k0, %k1
+; AVX512-NEXT: korq %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = trunc <64 x i8> %0 to <64 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.or.v64i1(<64 x i1> %a)
+ ret i1 %b
+}
+
+;
+; Comparison
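+; Each element is compared for equality against zero and the i1 results are
+; OR-reduced, i.e. the result is true if any element is zero.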
+;
+
+define i1 @icmp_v2i64_v2i1(<2 x i64>) {
+; SSE2-LABEL: icmp_v2i64_v2i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: movmskpd %xmm0, %eax
+; SSE2-NEXT: testb %al, %al
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v2i64_v2i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pcmpeqq %xmm0, %xmm1
+; SSE41-NEXT: movmskpd %xmm1, %eax
+; SSE41-NEXT: testb %al, %al
+; SSE41-NEXT: setne %al
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: icmp_v2i64_v2i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovmskpd %xmm0, %eax
+; AVX-NEXT: testb %al, %al
+; AVX-NEXT: setne %al
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v2i64_v2i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vptestnmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v2i64_v2i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmq %xmm0, %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512VL-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmq %xmm0, %xmm0, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = icmp eq <2 x i64> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.or.v2i1(<2 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v4i32_v4i1(<4 x i32>) {
+; SSE-LABEL: icmp_v4i32_v4i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE-NEXT: movmskps %xmm1, %eax
+; SSE-NEXT: testb %al, %al
+; SSE-NEXT: setne %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: icmp_v4i32_v4i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovmskps %xmm0, %eax
+; AVX-NEXT: testb %al, %al
+; AVX-NEXT: setne %al
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v4i32_v4i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vptestnmd %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v4i32_v4i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmd %xmm0, %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = icmp eq <4 x i32> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.or.v4i1(<4 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v8i16_v8i1(<8 x i8>) {
+; SSE-LABEL: icmp_v8i16_v8i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pcmpeqw %xmm0, %xmm1
+; SSE-NEXT: packsswb %xmm0, %xmm1
+; SSE-NEXT: pmovmskb %xmm1, %eax
+; SSE-NEXT: testb %al, %al
+; SSE-NEXT: setne %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: icmp_v8i16_v8i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: testb %al, %al
+; AVX-NEXT: setne %al
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v8i16_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX512BW-NEXT: vptestnmw %zmm1, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v8i16_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmw {{.*}}(%rip), %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <8 x i8> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.or.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v16i8_v16i1(<16 x i8>) {
+; SSE-LABEL: icmp_v16i8_v16i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE-NEXT: pmovmskb %xmm1, %eax
+; SSE-NEXT: testw %ax, %ax
+; SSE-NEXT: setne %al
+; SSE-NEXT: retq
+;
+; AVX-LABEL: icmp_v16i8_v16i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpmovmskb %xmm0, %eax
+; AVX-NEXT: testw %ax, %ax
+; AVX-NEXT: setne %al
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v16i8_v16i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vptestnmb %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kshiftrw $8, %k0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $4, %k0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $2, %k0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $1, %k0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v16i8_v16i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmb %xmm0, %xmm0, %k0
+; AVX512VL-NEXT: kshiftrw $8, %k0, %k1
+; AVX512VL-NEXT: korw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $4, %k0, %k1
+; AVX512VL-NEXT: korw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $2, %k0, %k1
+; AVX512VL-NEXT: korw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $1, %k0, %k1
+; AVX512VL-NEXT: korw %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = icmp eq <16 x i8> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.or.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v4i64_v4i1(<4 x i64>) {
+; SSE2-LABEL: icmp_v4i64_v4i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,0,3,2]
+; SSE2-NEXT: pand %xmm1, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: packssdw %xmm3, %xmm1
+; SSE2-NEXT: movmskps %xmm1, %eax
+; SSE2-NEXT: testb %al, %al
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v4i64_v4i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pcmpeqq %xmm2, %xmm1
+; SSE41-NEXT: pcmpeqq %xmm2, %xmm0
+; SSE41-NEXT: packssdw %xmm1, %xmm0
+; SSE41-NEXT: movmskps %xmm0, %eax
+; SSE41-NEXT: testb %al, %al
+; SSE41-NEXT: setne %al
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: icmp_v4i64_v4i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovmskpd %ymm0, %eax
+; AVX1-NEXT: testb %al, %al
+; AVX1-NEXT: setne %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v4i64_v4i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovmskpd %ymm0, %eax
+; AVX2-NEXT: testb %al, %al
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v4i64_v4i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vptestnmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v4i64_v4i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmq %ymm0, %ymm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <4 x i64> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.or.v4i1(<4 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v8i32_v8i1(<8 x i32>) {
+; SSE-LABEL: icmp_v8i32_v8i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: pcmpeqd %xmm2, %xmm1
+; SSE-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: packsswb %xmm0, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: testb %al, %al
+; SSE-NEXT: setne %al
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: icmp_v8i32_v8i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovmskps %ymm0, %eax
+; AVX1-NEXT: testb %al, %al
+; AVX1-NEXT: setne %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v8i32_v8i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovmskps %ymm0, %eax
+; AVX2-NEXT: testb %al, %al
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v8i32_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vptestnmd %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v8i32_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmd %ymm0, %ymm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <8 x i32> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.or.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v16i16_v16i1(<16 x i16>) {
+; SSE-LABEL: icmp_v16i16_v16i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: pcmpeqw %xmm2, %xmm1
+; SSE-NEXT: pcmpeqw %xmm2, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: testw %ax, %ax
+; SSE-NEXT: setne %al
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: icmp_v16i16_v16i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: testw %ax, %ax
+; AVX1-NEXT: setne %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v16i16_v16i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: testw %ax, %ax
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v16i16_v16i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vptestnmw %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kshiftrw $8, %k0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $4, %k0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $2, %k0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $1, %k0, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v16i16_v16i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmw %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: kshiftrw $8, %k0, %k1
+; AVX512VL-NEXT: korw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $4, %k0, %k1
+; AVX512VL-NEXT: korw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $2, %k0, %k1
+; AVX512VL-NEXT: korw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $1, %k0, %k1
+; AVX512VL-NEXT: korw %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <16 x i16> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.or.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v32i8_v32i1(<32 x i8>) {
+; SSE-LABEL: icmp_v32i8_v32i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: pcmpeqb %xmm2, %xmm1
+; SSE-NEXT: pcmpeqb %xmm2, %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: testw %ax, %ax
+; SSE-NEXT: setne %al
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: icmp_v32i8_v32i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: testw %ax, %ax
+; AVX1-NEXT: setne %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v32i8_v32i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: testl %eax, %eax
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v32i8_v32i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vptestnmb %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kshiftrd $16, %k0, %k1
+; AVX512BW-NEXT: kord %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $8, %k0, %k1
+; AVX512BW-NEXT: kord %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $4, %k0, %k1
+; AVX512BW-NEXT: kord %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $2, %k0, %k1
+; AVX512BW-NEXT: kord %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $1, %k0, %k1
+; AVX512BW-NEXT: kord %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v32i8_v32i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmb %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: kshiftrd $16, %k0, %k1
+; AVX512VL-NEXT: kord %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $8, %k0, %k1
+; AVX512VL-NEXT: kord %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $4, %k0, %k1
+; AVX512VL-NEXT: kord %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $2, %k0, %k1
+; AVX512VL-NEXT: kord %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $1, %k0, %k1
+; AVX512VL-NEXT: kord %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <32 x i8> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.or.v32i1(<32 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v8i64_v8i1(<8 x i64>) {
+; SSE2-LABEL: icmp_v8i64_v8i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,0,3,2]
+; SSE2-NEXT: pand %xmm3, %xmm5
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,0,3,2]
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: packssdw %xmm5, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,0,3,2]
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: packssdw %xmm2, %xmm1
+; SSE2-NEXT: packssdw %xmm3, %xmm1
+; SSE2-NEXT: packsswb %xmm0, %xmm1
+; SSE2-NEXT: pmovmskb %xmm1, %eax
+; SSE2-NEXT: testb %al, %al
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v8i64_v8i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pcmpeqq %xmm4, %xmm3
+; SSE41-NEXT: pcmpeqq %xmm4, %xmm2
+; SSE41-NEXT: packssdw %xmm3, %xmm2
+; SSE41-NEXT: pcmpeqq %xmm4, %xmm1
+; SSE41-NEXT: pcmpeqq %xmm4, %xmm0
+; SSE41-NEXT: packssdw %xmm1, %xmm0
+; SSE41-NEXT: packssdw %xmm2, %xmm0
+; SSE41-NEXT: packsswb %xmm0, %xmm0
+; SSE41-NEXT: pmovmskb %xmm0, %eax
+; SSE41-NEXT: testb %al, %al
+; SSE41-NEXT: setne %al
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: icmp_v8i64_v8i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovmskps %ymm0, %eax
+; AVX1-NEXT: testb %al, %al
+; AVX1-NEXT: setne %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v8i64_v8i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpeqq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vmovmskps %ymm0, %eax
+; AVX2-NEXT: testb %al, %al
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v8i64_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vptestnmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: korw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v8i64_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmq %zmm0, %zmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: korw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <8 x i64> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.or.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v16i32_v16i1(<16 x i32>) {
+; SSE-LABEL: icmp_v16i32_v16i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm4, %xmm4
+; SSE-NEXT: pcmpeqd %xmm4, %xmm3
+; SSE-NEXT: pcmpeqd %xmm4, %xmm2
+; SSE-NEXT: packssdw %xmm3, %xmm2
+; SSE-NEXT: pcmpeqd %xmm4, %xmm1
+; SSE-NEXT: pcmpeqd %xmm4, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: packsswb %xmm2, %xmm0
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: testw %ax, %ax
+; SSE-NEXT: setne %al
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: icmp_v16i32_v16i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: testw %ax, %ax
+; AVX1-NEXT: setne %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v16i32_v16i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: testw %ax, %ax
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: icmp_v16i32_v16i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vptestnmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kshiftrw $8, %k0, %k1
+; AVX512-NEXT: korw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $4, %k0, %k1
+; AVX512-NEXT: korw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $2, %k0, %k1
+; AVX512-NEXT: korw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $1, %k0, %k1
+; AVX512-NEXT: korw %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = icmp eq <16 x i32> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.or.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v32i16_v32i1(<32 x i16>) {
+; SSE-LABEL: icmp_v32i16_v32i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm4, %xmm4
+; SSE-NEXT: pcmpeqw %xmm4, %xmm1
+; SSE-NEXT: pcmpeqw %xmm4, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: pcmpeqw %xmm4, %xmm3
+; SSE-NEXT: pcmpeqw %xmm4, %xmm2
+; SSE-NEXT: packsswb %xmm3, %xmm2
+; SSE-NEXT: por %xmm0, %xmm2
+; SSE-NEXT: pmovmskb %xmm2, %eax
+; SSE-NEXT: testw %ax, %ax
+; SSE-NEXT: setne %al
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: icmp_v32i16_v32i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: testw %ax, %ax
+; AVX1-NEXT: setne %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v32i16_v32i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpeqw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: testl %eax, %eax
+; AVX2-NEXT: setne %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: icmp_v32i16_v32i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vptestnmw %zmm0, %zmm0, %k0
+; AVX512-NEXT: kshiftrd $16, %k0, %k1
+; AVX512-NEXT: kord %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $8, %k0, %k1
+; AVX512-NEXT: kord %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $4, %k0, %k1
+; AVX512-NEXT: kord %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $2, %k0, %k1
+; AVX512-NEXT: kord %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $1, %k0, %k1
+; AVX512-NEXT: kord %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = icmp eq <32 x i16> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.or.v32i1(<32 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v64i8_v64i1(<64 x i8>) {
+; SSE-LABEL: icmp_v64i8_v64i1:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm4, %xmm4
+; SSE-NEXT: pcmpeqb %xmm4, %xmm2
+; SSE-NEXT: pcmpeqb %xmm4, %xmm0
+; SSE-NEXT: pcmpeqb %xmm4, %xmm3
+; SSE-NEXT: pcmpeqb %xmm4, %xmm1
+; SSE-NEXT: por %xmm3, %xmm1
+; SSE-NEXT: por %xmm2, %xmm1
+; SSE-NEXT: por %xmm0, %xmm1
+; SSE-NEXT: pmovmskb %xmm1, %eax
+; SSE-NEXT: negl %eax
+; SSE-NEXT: sbbb %al, %al
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: icmp_v64i8_v64i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpor %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: negl %eax
+; AVX1-NEXT: sbbb %al, %al
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v64i8_v64i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpeqb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpmovmskb %ymm0, %eax
+; AVX2-NEXT: negl %eax
+; AVX2-NEXT: sbbb %al, %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: icmp_v64i8_v64i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vptestnmb %zmm0, %zmm0, %k0
+; AVX512-NEXT: kshiftrq $32, %k0, %k1
+; AVX512-NEXT: korq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $16, %k0, %k1
+; AVX512-NEXT: korq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $8, %k0, %k1
+; AVX512-NEXT: korq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $4, %k0, %k1
+; AVX512-NEXT: korq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $2, %k0, %k1
+; AVX512-NEXT: korq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $1, %k0, %k1
+; AVX512-NEXT: korq %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = icmp eq <64 x i8> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.or.v64i1(<64 x i1> %a)
+ ret i1 %b
+}
+
+declare i1 @llvm.experimental.vector.reduce.or.v2i1(<2 x i1>)
+declare i1 @llvm.experimental.vector.reduce.or.v4i1(<4 x i1>)
+declare i1 @llvm.experimental.vector.reduce.or.v8i1(<8 x i1>)
+declare i1 @llvm.experimental.vector.reduce.or.v16i1(<16 x i1>)
+declare i1 @llvm.experimental.vector.reduce.or.v32i1(<32 x i1>)
+declare i1 @llvm.experimental.vector.reduce.or.v64i1(<64 x i1>)
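The icmp tests above all share one IR shape: compare the input against zero to form an <N x i1> mask, then feed that mask to the matching @llvm.experimental.vector.reduce.or intrinsic declared just above. A minimal standalone sketch of that pattern follows; the function name @any_lane_zero and the choice of <4 x i32> are illustrative and not part of the test file:

define i1 @any_lane_zero(<4 x i32> %v) {
  ; Compare each lane with zero, producing a <4 x i1> mask.
  %m = icmp eq <4 x i32> %v, zeroinitializer
  ; OR-reduce the mask: true if any lane of %v was zero.
  %r = call i1 @llvm.experimental.vector.reduce.or.v4i1(<4 x i1> %m)
  ret i1 %r
}

declare i1 @llvm.experimental.vector.reduce.or.v4i1(<4 x i1>)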
Added: llvm/trunk/test/CodeGen/X86/vector-reduce-xor-bool.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-reduce-xor-bool.ll?rev=359385&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-reduce-xor-bool.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-reduce-xor-bool.ll Sat Apr 27 09:49:54 2019
@@ -0,0 +1,2556 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
+
+;
+; Truncate
+;
+
+define i1 @trunc_v2i64_v2i1(<2 x i64>) {
+; SSE2-LABEL: trunc_v2i64_v2i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v2i64_v2i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pextrb $0, %xmm1, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: trunc_v2i64_v2i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpextrb $0, %xmm0, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v2i64_v2i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v2i64_v2i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmq %xmm0, %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512VL-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmq %xmm0, %xmm0, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = trunc <2 x i64> %0 to <2 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v2i1(<2 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v4i32_v4i1(<4 x i32>) {
+; SSE2-LABEL: trunc_v4i32_v4i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v4i32_v4i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pextrb $0, %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: trunc_v4i32_v4i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpextrb $0, %xmm0, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v4i32_v4i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v4i32_v4i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = trunc <4 x i32> %0 to <4 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v4i1(<4 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v8i16_v8i1(<8 x i8>) {
+; SSE2-LABEL: trunc_v8i16_v8i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v8i16_v8i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $16, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pextrb $0, %xmm1, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: trunc_v8i16_v8i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpextrb $0, %xmm0, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v8i16_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX512BW-NEXT: vpmovw2m %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v8i16_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllw $15, %xmm0, %xmm0
+; AVX512VL-NEXT: vpmovw2m %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <8 x i8> %0 to <8 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v16i8_v16i1(<16 x i8>) {
+; SSE2-LABEL: trunc_v16i8_v16i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrlw $8, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v16i8_v16i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $16, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrlw $8, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pextrb $0, %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: trunc_v16i8_v16i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpextrb $0, %xmm0, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v16i8_v16i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
+; AVX512BW-NEXT: kshiftrw $8, %k0, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $4, %k0, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $2, %k0, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $1, %k0, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v16i8_v16i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllw $7, %xmm0, %xmm0
+; AVX512VL-NEXT: vpmovb2m %xmm0, %k0
+; AVX512VL-NEXT: kshiftrw $8, %k0, %k1
+; AVX512VL-NEXT: kxorw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $4, %k0, %k1
+; AVX512VL-NEXT: kxorw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $2, %k0, %k1
+; AVX512VL-NEXT: kxorw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $1, %k0, %k1
+; AVX512VL-NEXT: kxorw %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = trunc <16 x i8> %0 to <16 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v4i64_v4i1(<4 x i64>) {
+; SSE2-LABEL: trunc_v4i64_v4i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v4i64_v4i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pextrb $0, %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: trunc_v4i64_v4i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vxorpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX-NEXT: vxorpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpextrb $0, %xmm0, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v4i64_v4i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllq $63, %ymm0, %ymm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v4i64_v4i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllq $63, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmq %ymm0, %ymm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <4 x i64> %0 to <4 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v4i1(<4 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v8i32_v8i1(<8 x i32>) {
+; SSE2-LABEL: trunc_v8i32_v8i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v8i32_v8i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE41-NEXT: pshufb %xmm2, %xmm1
+; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $16, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pextrb $0, %xmm1, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v8i32_v8i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[0,2,2,3]
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[0,2]
+; AVX1-NEXT: vpxor %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v8i32_v8i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v8i32_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v8i32_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <8 x i32> %0 to <8 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v16i16_v16i1(<16 x i16>) {
+; SSE2-LABEL: trunc_v16i16_v16i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrlw $8, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v16i16_v16i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm2, %xmm1
+; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $16, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrlw $8, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pextrb $0, %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v16i16_v16i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v16i16_v16i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v16i16_v16i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllw $15, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
+; AVX512BW-NEXT: kshiftrw $8, %k0, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $4, %k0, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $2, %k0, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $1, %k0, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v16i16_v16i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllw $15, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovw2m %ymm0, %k0
+; AVX512VL-NEXT: kshiftrw $8, %k0, %k1
+; AVX512VL-NEXT: kxorw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $4, %k0, %k1
+; AVX512VL-NEXT: kxorw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $2, %k0, %k1
+; AVX512VL-NEXT: kxorw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $1, %k0, %k1
+; AVX512VL-NEXT: kxorw %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <16 x i16> %0 to <16 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v32i8_v32i1(<32 x i8>) {
+; SSE2-LABEL: trunc_v32i8_v32i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrlw $8, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v32i8_v32i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $16, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrlw $8, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pextrb $0, %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v32i8_v32i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v32i8_v32i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v32i8_v32i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
+; AVX512BW-NEXT: kshiftrd $16, %k0, %k1
+; AVX512BW-NEXT: kxord %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $8, %k0, %k1
+; AVX512BW-NEXT: kxord %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $4, %k0, %k1
+; AVX512BW-NEXT: kxord %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $2, %k0, %k1
+; AVX512BW-NEXT: kxord %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $1, %k0, %k1
+; AVX512BW-NEXT: kxord %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v32i8_v32i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovb2m %ymm0, %k0
+; AVX512VL-NEXT: kshiftrd $16, %k0, %k1
+; AVX512VL-NEXT: kxord %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $8, %k0, %k1
+; AVX512VL-NEXT: kxord %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $4, %k0, %k1
+; AVX512VL-NEXT: kxord %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $2, %k0, %k1
+; AVX512VL-NEXT: kxord %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $1, %k0, %k1
+; AVX512VL-NEXT: kxord %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <32 x i8> %0 to <32 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v32i1(<32 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v8i64_v8i1(<8 x i64>) {
+; SSE2-LABEL: trunc_v8i64_v8i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v8i64_v8i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
+; SSE41-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm2[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pextrb $0, %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: trunc_v8i64_v8i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
+; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpextrb $0, %xmm0, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_v8i64_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_v8i64_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = trunc <8 x i64> %0 to <8 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v16i32_v16i1(<16 x i32>) {
+; SSE2-LABEL: trunc_v16i32_v16i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255]
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrlw $8, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v16i32_v16i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm4, %xmm3
+; SSE41-NEXT: pshufb %xmm4, %xmm2
+; SSE41-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm3, %xmm1
+; SSE41-NEXT: pshufb %xmm3, %xmm0
+; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm2[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrlw $8, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pextrb $0, %xmm1, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v16i32_v16i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v16i32_v16i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_v16i32_v16i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kshiftrw $8, %k0, %k1
+; AVX512-NEXT: kxorw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $4, %k0, %k1
+; AVX512-NEXT: kxorw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $2, %k0, %k1
+; AVX512-NEXT: kxorw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $1, %k0, %k1
+; AVX512-NEXT: kxorw %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = trunc <16 x i32> %0 to <16 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v32i16_v32i1(<32 x i16>) {
+; SSE2-LABEL: trunc_v32i16_v32i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrlw $8, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v32i16_v32i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm4, %xmm3
+; SSE41-NEXT: pshufb %xmm4, %xmm2
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE41-NEXT: pshufb %xmm4, %xmm1
+; SSE41-NEXT: pshufb %xmm4, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $16, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrlw $8, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pextrb $0, %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v32i16_v32i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v32i16_v32i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_v32i16_v32i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllw $15, %zmm0, %zmm0
+; AVX512-NEXT: vpmovw2m %zmm0, %k0
+; AVX512-NEXT: kshiftrd $16, %k0, %k1
+; AVX512-NEXT: kxord %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $8, %k0, %k1
+; AVX512-NEXT: kxord %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $4, %k0, %k1
+; AVX512-NEXT: kxord %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $2, %k0, %k1
+; AVX512-NEXT: kxord %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $1, %k0, %k1
+; AVX512-NEXT: kxord %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = trunc <32 x i16> %0 to <32 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v32i1(<32 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @trunc_v64i8_v64i1(<64 x i8>) {
+; SSE2-LABEL: trunc_v64i8_v64i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm3, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrlw $8, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: trunc_v64i8_v64i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm3, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrlw $8, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pextrb $0, %xmm1, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_v64i8_v64i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_v64i8_v64i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_v64i8_v64i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllw $7, %zmm0, %zmm0
+; AVX512-NEXT: vpmovb2m %zmm0, %k0
+; AVX512-NEXT: kshiftrq $32, %k0, %k1
+; AVX512-NEXT: kxorq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $16, %k0, %k1
+; AVX512-NEXT: kxorq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $8, %k0, %k1
+; AVX512-NEXT: kxorq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $4, %k0, %k1
+; AVX512-NEXT: kxorq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $2, %k0, %k1
+; AVX512-NEXT: kxorq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $1, %k0, %k1
+; AVX512-NEXT: kxorq %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = trunc <64 x i8> %0 to <64 x i1>
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v64i1(<64 x i1> %a)
+ ret i1 %b
+}
+
+;
+; Comparison
+;
+
+define i1 @icmp_v2i64_v2i1(<2 x i64>) {
+; SSE2-LABEL: icmp_v2i64_v2i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,0,3,2]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v2i64_v2i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pcmpeqq %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pextrb $0, %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: icmp_v2i64_v2i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpextrb $0, %xmm0, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v2i64_v2i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vptestnmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v2i64_v2i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmq %xmm0, %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512VL-NEXT: vpsllq $63, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmq %xmm0, %xmm0, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = icmp eq <2 x i64> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v2i1(<2 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v4i32_v4i1(<4 x i32>) {
+; SSE2-LABEL: icmp_v4i32_v4i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v4i32_v4i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pextrb $0, %xmm1, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: icmp_v4i32_v4i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpextrb $0, %xmm0, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v4i32_v4i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vptestnmd %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v4i32_v4i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmd %xmm0, %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = icmp eq <4 x i32> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v4i1(<4 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v8i16_v8i1(<8 x i8>) {
+; SSE2-LABEL: icmp_v8i16_v8i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqw %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v8i16_v8i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pcmpeqw %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pextrb $0, %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: icmp_v8i16_v8i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpextrb $0, %xmm0, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v8i16_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX512BW-NEXT: vptestnmw %zmm1, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v8i16_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmw {{.*}}(%rip), %xmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <8 x i8> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v16i8_v16i1(<16 x i8>) {
+; SSE2-LABEL: icmp_v16i8_v16i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrlw $8, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v16i8_v16i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrlw $8, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pextrb $0, %xmm1, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: icmp_v16i8_v16i1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpextrb $0, %xmm0, %eax
+; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v16i8_v16i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vptestnmb %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kshiftrw $8, %k0, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $4, %k0, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $2, %k0, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $1, %k0, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v16i8_v16i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmb %xmm0, %xmm0, %k0
+; AVX512VL-NEXT: kshiftrw $8, %k0, %k1
+; AVX512VL-NEXT: kxorw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $4, %k0, %k1
+; AVX512VL-NEXT: kxorw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $2, %k0, %k1
+; AVX512VL-NEXT: kxorw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $1, %k0, %k1
+; AVX512VL-NEXT: kxorw %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: retq
+ %a = icmp eq <16 x i8> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v4i64_v4i1(<4 x i64>) {
+; SSE2-LABEL: icmp_v4i64_v4i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,0,3,2]
+; SSE2-NEXT: pand %xmm1, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v4i64_v4i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pcmpeqq %xmm2, %xmm1
+; SSE41-NEXT: pcmpeqq %xmm2, %xmm0
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pextrb $0, %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: icmp_v4i64_v4i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpackssdw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpxor %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v4i64_v4i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpackssdw %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpxor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v4i64_v4i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vptestnmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512BW-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v4i64_v4i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmq %ymm0, %ymm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX512VL-NEXT: vptestmd %xmm1, %xmm1, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX512VL-NEXT: vptestmd %xmm0, %xmm0, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <4 x i64> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v4i1(<4 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v8i32_v8i1(<8 x i32>) {
+; SSE2-LABEL: icmp_v8i32_v8i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE2-NEXT: packssdw %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v8i32_v8i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm1
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE41-NEXT: packssdw %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $16, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pextrb $0, %xmm1, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: icmp_v8i32_v8i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v8i32_v8i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v8i32_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vptestnmd %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v8i32_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmd %ymm0, %ymm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <8 x i32> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v16i16_v16i1(<16 x i16>) {
+; SSE2-LABEL: icmp_v16i16_v16i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpeqw %xmm2, %xmm1
+; SSE2-NEXT: pcmpeqw %xmm2, %xmm0
+; SSE2-NEXT: packsswb %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrlw $8, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v16i16_v16i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pcmpeqw %xmm2, %xmm1
+; SSE41-NEXT: pcmpeqw %xmm2, %xmm0
+; SSE41-NEXT: packsswb %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $16, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrlw $8, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pextrb $0, %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: icmp_v16i16_v16i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v16i16_v16i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v16i16_v16i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vptestnmw %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kshiftrw $8, %k0, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $4, %k0, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $2, %k0, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $1, %k0, %k1
+; AVX512BW-NEXT: kxorw %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v16i16_v16i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmw %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: kshiftrw $8, %k0, %k1
+; AVX512VL-NEXT: kxorw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $4, %k0, %k1
+; AVX512VL-NEXT: kxorw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $2, %k0, %k1
+; AVX512VL-NEXT: kxorw %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrw $1, %k0, %k1
+; AVX512VL-NEXT: kxorw %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <16 x i16> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v32i8_v32i1(<32 x i8>) {
+; SSE2-LABEL: icmp_v32i8_v32i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpeqb %xmm2, %xmm1
+; SSE2-NEXT: pcmpeqb %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrlw $8, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v32i8_v32i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pcmpeqb %xmm2, %xmm1
+; SSE41-NEXT: pcmpeqb %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $16, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrlw $8, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pextrb $0, %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: icmp_v32i8_v32i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v32i8_v32i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v32i8_v32i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vptestnmb %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kshiftrd $16, %k0, %k1
+; AVX512BW-NEXT: kxord %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $8, %k0, %k1
+; AVX512BW-NEXT: kxord %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $4, %k0, %k1
+; AVX512BW-NEXT: kxord %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $2, %k0, %k1
+; AVX512BW-NEXT: kxord %k1, %k0, %k0
+; AVX512BW-NEXT: kshiftrd $1, %k0, %k1
+; AVX512BW-NEXT: kxord %k1, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v32i8_v32i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmb %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: kshiftrd $16, %k0, %k1
+; AVX512VL-NEXT: kxord %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $8, %k0, %k1
+; AVX512VL-NEXT: kxord %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $4, %k0, %k1
+; AVX512VL-NEXT: kxord %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $2, %k0, %k1
+; AVX512VL-NEXT: kxord %k1, %k0, %k0
+; AVX512VL-NEXT: kshiftrd $1, %k0, %k1
+; AVX512VL-NEXT: kxord %k1, %k0, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <32 x i8> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v32i1(<32 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v8i64_v8i1(<8 x i64>) {
+; SSE2-LABEL: icmp_v8i64_v8i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,0,3,2]
+; SSE2-NEXT: pand %xmm1, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,0,3,2]
+; SSE2-NEXT: pand %xmm0, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,0,3,2]
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,0,3,2]
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v8i64_v8i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pcmpeqq %xmm4, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7]
+; SSE41-NEXT: pcmpeqq %xmm4, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
+; SSE41-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE41-NEXT: pcmpeqq %xmm4, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE41-NEXT: pcmpeqq %xmm4, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm2[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pextrb $0, %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: icmp_v8i64_v8i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqq %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v8i64_v8i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpeqq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512BW-LABEL: icmp_v8i64_v8i1:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vptestnmq %zmm0, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kxorw %k0, %k1, %k0
+; AVX512BW-NEXT: kmovd %k0, %eax
+; AVX512BW-NEXT: # kill: def $al killed $al killed $eax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: icmp_v8i64_v8i1:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vptestnmq %zmm0, %zmm0, %k1
+; AVX512VL-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX512VL-NEXT: vpslld $31, %ymm1, %ymm1
+; AVX512VL-NEXT: vptestmd %ymm1, %ymm1, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k1
+; AVX512VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vptestmd %ymm0, %ymm0, %k0
+; AVX512VL-NEXT: kxorw %k0, %k1, %k0
+; AVX512VL-NEXT: kmovd %k0, %eax
+; AVX512VL-NEXT: # kill: def $al killed $al killed $eax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+ %a = icmp eq <8 x i64> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v8i1(<8 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v16i32_v16i1(<16 x i32>) {
+; SSE2-LABEL: icmp_v16i32_v16i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255]
+; SSE2-NEXT: pand %xmm5, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm2
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm0
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrlw $8, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v16i32_v16i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm3
+; SSE41-NEXT: movdqa {{.*#+}} xmm5 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm5, %xmm3
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm2
+; SSE41-NEXT: pshufb %xmm5, %xmm2
+; SSE41-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm1
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm3, %xmm1
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm0
+; SSE41-NEXT: pshufb %xmm3, %xmm0
+; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: palignr {{.*#+}} xmm0 = xmm2[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrlw $8, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pextrb $0, %xmm1, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: icmp_v16i32_v16i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v16i32_v16i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: icmp_v16i32_v16i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vptestnmd %zmm0, %zmm0, %k0
+; AVX512-NEXT: kshiftrw $8, %k0, %k1
+; AVX512-NEXT: kxorw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $4, %k0, %k1
+; AVX512-NEXT: kxorw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $2, %k0, %k1
+; AVX512-NEXT: kxorw %k1, %k0, %k0
+; AVX512-NEXT: kshiftrw $1, %k0, %k1
+; AVX512-NEXT: kxorw %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = icmp eq <16 x i32> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v16i1(<16 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v32i16_v32i1(<32 x i16>) {
+; SSE2-LABEL: icmp_v32i16_v32i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpeqw %xmm4, %xmm3
+; SSE2-NEXT: pcmpeqw %xmm4, %xmm2
+; SSE2-NEXT: packsswb %xmm3, %xmm2
+; SSE2-NEXT: pcmpeqw %xmm4, %xmm1
+; SSE2-NEXT: pcmpeqw %xmm4, %xmm0
+; SSE2-NEXT: packsswb %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrlw $8, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v32i16_v32i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pcmpeqw %xmm4, %xmm3
+; SSE41-NEXT: pcmpeqw %xmm4, %xmm2
+; SSE41-NEXT: packsswb %xmm3, %xmm2
+; SSE41-NEXT: pcmpeqw %xmm4, %xmm1
+; SSE41-NEXT: pcmpeqw %xmm4, %xmm0
+; SSE41-NEXT: packsswb %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $16, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrlw $8, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pextrb $0, %xmm0, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: icmp_v32i16_v32i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v32i16_v32i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpeqw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: icmp_v32i16_v32i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vptestnmw %zmm0, %zmm0, %k0
+; AVX512-NEXT: kshiftrd $16, %k0, %k1
+; AVX512-NEXT: kxord %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $8, %k0, %k1
+; AVX512-NEXT: kxord %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $4, %k0, %k1
+; AVX512-NEXT: kxord %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $2, %k0, %k1
+; AVX512-NEXT: kxord %k1, %k0, %k0
+; AVX512-NEXT: kshiftrd $1, %k0, %k1
+; AVX512-NEXT: kxord %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = icmp eq <32 x i16> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v32i1(<32 x i1> %a)
+ ret i1 %b
+}
+
+define i1 @icmp_v64i8_v64i1(<64 x i8>) {
+; SSE2-LABEL: icmp_v64i8_v64i1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpeqb %xmm4, %xmm2
+; SSE2-NEXT: pcmpeqb %xmm4, %xmm0
+; SSE2-NEXT: pcmpeqb %xmm4, %xmm3
+; SSE2-NEXT: pcmpeqb %xmm4, %xmm1
+; SSE2-NEXT: pxor %xmm3, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrlw $8, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: icmp_v64i8_v64i1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pcmpeqb %xmm4, %xmm2
+; SSE41-NEXT: pcmpeqb %xmm4, %xmm0
+; SSE41-NEXT: pcmpeqb %xmm4, %xmm3
+; SSE41-NEXT: pcmpeqb %xmm4, %xmm1
+; SSE41-NEXT: pxor %xmm3, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrlw $8, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: pextrb $0, %xmm1, %eax
+; SSE41-NEXT: # kill: def $al killed $al killed $eax
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: icmp_v64i8_v64i1:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: icmp_v64i8_v64i1:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpeqb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpeqb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: icmp_v64i8_v64i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vptestnmb %zmm0, %zmm0, %k0
+; AVX512-NEXT: kshiftrq $32, %k0, %k1
+; AVX512-NEXT: kxorq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $16, %k0, %k1
+; AVX512-NEXT: kxorq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $8, %k0, %k1
+; AVX512-NEXT: kxorq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $4, %k0, %k1
+; AVX512-NEXT: kxorq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $2, %k0, %k1
+; AVX512-NEXT: kxorq %k1, %k0, %k0
+; AVX512-NEXT: kshiftrq $1, %k0, %k1
+; AVX512-NEXT: kxorq %k1, %k0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %a = icmp eq <64 x i8> %0, zeroinitializer
+ %b = call i1 @llvm.experimental.vector.reduce.xor.v64i1(<64 x i1> %a)
+ ret i1 %b
+}
+
+declare i1 @llvm.experimental.vector.reduce.xor.v2i1(<2 x i1>)
+declare i1 @llvm.experimental.vector.reduce.xor.v4i1(<4 x i1>)
+declare i1 @llvm.experimental.vector.reduce.xor.v8i1(<8 x i1>)
+declare i1 @llvm.experimental.vector.reduce.xor.v16i1(<16 x i1>)
+declare i1 @llvm.experimental.vector.reduce.xor.v32i1(<32 x i1>)
+declare i1 @llvm.experimental.vector.reduce.xor.v64i1(<64 x i1>)
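
As a usage sketch only (an illustration, not part of the committed test file): the trunc-to-<N x i1> plus reduce.xor pattern exercised by the trunc_* tests above is how a parity reduction over the low bits of a byte vector would be expressed in IR, reusing the v16i1 declaration already present in the file.

define i1 @parity_v16i8(<16 x i8> %v) {
  ; keep only bit 0 of each byte as an i1 lane
  %bits = trunc <16 x i8> %v to <16 x i1>
  ; XOR-reduce the 16 lanes down to a single parity bit
  %parity = call i1 @llvm.experimental.vector.reduce.xor.v16i1(<16 x i1> %bits)
  ret i1 %parity
}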