[llvm] 24780e1 - [X86] MatchVectorAllEqualTest - add support for icmp(reduce_and(X),-1) allof reduction patterns
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sat Apr 1 07:38:56 PDT 2023
Author: Simon Pilgrim
Date: 2023-04-01T15:38:38+01:00
New Revision: 24780e13e5be1501e34330148137a10fa9965166
URL: https://github.com/llvm/llvm-project/commit/24780e13e5be1501e34330148137a10fa9965166
DIFF: https://github.com/llvm/llvm-project/commit/24780e13e5be1501e34330148137a10fa9965166.diff
LOG: [X86] MatchVectorAllEqualTest - add support for icmp(reduce_and(X),-1) allof reduction patterns
Also, improve codegen in LowerVectorAllEqual for X == -1 cases by reducing oversized vectors with an AND reduction
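For reference, a minimal IR sketch of the allof reduction shape that MatchVectorAllEqualTest now recognises (the function name is illustrative; the tests below exercise the same pattern for i8/i16/i32/i64 element types and both eq/ne predicates):

define i1 @allof_v8i64(<8 x i64> %a0) {
  ; AND-reduce the whole vector, then compare the scalar result against -1.
  %r = call i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %a0)
  %c = icmp eq i64 %r, -1
  ret i1 %c
}
declare i64 @llvm.vector.reduce.and.v8i64(<8 x i64>)

For vectors wider than the available test size, the operand is now halved with AND until it fits a 128/256/512-bit PTEST/KORTEST, replacing the scalar extract + cmp sequences removed in the test diffs below.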
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/vector-reduce-and-cmp.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index eacce087630d8..7db499647129d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -24383,15 +24383,25 @@ static SDValue LowerVectorAllEqual(const SDLoc &DL, SDValue LHS, SDValue RHS,
// Split down to 128/256/512-bit vector.
unsigned TestSize = UseKORTEST ? 512 : (Subtarget.hasAVX() ? 256 : 128);
if (VT.getSizeInBits() > TestSize) {
- // Convert to a ICMP_EQ(XOR(LHS,RHS),0) pattern.
- SDValue V = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
- while (VT.getSizeInBits() > TestSize) {
- auto Split = DAG.SplitVector(V, DL);
- VT = Split.first.getValueType();
- V = DAG.getNode(ISD::OR, DL, VT, Split.first, Split.second);
+ if (isAllOnesOrAllOnesSplat(RHS)) {
+ // If ICMP(LHS,-1) - reduce using AND splits.
+ while (VT.getSizeInBits() > TestSize) {
+ auto Split = DAG.SplitVector(LHS, DL);
+ VT = Split.first.getValueType();
+ LHS = DAG.getNode(ISD::AND, DL, VT, Split.first, Split.second);
+ }
+ RHS = DAG.getAllOnesConstant(DL, VT);
+ } else {
+ // Convert to a ICMP_EQ(XOR(LHS,RHS),0) pattern.
+ SDValue V = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
+ while (VT.getSizeInBits() > TestSize) {
+ auto Split = DAG.SplitVector(V, DL);
+ VT = Split.first.getValueType();
+ V = DAG.getNode(ISD::OR, DL, VT, Split.first, Split.second);
+ }
+ LHS = V;
+ RHS = DAG.getConstant(0, DL, VT);
}
- LHS = V;
- RHS = DAG.getConstant(0, DL, VT);
}
if (UseKORTEST && VT.is512BitVector()) {
@@ -24496,14 +24506,18 @@ static SDValue MatchVectorAllEqualTest(SDValue LHS, SDValue RHS,
return V;
}
- // TODO: Add CmpAllOnes support.
- if (CmpNull && Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
+ // Match icmp(reduce_or(X),0) anyof reduction patterns.
+ // Match icmp(reduce_and(X),-1) allof reduction patterns.
+ if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
+ ISD::NodeType LogicOp = CmpNull ? ISD::OR : ISD::AND;
ISD::NodeType BinOp;
if (SDValue Match =
- DAG.matchBinOpReduction(Op.getNode(), BinOp, {ISD::OR})) {
- if (SDValue V =
- LowerVectorAllZero(DL, Match, CC, Mask, Subtarget, DAG, X86CC))
- return V;
+ DAG.matchBinOpReduction(Op.getNode(), BinOp, {LogicOp})) {
+ EVT MatchVT = Match.getValueType();
+ return LowerVectorAllEqual(DL, Match,
+ CmpNull ? DAG.getConstant(0, DL, MatchVT)
+ : DAG.getAllOnesConstant(DL, MatchVT),
+ CC, Mask, Subtarget, DAG, X86CC);
}
}
diff --git a/llvm/test/CodeGen/X86/vector-reduce-and-cmp.ll b/llvm/test/CodeGen/X86/vector-reduce-and-cmp.ll
index 80728793bbf3b..7f20ef7367e90 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-and-cmp.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-and-cmp.ll
@@ -12,128 +12,185 @@
;
define i1 @test_v2i64(<2 x i64> %a0) {
-; SSE-LABEL: test_v2i64:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movq %xmm1, %rax
-; SSE-NEXT: cmpq $-1, %rax
-; SSE-NEXT: sete %al
-; SSE-NEXT: retq
+; SSE2-LABEL: test_v2i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT: movmskps %xmm1, %eax
+; SSE2-NEXT: xorl $15, %eax
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
;
-; AVX-LABEL: test_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovq %xmm0, %rax
-; AVX-NEXT: cmpq $-1, %rax
-; AVX-NEXT: sete %al
-; AVX-NEXT: retq
+; SSE41-LABEL: test_v2i64:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm0
+; SSE41-NEXT: setb %al
+; SSE41-NEXT: retq
+;
+; AVX1OR2-LABEL: test_v2i64:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1OR2-NEXT: vpxor %xmm1, %xmm0, %xmm1
+; AVX1OR2-NEXT: vptest %xmm1, %xmm0
+; AVX1OR2-NEXT: setb %al
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: test_v2i64:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm1
+; AVX512F-NEXT: vptest %xmm1, %xmm0
+; AVX512F-NEXT: setb %al
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v2i64:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm1
+; AVX512BW-NEXT: vptest %xmm1, %xmm0
+; AVX512BW-NEXT: setb %al
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: test_v2i64:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vmovdqa %xmm0, %xmm1
+; AVX512BWVL-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm1
+; AVX512BWVL-NEXT: vptest %xmm1, %xmm0
+; AVX512BWVL-NEXT: setb %al
+; AVX512BWVL-NEXT: retq
%1 = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> %a0)
%2 = icmp eq i64 %1, -1
ret i1 %2
}
define i1 @test_v4i64(<4 x i64> %a0) {
-; SSE-LABEL: test_v4i64:
-; SSE: # %bb.0:
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movq %xmm1, %rax
-; SSE-NEXT: cmpq $-1, %rax
-; SSE-NEXT: setne %al
-; SSE-NEXT: retq
+; SSE2-LABEL: test_v4i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT: movmskps %xmm1, %eax
+; SSE2-NEXT: xorl $15, %eax
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v4i64:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm0
+; SSE41-NEXT: setae %al
+; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v4i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: cmpq $-1, %rax
-; AVX1-NEXT: setne %al
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm1
+; AVX1-NEXT: vptest %ymm1, %ymm0
+; AVX1-NEXT: setae %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: cmpq $-1, %rax
-; AVX2-NEXT: setne %al
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vptest %ymm1, %ymm0
+; AVX2-NEXT: setae %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_v4i64:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: cmpq $-1, %rax
-; AVX512-NEXT: setne %al
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_v4i64:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm1
+; AVX512F-NEXT: vptest %ymm1, %ymm0
+; AVX512F-NEXT: setae %al
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v4i64:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm1
+; AVX512BW-NEXT: vptest %ymm1, %ymm0
+; AVX512BW-NEXT: setae %al
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: test_v4i64:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vmovdqa %ymm0, %ymm1
+; AVX512BWVL-NEXT: vpternlogq $15, %ymm0, %ymm0, %ymm1
+; AVX512BWVL-NEXT: vptest %ymm1, %ymm0
+; AVX512BWVL-NEXT: setae %al
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
%1 = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %a0)
%2 = icmp ne i64 %1, -1
ret i1 %2
}
define i1 @test_v8i64(<8 x i64> %a0) {
-; SSE-LABEL: test_v8i64:
-; SSE: # %bb.0:
-; SSE-NEXT: pand %xmm3, %xmm1
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movq %xmm1, %rax
-; SSE-NEXT: cmpq $-1, %rax
-; SSE-NEXT: sete %al
-; SSE-NEXT: retq
+; SSE2-LABEL: test_v8i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT: movmskps %xmm1, %eax
+; SSE2-NEXT: xorl $15, %eax
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v8i64:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pand %xmm3, %xmm1
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm0
+; SSE41-NEXT: setb %al
+; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: cmpq $-1, %rax
-; AVX1-NEXT: sete %al
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm1
+; AVX1-NEXT: vptest %ymm1, %ymm0
+; AVX1-NEXT: setb %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: cmpq $-1, %rax
-; AVX2-NEXT: sete %al
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vptest %ymm1, %ymm0
+; AVX2-NEXT: setb %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: cmpq $-1, %rax
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpcmpneqd %zmm1, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: sete %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -143,34 +200,47 @@ define i1 @test_v8i64(<8 x i64> %a0) {
}
define i1 @test_v16i64(<16 x i64> %a0) {
-; SSE-LABEL: test_v16i64:
-; SSE: # %bb.0:
-; SSE-NEXT: pand %xmm6, %xmm2
-; SSE-NEXT: pand %xmm4, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pand %xmm7, %xmm3
-; SSE-NEXT: pand %xmm5, %xmm1
-; SSE-NEXT: pand %xmm3, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movq %xmm0, %rax
-; SSE-NEXT: cmpq $-1, %rax
-; SSE-NEXT: setne %al
-; SSE-NEXT: retq
+; SSE2-LABEL: test_v16i64:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand %xmm7, %xmm3
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm6, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT: movmskps %xmm1, %eax
+; SSE2-NEXT: xorl $15, %eax
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v16i64:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pand %xmm7, %xmm3
+; SSE41-NEXT: pand %xmm5, %xmm1
+; SSE41-NEXT: pand %xmm3, %xmm1
+; SSE41-NEXT: pand %xmm6, %xmm2
+; SSE41-NEXT: pand %xmm4, %xmm0
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm0
+; SSE41-NEXT: setae %al
+; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: cmpq $-1, %rax
-; AVX1-NEXT: setne %al
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm1
+; AVX1-NEXT: vptest %ymm1, %ymm0
+; AVX1-NEXT: setae %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -179,27 +249,19 @@ define i1 @test_v16i64(<16 x i64> %a0) {
; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: cmpq $-1, %rax
-; AVX2-NEXT: setne %al
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vptest %ymm1, %ymm0
+; AVX2-NEXT: setae %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: cmpq $-1, %rax
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpcmpneqd %zmm1, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: setne %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -215,19 +277,15 @@ define i1 @test_v16i64(<16 x i64> %a0) {
define i1 @test_v2i32(<2 x i32> %a0) {
; SSE-LABEL: test_v2i32:
; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: cmpl $-1, %eax
+; SSE-NEXT: movq %xmm0, %rax
+; SSE-NEXT: cmpq $-1, %rax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2i32:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: cmpl $-1, %eax
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: cmpq $-1, %rax
; AVX-NEXT: sete %al
; AVX-NEXT: retq
%1 = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> %a0)
@@ -236,148 +294,185 @@ define i1 @test_v2i32(<2 x i32> %a0) {
}
define i1 @test_v4i32(<4 x i32> %a0) {
-; SSE-LABEL: test_v4i32:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: cmpl $-1, %eax
-; SSE-NEXT: setne %al
-; SSE-NEXT: retq
+; SSE2-LABEL: test_v4i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT: movmskps %xmm1, %eax
+; SSE2-NEXT: xorl $15, %eax
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
;
-; AVX-LABEL: test_v4i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: cmpl $-1, %eax
-; AVX-NEXT: setne %al
-; AVX-NEXT: retq
+; SSE41-LABEL: test_v4i32:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm0
+; SSE41-NEXT: setae %al
+; SSE41-NEXT: retq
+;
+; AVX1OR2-LABEL: test_v4i32:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1OR2-NEXT: vpxor %xmm1, %xmm0, %xmm1
+; AVX1OR2-NEXT: vptest %xmm1, %xmm0
+; AVX1OR2-NEXT: setae %al
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: test_v4i32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm1
+; AVX512F-NEXT: vptest %xmm1, %xmm0
+; AVX512F-NEXT: setae %al
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v4i32:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm1
+; AVX512BW-NEXT: vptest %xmm1, %xmm0
+; AVX512BW-NEXT: setae %al
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: test_v4i32:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vmovdqa %xmm0, %xmm1
+; AVX512BWVL-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm1
+; AVX512BWVL-NEXT: vptest %xmm1, %xmm0
+; AVX512BWVL-NEXT: setae %al
+; AVX512BWVL-NEXT: retq
%1 = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %a0)
%2 = icmp ne i32 %1, -1
ret i1 %2
}
define i1 @test_v8i32(<8 x i32> %a0) {
-; SSE-LABEL: test_v8i32:
-; SSE: # %bb.0:
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: cmpl $-1, %eax
-; SSE-NEXT: sete %al
-; SSE-NEXT: retq
+; SSE2-LABEL: test_v8i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT: movmskps %xmm1, %eax
+; SSE2-NEXT: xorl $15, %eax
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v8i32:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm0
+; SSE41-NEXT: setb %al
+; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: cmpl $-1, %eax
-; AVX1-NEXT: sete %al
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm1
+; AVX1-NEXT: vptest %ymm1, %ymm0
+; AVX1-NEXT: setb %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: cmpl $-1, %eax
-; AVX2-NEXT: sete %al
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vptest %ymm1, %ymm0
+; AVX2-NEXT: setb %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_v8i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: cmpl $-1, %eax
-; AVX512-NEXT: sete %al
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_v8i32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm1
+; AVX512F-NEXT: vptest %ymm1, %ymm0
+; AVX512F-NEXT: setb %al
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v8i32:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm1
+; AVX512BW-NEXT: vptest %ymm1, %ymm0
+; AVX512BW-NEXT: setb %al
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: test_v8i32:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vmovdqa %ymm0, %ymm1
+; AVX512BWVL-NEXT: vpternlogq $15, %ymm0, %ymm0, %ymm1
+; AVX512BWVL-NEXT: vptest %ymm1, %ymm0
+; AVX512BWVL-NEXT: setb %al
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
%1 = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %a0)
%2 = icmp eq i32 %1, -1
ret i1 %2
}
define i1 @test_v16i32(<16 x i32> %a0) {
-; SSE-LABEL: test_v16i32:
-; SSE: # %bb.0:
-; SSE-NEXT: pand %xmm3, %xmm1
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: cmpl $-1, %eax
-; SSE-NEXT: setne %al
-; SSE-NEXT: retq
+; SSE2-LABEL: test_v16i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT: movmskps %xmm1, %eax
+; SSE2-NEXT: xorl $15, %eax
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v16i32:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pand %xmm3, %xmm1
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm0
+; SSE41-NEXT: setae %al
+; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: cmpl $-1, %eax
-; AVX1-NEXT: setne %al
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm1
+; AVX1-NEXT: vptest %ymm1, %ymm0
+; AVX1-NEXT: setae %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: cmpl $-1, %eax
-; AVX2-NEXT: setne %al
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vptest %ymm1, %ymm0
+; AVX2-NEXT: setae %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: cmpl $-1, %eax
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpcmpneqd %zmm1, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: setne %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -387,38 +482,47 @@ define i1 @test_v16i32(<16 x i32> %a0) {
}
define i1 @test_v32i32(<32 x i32> %a0) {
-; SSE-LABEL: test_v32i32:
-; SSE: # %bb.0:
-; SSE-NEXT: pand %xmm6, %xmm2
-; SSE-NEXT: pand %xmm4, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pand %xmm7, %xmm3
-; SSE-NEXT: pand %xmm5, %xmm1
-; SSE-NEXT: pand %xmm3, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: cmpl $-1, %eax
-; SSE-NEXT: sete %al
-; SSE-NEXT: retq
+; SSE2-LABEL: test_v32i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand %xmm7, %xmm3
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm6, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm1
+; SSE2-NEXT: movmskps %xmm1, %eax
+; SSE2-NEXT: xorl $15, %eax
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v32i32:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pand %xmm7, %xmm3
+; SSE41-NEXT: pand %xmm5, %xmm1
+; SSE41-NEXT: pand %xmm3, %xmm1
+; SSE41-NEXT: pand %xmm6, %xmm2
+; SSE41-NEXT: pand %xmm4, %xmm0
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm0
+; SSE41-NEXT: setb %al
+; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v32i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: cmpl $-1, %eax
-; AVX1-NEXT: sete %al
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm1
+; AVX1-NEXT: vptest %ymm1, %ymm0
+; AVX1-NEXT: setb %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -427,31 +531,19 @@ define i1 @test_v32i32(<32 x i32> %a0) {
; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: cmpl $-1, %eax
-; AVX2-NEXT: sete %al
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vptest %ymm1, %ymm0
+; AVX2-NEXT: setb %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v32i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: cmpl $-1, %eax
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpcmpneqd %zmm1, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: sete %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -467,20 +559,15 @@ define i1 @test_v32i32(<32 x i32> %a0) {
define i1 @test_v2i16(<2 x i16> %a0) {
; SSE-LABEL: test_v2i16:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: movd %xmm0, %eax
+; SSE-NEXT: cmpl $-1, %eax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2i16:
; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: cmpw $-1, %ax
+; AVX-NEXT: cmpl $-1, %eax
; AVX-NEXT: sete %al
; AVX-NEXT: retq
%1 = call i16 @llvm.vector.reduce.and.v2i16(<2 x i16> %a0)
@@ -491,24 +578,15 @@ define i1 @test_v2i16(<2 x i16> %a0) {
define i1 @test_v4i16(<4 x i16> %a0) {
; SSE-LABEL: test_v4i16:
; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: cmpw $-1, %ax
+; SSE-NEXT: movq %xmm0, %rax
+; SSE-NEXT: cmpq $-1, %rax
; SSE-NEXT: setne %al
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4i16:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpand %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: cmpw $-1, %ax
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: cmpq $-1, %rax
; AVX-NEXT: setne %al
; AVX-NEXT: retq
%1 = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> %a0)
@@ -517,171 +595,185 @@ define i1 @test_v4i16(<4 x i16> %a0) {
}
define i1 @test_v8i16(<8 x i16> %a0) {
-; SSE-LABEL: test_v8i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: cmpw $-1, %ax
-; SSE-NEXT: sete %al
-; SSE-NEXT: retq
+; SSE2-LABEL: test_v8i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE2-NEXT: pmovmskb %xmm1, %eax
+; SSE2-NEXT: xorl $65535, %eax # imm = 0xFFFF
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
;
-; AVX-LABEL: test_v8i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpand %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: cmpw $-1, %ax
-; AVX-NEXT: sete %al
-; AVX-NEXT: retq
+; SSE41-LABEL: test_v8i16:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm0
+; SSE41-NEXT: setb %al
+; SSE41-NEXT: retq
+;
+; AVX1OR2-LABEL: test_v8i16:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1OR2-NEXT: vpxor %xmm1, %xmm0, %xmm1
+; AVX1OR2-NEXT: vptest %xmm1, %xmm0
+; AVX1OR2-NEXT: setb %al
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: test_v8i16:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm1
+; AVX512F-NEXT: vptest %xmm1, %xmm0
+; AVX512F-NEXT: setb %al
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v8i16:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm1
+; AVX512BW-NEXT: vptest %xmm1, %xmm0
+; AVX512BW-NEXT: setb %al
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: test_v8i16:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vmovdqa %xmm0, %xmm1
+; AVX512BWVL-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm1
+; AVX512BWVL-NEXT: vptest %xmm1, %xmm0
+; AVX512BWVL-NEXT: setb %al
+; AVX512BWVL-NEXT: retq
%1 = call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %a0)
%2 = icmp eq i16 %1, -1
ret i1 %2
}
define i1 @test_v16i16(<16 x i16> %a0) {
-; SSE-LABEL: test_v16i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: cmpw $-1, %ax
-; SSE-NEXT: setne %al
-; SSE-NEXT: retq
+; SSE2-LABEL: test_v16i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE2-NEXT: pmovmskb %xmm1, %eax
+; SSE2-NEXT: xorl $65535, %eax # imm = 0xFFFF
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v16i16:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm0
+; SSE41-NEXT: setae %al
+; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
-; AVX1-NEXT: setne %al
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm1
+; AVX1-NEXT: vptest %ymm1, %ymm0
+; AVX1-NEXT: setae %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: cmpw $-1, %ax
-; AVX2-NEXT: setne %al
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vptest %ymm1, %ymm0
+; AVX2-NEXT: setae %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_v16i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: cmpw $-1, %ax
-; AVX512-NEXT: setne %al
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_v16i16:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm1
+; AVX512F-NEXT: vptest %ymm1, %ymm0
+; AVX512F-NEXT: setae %al
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v16i16:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm1
+; AVX512BW-NEXT: vptest %ymm1, %ymm0
+; AVX512BW-NEXT: setae %al
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: test_v16i16:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vmovdqa %ymm0, %ymm1
+; AVX512BWVL-NEXT: vpternlogq $15, %ymm0, %ymm0, %ymm1
+; AVX512BWVL-NEXT: vptest %ymm1, %ymm0
+; AVX512BWVL-NEXT: setae %al
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
%1 = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %a0)
%2 = icmp ne i16 %1, -1
ret i1 %2
}
define i1 @test_v32i16(<32 x i16> %a0) {
-; SSE-LABEL: test_v32i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pand %xmm3, %xmm1
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: cmpw $-1, %ax
-; SSE-NEXT: sete %al
-; SSE-NEXT: retq
+; SSE2-LABEL: test_v32i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE2-NEXT: pmovmskb %xmm1, %eax
+; SSE2-NEXT: xorl $65535, %eax # imm = 0xFFFF
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v32i16:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pand %xmm3, %xmm1
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm0
+; SSE41-NEXT: setb %al
+; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v32i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
-; AVX1-NEXT: sete %al
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm1
+; AVX1-NEXT: vptest %ymm1, %ymm0
+; AVX1-NEXT: setb %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: cmpw $-1, %ax
-; AVX2-NEXT: sete %al
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vptest %ymm1, %ymm0
+; AVX2-NEXT: setb %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v32i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: cmpw $-1, %ax
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpcmpneqd %zmm1, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: sete %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -691,43 +783,47 @@ define i1 @test_v32i16(<32 x i16> %a0) {
}
define i1 @test_v64i16(<64 x i16> %a0) {
-; SSE-LABEL: test_v64i16:
-; SSE: # %bb.0:
-; SSE-NEXT: pand %xmm6, %xmm2
-; SSE-NEXT: pand %xmm4, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pand %xmm7, %xmm3
-; SSE-NEXT: pand %xmm5, %xmm1
-; SSE-NEXT: pand %xmm3, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: cmpw $-1, %ax
-; SSE-NEXT: setne %al
-; SSE-NEXT: retq
+; SSE2-LABEL: test_v64i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand %xmm7, %xmm3
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm6, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE2-NEXT: pmovmskb %xmm1, %eax
+; SSE2-NEXT: xorl $65535, %eax # imm = 0xFFFF
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v64i16:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pand %xmm7, %xmm3
+; SSE41-NEXT: pand %xmm5, %xmm1
+; SSE41-NEXT: pand %xmm3, %xmm1
+; SSE41-NEXT: pand %xmm6, %xmm2
+; SSE41-NEXT: pand %xmm4, %xmm0
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm0
+; SSE41-NEXT: setae %al
+; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v64i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: cmpw $-1, %ax
-; AVX1-NEXT: setne %al
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm1
+; AVX1-NEXT: vptest %ymm1, %ymm0
+; AVX1-NEXT: setae %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -736,35 +832,19 @@ define i1 @test_v64i16(<64 x i16> %a0) {
; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: cmpw $-1, %ax
-; AVX2-NEXT: setne %al
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vptest %ymm1, %ymm0
+; AVX2-NEXT: setae %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v64i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: cmpw $-1, %ax
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpcmpneqd %zmm1, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: setne %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -780,20 +860,15 @@ define i1 @test_v64i16(<64 x i16> %a0) {
define i1 @test_v2i8(<2 x i8> %a0) {
; SSE-LABEL: test_v2i8:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrlw $8, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: cmpb $-1, %al
+; SSE-NEXT: movd %xmm0, %eax
+; SSE-NEXT: cmpw $-1, %ax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2i8:
; AVX: # %bb.0:
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: cmpb $-1, %al
+; AVX-NEXT: cmpw $-1, %ax
; AVX-NEXT: sete %al
; AVX-NEXT: retq
%1 = call i8 @llvm.vector.reduce.and.v2i8(<2 x i8> %a0)
@@ -804,25 +879,15 @@ define i1 @test_v2i8(<2 x i8> %a0) {
define i1 @test_v4i8(<4 x i8> %a0) {
; SSE-LABEL: test_v4i8:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrlw $8, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: cmpb $-1, %al
+; SSE-NEXT: cmpl $-1, %eax
; SSE-NEXT: setne %al
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4i8:
; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: cmpb $-1, %al
+; AVX-NEXT: cmpl $-1, %eax
; AVX-NEXT: setne %al
; AVX-NEXT: retq
%1 = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> %a0)
@@ -833,29 +898,15 @@ define i1 @test_v4i8(<4 x i8> %a0) {
define i1 @test_v8i8(<8 x i8> %a0) {
; SSE-LABEL: test_v8i8:
; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrlw $8, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: cmpb $-1, %al
+; SSE-NEXT: movq %xmm0, %rax
+; SSE-NEXT: cmpq $-1, %rax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i8:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: cmpb $-1, %al
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: cmpq $-1, %rax
; AVX-NEXT: sete %al
; AVX-NEXT: retq
%1 = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> %a0)
@@ -864,194 +915,185 @@ define i1 @test_v8i8(<8 x i8> %a0) {
}
define i1 @test_v16i8(<16 x i8> %a0) {
-; SSE-LABEL: test_v16i8:
-; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrlw $8, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: cmpb $-1, %al
-; SSE-NEXT: setne %al
-; SSE-NEXT: retq
+; SSE2-LABEL: test_v16i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE2-NEXT: pmovmskb %xmm1, %eax
+; SSE2-NEXT: xorl $65535, %eax # imm = 0xFFFF
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
;
-; AVX-LABEL: test_v16i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: cmpb $-1, %al
-; AVX-NEXT: setne %al
-; AVX-NEXT: retq
+; SSE41-LABEL: test_v16i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm0
+; SSE41-NEXT: setae %al
+; SSE41-NEXT: retq
+;
+; AVX1OR2-LABEL: test_v16i8:
+; AVX1OR2: # %bb.0:
+; AVX1OR2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1OR2-NEXT: vpxor %xmm1, %xmm0, %xmm1
+; AVX1OR2-NEXT: vptest %xmm1, %xmm0
+; AVX1OR2-NEXT: setae %al
+; AVX1OR2-NEXT: retq
+;
+; AVX512F-LABEL: test_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm1
+; AVX512F-NEXT: vptest %xmm1, %xmm0
+; AVX512F-NEXT: setae %al
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm1
+; AVX512BW-NEXT: vptest %xmm1, %xmm0
+; AVX512BW-NEXT: setae %al
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: test_v16i8:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vmovdqa %xmm0, %xmm1
+; AVX512BWVL-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm1
+; AVX512BWVL-NEXT: vptest %xmm1, %xmm0
+; AVX512BWVL-NEXT: setae %al
+; AVX512BWVL-NEXT: retq
%1 = call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %a0)
%2 = icmp ne i8 %1, -1
ret i1 %2
}
define i1 @test_v32i8(<32 x i8> %a0) {
-; SSE-LABEL: test_v32i8:
-; SSE: # %bb.0:
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrlw $8, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: cmpb $-1, %al
-; SSE-NEXT: sete %al
-; SSE-NEXT: retq
+; SSE2-LABEL: test_v32i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE2-NEXT: pmovmskb %xmm1, %eax
+; SSE2-NEXT: xorl $65535, %eax # imm = 0xFFFF
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v32i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm0
+; SSE41-NEXT: setb %al
+; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v32i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: cmpb $-1, %al
-; AVX1-NEXT: sete %al
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm1
+; AVX1-NEXT: vptest %ymm1, %ymm0
+; AVX1-NEXT: setb %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: cmpb $-1, %al
-; AVX2-NEXT: sete %al
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vptest %ymm1, %ymm0
+; AVX2-NEXT: setb %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_v32i8:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: cmpb $-1, %al
-; AVX512-NEXT: sete %al
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_v32i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1
+; AVX512F-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm1
+; AVX512F-NEXT: vptest %ymm1, %ymm0
+; AVX512F-NEXT: setb %al
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v32i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1
+; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm1
+; AVX512BW-NEXT: vptest %ymm1, %ymm0
+; AVX512BW-NEXT: setb %al
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: test_v32i8:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vmovdqa %ymm0, %ymm1
+; AVX512BWVL-NEXT: vpternlogq $15, %ymm0, %ymm0, %ymm1
+; AVX512BWVL-NEXT: vptest %ymm1, %ymm0
+; AVX512BWVL-NEXT: setb %al
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
%1 = call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %a0)
%2 = icmp eq i8 %1, -1
ret i1 %2
}
define i1 @test_v64i8(<64 x i8> %a0) {
-; SSE-LABEL: test_v64i8:
-; SSE: # %bb.0:
-; SSE-NEXT: pand %xmm3, %xmm1
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrlw $8, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: cmpb $-1, %al
-; SSE-NEXT: setne %al
-; SSE-NEXT: retq
+; SSE2-LABEL: test_v64i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE2-NEXT: pmovmskb %xmm1, %eax
+; SSE2-NEXT: xorl $65535, %eax # imm = 0xFFFF
+; SSE2-NEXT: setne %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v64i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pand %xmm3, %xmm1
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm0
+; SSE41-NEXT: setae %al
+; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v64i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: cmpb $-1, %al
-; AVX1-NEXT: setne %al
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm1
+; AVX1-NEXT: vptest %ymm1, %ymm0
+; AVX1-NEXT: setae %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v64i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: cmpb $-1, %al
-; AVX2-NEXT: setne %al
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vptest %ymm1, %ymm0
+; AVX2-NEXT: setae %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v64i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: cmpb $-1, %al
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpcmpneqd %zmm1, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: setne %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -1061,48 +1103,47 @@ define i1 @test_v64i8(<64 x i8> %a0) {
}
define i1 @test_v128i8(<128 x i8> %a0) {
-; SSE-LABEL: test_v128i8:
-; SSE: # %bb.0:
-; SSE-NEXT: pand %xmm6, %xmm2
-; SSE-NEXT: pand %xmm4, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pand %xmm7, %xmm3
-; SSE-NEXT: pand %xmm5, %xmm1
-; SSE-NEXT: pand %xmm3, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrlw $8, %xmm1
-; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: cmpb $-1, %al
-; SSE-NEXT: sete %al
-; SSE-NEXT: retq
+; SSE2-LABEL: test_v128i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand %xmm7, %xmm3
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm6, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; SSE2-NEXT: pmovmskb %xmm1, %eax
+; SSE2-NEXT: xorl $65535, %eax # imm = 0xFFFF
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_v128i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pand %xmm7, %xmm3
+; SSE41-NEXT: pand %xmm5, %xmm1
+; SSE41-NEXT: pand %xmm3, %xmm1
+; SSE41-NEXT: pand %xmm6, %xmm2
+; SSE41-NEXT: pand %xmm4, %xmm0
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: pand %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm1
+; SSE41-NEXT: ptest %xmm1, %xmm0
+; SSE41-NEXT: setb %al
+; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v128i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: cmpb $-1, %al
-; AVX1-NEXT: sete %al
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vcmptrueps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm1
+; AVX1-NEXT: vptest %ymm1, %ymm0
+; AVX1-NEXT: setb %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1111,39 +1152,19 @@ define i1 @test_v128i8(<128 x i8> %a0) {
; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: cmpb $-1, %al
-; AVX2-NEXT: sete %al
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vptest %ymm1, %ymm0
+; AVX2-NEXT: setb %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v128i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
-; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: cmpb $-1, %al
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512-NEXT: vpcmpneqd %zmm1, %zmm0, %k0
+; AVX512-NEXT: kortestw %k0, %k0
; AVX512-NEXT: sete %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -1177,10 +1198,3 @@ declare i8 @llvm.vector.reduce.and.v16i8(<16 x i8>)
declare i8 @llvm.vector.reduce.and.v32i8(<32 x i8>)
declare i8 @llvm.vector.reduce.and.v64i8(<64 x i8>)
declare i8 @llvm.vector.reduce.and.v128i8(<128 x i8>)
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; AVX1OR2: {{.*}}
-; AVX512BW: {{.*}}
-; AVX512BWVL: {{.*}}
-; AVX512F: {{.*}}
-; SSE2: {{.*}}
-; SSE41: {{.*}}