[llvm] r264867 - [X86][SSE] Test the legalization of vector comparison results
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed Mar 30 06:55:00 PDT 2016
Author: rksimon
Date: Wed Mar 30 08:55:00 2016
New Revision: 264867
URL: http://llvm.org/viewvc/llvm-project?rev=264867&view=rev
Log:
[X86][SSE] Test the legalization of vector comparison results
We are currently doing a REALLY bad job of packing the results of vector comparisons into their legalized <X x i1> equivalents - a mixture of PACKSS/PMOVMSKB would be much better here.
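As an illustration of the packing the log message has in mind (not part of the committed test, just a sketch using SSE2 intrinsics; the helper name cmpgt_v16i8_mask is hypothetical): a byte comparison such as PCMPGTB already yields an all-ones/all-zeros mask per lane, and PMOVMSKB can collapse that mask into a 16-bit scalar in one instruction, instead of the per-lane pextrb/movb sequences visible in the SSE codegen below.

  #include <emmintrin.h>  /* SSE2 intrinsics */
  #include <stdint.h>

  /* Hypothetical helper, not from this patch: pack the result of a
   * <16 x i8> signed-greater-than comparison into a 16-bit mask.
   * pcmpgtb sets each byte lane to 0xFF or 0x00; pmovmskb then
   * gathers the sign bit of every byte into a single scalar. */
  static inline uint16_t cmpgt_v16i8_mask(__m128i a, __m128i b) {
      __m128i cmp = _mm_cmpgt_epi8(a, b);       /* pcmpgtb */
      return (uint16_t)_mm_movemask_epi8(cmp);  /* pmovmskb */
  }

For wider-than-byte elements, PACKSSWB/PACKSSDW can first narrow the sign-extended comparison results before a final PMOVMSKB, which is the combination the log message suggests.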
Added:
llvm/trunk/test/CodeGen/X86/vector-compare-results.ll
Added: llvm/trunk/test/CodeGen/X86/vector-compare-results.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-compare-results.ll?rev=264867&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-compare-results.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-compare-results.ll Wed Mar 30 08:55:00 2016
@@ -0,0 +1,1971 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE42
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+
+;
+; 128-bit vector comparisons
+;
+
+define <2 x i1> @test_cmp_v2f64(<2 x double> %a0, <2 x double> %a1) nounwind {
+; SSE-LABEL: test_cmp_v2f64:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltpd %xmm0, %xmm1
+; SSE-NEXT: movapd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_cmp_v2f64:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %1 = fcmp ogt <2 x double> %a0, %a1
+ ret <2 x i1> %1
+}
+
+define <4 x i1> @test_cmp_v4f32(<4 x float> %a0, <4 x float> %a1) nounwind {
+; SSE-LABEL: test_cmp_v4f32:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltps %xmm0, %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_cmp_v4f32:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %1 = fcmp ogt <4 x float> %a0, %a1
+ ret <4 x i1> %1
+}
+
+define <2 x i1> @test_cmp_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
+; SSE2-LABEL: test_cmp_v2i64:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
+; SSE2-NEXT: pxor %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: test_cmp_v2i64:
+; SSE42: # BB#0:
+; SSE42-NEXT: pcmpgtq %xmm1, %xmm0
+; SSE42-NEXT: retq
+;
+; AVX-LABEL: test_cmp_v2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = icmp sgt <2 x i64> %a0, %a1
+ ret <2 x i1> %1
+}
+
+define <4 x i1> @test_cmp_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
+; SSE-LABEL: test_cmp_v4i32:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_cmp_v4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = icmp sgt <4 x i32> %a0, %a1
+ ret <4 x i1> %1
+}
+
+define <8 x i1> @test_cmp_v8i16(<8 x i16> %a0, <8 x i16> %a1) nounwind {
+; SSE-LABEL: test_cmp_v8i16:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_cmp_v8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = icmp sgt <8 x i16> %a0, %a1
+ ret <8 x i1> %1
+}
+
+define <16 x i1> @test_cmp_v16i8(<16 x i8> %a0, <16 x i8> %a1) nounwind {
+; SSE-LABEL: test_cmp_v16i8:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_cmp_v16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = icmp sgt <16 x i8> %a0, %a1
+ ret <16 x i1> %1
+}
+
+;
+; 256-bit vector comparisons
+;
+
+define <4 x i1> @test_cmp_v4f64(<4 x double> %a0, <4 x double> %a1) nounwind {
+; SSE2-LABEL: test_cmp_v4f64:
+; SSE2: # BB#0:
+; SSE2-NEXT: cmpltpd %xmm1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT: cmpltpd %xmm0, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: test_cmp_v4f64:
+; SSE42: # BB#0:
+; SSE42-NEXT: cmpltpd %xmm1, %xmm3
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,1,0,2]
+; SSE42-NEXT: cmpltpd %xmm0, %xmm2
+; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE42-NEXT: retq
+;
+; AVX1-LABEL: test_cmp_v4f64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_cmp_v4f64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,2,4,6,4,6]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %1 = fcmp ogt <4 x double> %a0, %a1
+ ret <4 x i1> %1
+}
+
+define <8 x i1> @test_cmp_v8f32(<8 x float> %a0, <8 x float> %a1) nounwind {
+; SSE2-LABEL: test_cmp_v8f32:
+; SSE2: # BB#0:
+; SSE2-NEXT: cmpltps %xmm1, %xmm3
+; SSE2-NEXT: pslld $16, %xmm3
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: cmpltps %xmm0, %xmm2
+; SSE2-NEXT: pslld $16, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: packssdw %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: test_cmp_v8f32:
+; SSE42: # BB#0:
+; SSE42-NEXT: cmpltps %xmm1, %xmm3
+; SSE42-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE42-NEXT: pshufb %xmm1, %xmm3
+; SSE42-NEXT: cmpltps %xmm0, %xmm2
+; SSE42-NEXT: pshufb %xmm1, %xmm2
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE42-NEXT: movdqa %xmm2, %xmm0
+; SSE42-NEXT: retq
+;
+; AVX1-LABEL: test_cmp_v8f32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_cmp_v8f32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %1 = fcmp ogt <8 x float> %a0, %a1
+ ret <8 x i1> %1
+}
+
+define <4 x i1> @test_cmp_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
+; SSE2-LABEL: test_cmp_v4i64:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
+; SSE2-NEXT: pxor %xmm4, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT: pxor %xmm4, %xmm2
+; SSE2-NEXT: pxor %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: test_cmp_v4i64:
+; SSE42: # BB#0:
+; SSE42-NEXT: pcmpgtq %xmm3, %xmm1
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
+; SSE42-NEXT: pcmpgtq %xmm2, %xmm0
+; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE42-NEXT: retq
+;
+; AVX1-LABEL: test_cmp_v4i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_cmp_v4i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,2,4,6,4,6]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %1 = icmp sgt <4 x i64> %a0, %a1
+ ret <4 x i1> %1
+}
+
+define <8 x i1> @test_cmp_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
+; SSE2-LABEL: test_cmp_v8i32:
+; SSE2: # BB#0:
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm1
+; SSE2-NEXT: pslld $16, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm0
+; SSE2-NEXT: pslld $16, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: test_cmp_v8i32:
+; SSE42: # BB#0:
+; SSE42-NEXT: pcmpgtd %xmm3, %xmm1
+; SSE42-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE42-NEXT: pshufb %xmm3, %xmm1
+; SSE42-NEXT: pcmpgtd %xmm2, %xmm0
+; SSE42-NEXT: pshufb %xmm3, %xmm0
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE42-NEXT: retq
+;
+; AVX1-LABEL: test_cmp_v8i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_cmp_v8i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %1 = icmp sgt <8 x i32> %a0, %a1
+ ret <8 x i1> %1
+}
+
+define <16 x i1> @test_cmp_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
+; SSE2-LABEL: test_cmp_v16i16:
+; SSE2: # BB#0:
+; SSE2-NEXT: pcmpgtw %xmm3, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pcmpgtw %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: test_cmp_v16i16:
+; SSE42: # BB#0:
+; SSE42-NEXT: pcmpgtw %xmm3, %xmm1
+; SSE42-NEXT: movdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE42-NEXT: pshufb %xmm3, %xmm1
+; SSE42-NEXT: pcmpgtw %xmm2, %xmm0
+; SSE42-NEXT: pshufb %xmm3, %xmm0
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE42-NEXT: retq
+;
+; AVX1-LABEL: test_cmp_v16i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_cmp_v16i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %1 = icmp sgt <16 x i16> %a0, %a1
+ ret <16 x i1> %1
+}
+
+define <32 x i1> @test_cmp_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
+; SSE2-LABEL: test_cmp_v32i8:
+; SSE2: # BB#0:
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm0
+; SSE2-NEXT: pcmpgtb %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movq %rdi, %rax
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: test_cmp_v32i8:
+; SSE42: # BB#0:
+; SSE42-NEXT: pcmpgtb %xmm2, %xmm0
+; SSE42-NEXT: pcmpgtb %xmm3, %xmm1
+; SSE42-NEXT: pextrb $15, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $14, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $13, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $12, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $11, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $10, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $9, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $8, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $7, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $6, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $5, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $4, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $3, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $2, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $1, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $0, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $15, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $14, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $13, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $12, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $11, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $10, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $9, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $8, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $7, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $6, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $5, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $4, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $3, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $2, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $1, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $0, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: movq %rdi, %rax
+; SSE42-NEXT: retq
+;
+; AVX1-LABEL: test_cmp_v32i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_cmp_v32i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %1 = icmp sgt <32 x i8> %a0, %a1
+ ret <32 x i1> %1
+}
+
+;
+; 512-bit vector comparisons
+;
+
+define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
+; SSE2-LABEL: test_cmp_v8f64:
+; SSE2: # BB#0:
+; SSE2-NEXT: cmpltpd %xmm3, %xmm7
+; SSE2-NEXT: cmpltpd %xmm1, %xmm5
+; SSE2-NEXT: pextrw $4, %xmm5, %eax
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3]
+; SSE2-NEXT: cmpltpd %xmm2, %xmm6
+; SSE2-NEXT: cmpltpd %xmm0, %xmm4
+; SSE2-NEXT: pextrw $4, %xmm4, %ecx
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; SSE2-NEXT: pextrw $4, %xmm7, %edx
+; SSE2-NEXT: movd %edx, %xmm0
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: pextrw $4, %xmm6, %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: test_cmp_v8f64:
+; SSE42: # BB#0:
+; SSE42-NEXT: cmpltpd %xmm3, %xmm7
+; SSE42-NEXT: xorpd %xmm3, %xmm3
+; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0],xmm3[1,2,3],xmm7[4],xmm3[5,6,7]
+; SSE42-NEXT: cmpltpd %xmm2, %xmm6
+; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0],xmm3[1,2,3],xmm6[4],xmm3[5,6,7]
+; SSE42-NEXT: packusdw %xmm7, %xmm6
+; SSE42-NEXT: cmpltpd %xmm1, %xmm5
+; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0],xmm3[1,2,3],xmm5[4],xmm3[5,6,7]
+; SSE42-NEXT: cmpltpd %xmm0, %xmm4
+; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3],xmm4[4],xmm3[5,6,7]
+; SSE42-NEXT: packusdw %xmm5, %xmm3
+; SSE42-NEXT: packusdw %xmm6, %xmm3
+; SSE42-NEXT: movdqa %xmm3, %xmm0
+; SSE42-NEXT: retq
+;
+; AVX1-LABEL: test_cmp_v8f64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_cmp_v8f64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,2,4,6,4,6]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
+; AVX2-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,0,2,4,6,4,6]
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,3,2,3]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %1 = fcmp ogt <8 x double> %a0, %a1
+ ret <8 x i1> %1
+}
+
+define <16 x i1> @test_cmp_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind {
+; SSE-LABEL: test_cmp_v16f32:
+; SSE: # BB#0:
+; SSE-NEXT: cmpltps %xmm3, %xmm7
+; SSE-NEXT: movaps {{.*#+}} xmm3 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE-NEXT: andps %xmm3, %xmm7
+; SSE-NEXT: cmpltps %xmm2, %xmm6
+; SSE-NEXT: andps %xmm3, %xmm6
+; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: cmpltps %xmm1, %xmm5
+; SSE-NEXT: andps %xmm3, %xmm5
+; SSE-NEXT: cmpltps %xmm0, %xmm4
+; SSE-NEXT: andps %xmm4, %xmm3
+; SSE-NEXT: packuswb %xmm5, %xmm3
+; SSE-NEXT: packuswb %xmm6, %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_cmp_v16f32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vmovaps {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; AVX1-NEXT: vandps %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vandps %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vcmpltps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vandps %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_cmp_v16f32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
+; AVX2-NEXT: vcmpltps %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %1 = fcmp ogt <16 x float> %a0, %a1
+ ret <16 x i1> %1
+}
+
+define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
+; SSE2-LABEL: test_cmp_v8i64:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,0,2147483648,0]
+; SSE2-NEXT: pxor %xmm8, %xmm7
+; SSE2-NEXT: pxor %xmm8, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm9
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm9
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm9[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm7, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm9[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm9
+; SSE2-NEXT: pxor %xmm8, %xmm5
+; SSE2-NEXT: pxor %xmm8, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm7[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm5, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm3, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm7[1,1,3,3]
+; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: pextrw $4, %xmm1, %eax
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3]
+; SSE2-NEXT: pxor %xmm8, %xmm6
+; SSE2-NEXT: pxor %xmm8, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm6, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm8, %xmm4
+; SSE2-NEXT: pxor %xmm8, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: pextrw $4, %xmm9, %ecx
+; SSE2-NEXT: movd %ecx, %xmm2
+; SSE2-NEXT: pextrw $4, %xmm0, %ecx
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: pextrw $4, %xmm3, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: test_cmp_v8i64:
+; SSE42: # BB#0:
+; SSE42-NEXT: pcmpgtq %xmm7, %xmm3
+; SSE42-NEXT: pxor %xmm7, %xmm7
+; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm7[1,2,3],xmm3[4],xmm7[5,6,7]
+; SSE42-NEXT: pcmpgtq %xmm6, %xmm2
+; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm7[1,2,3],xmm2[4],xmm7[5,6,7]
+; SSE42-NEXT: packusdw %xmm3, %xmm2
+; SSE42-NEXT: pcmpgtq %xmm5, %xmm1
+; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm7[1,2,3],xmm1[4],xmm7[5,6,7]
+; SSE42-NEXT: pcmpgtq %xmm4, %xmm0
+; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm7[1,2,3],xmm0[4],xmm7[5,6,7]
+; SSE42-NEXT: packusdw %xmm1, %xmm0
+; SSE42-NEXT: packusdw %xmm2, %xmm0
+; SSE42-NEXT: retq
+;
+; AVX1-LABEL: test_cmp_v8i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2,3],xmm4[4],xmm5[5,6,7]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1,2,3],xmm1[4],xmm5[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm5[1,2,3],xmm3[4],xmm5[5,6,7]
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm5[1,2,3],xmm0[4],xmm5[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_cmp_v8i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,2,4,6,4,6]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
+; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,0,2,4,6,4,6]
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,3,2,3]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %1 = icmp sgt <8 x i64> %a0, %a1
+ ret <8 x i1> %1
+}
+
+define <16 x i1> @test_cmp_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
+; SSE-LABEL: test_cmp_v16i32:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpgtd %xmm7, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE-NEXT: pand %xmm7, %xmm3
+; SSE-NEXT: pcmpgtd %xmm6, %xmm2
+; SSE-NEXT: pand %xmm7, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pcmpgtd %xmm5, %xmm1
+; SSE-NEXT: pand %xmm7, %xmm1
+; SSE-NEXT: pcmpgtd %xmm4, %xmm0
+; SSE-NEXT: pand %xmm7, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_cmp_v16i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpcmpgtd %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpgtd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpcmpgtd %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_cmp_v16i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128,0,1,4,5,8,9,12,13,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %1 = icmp sgt <16 x i32> %a0, %a1
+ ret <16 x i1> %1
+}
+
+define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
+; SSE2-LABEL: test_cmp_v32i16:
+; SSE2: # BB#0:
+; SSE2-NEXT: pcmpgtw %xmm5, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: pcmpgtw %xmm4, %xmm0
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: pcmpgtw %xmm7, %xmm3
+; SSE2-NEXT: pand %xmm5, %xmm3
+; SSE2-NEXT: pcmpgtw %xmm6, %xmm2
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movq %rdi, %rax
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: test_cmp_v32i16:
+; SSE42: # BB#0:
+; SSE42-NEXT: pcmpgtw %xmm5, %xmm1
+; SSE42-NEXT: movdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE42-NEXT: pshufb %xmm5, %xmm1
+; SSE42-NEXT: pcmpgtw %xmm4, %xmm0
+; SSE42-NEXT: pshufb %xmm5, %xmm0
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE42-NEXT: pcmpgtw %xmm7, %xmm3
+; SSE42-NEXT: pshufb %xmm5, %xmm3
+; SSE42-NEXT: pcmpgtw %xmm6, %xmm2
+; SSE42-NEXT: pshufb %xmm5, %xmm2
+; SSE42-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE42-NEXT: pextrb $15, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $14, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $13, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $12, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $11, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $10, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $9, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $8, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $7, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $6, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $5, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $4, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $3, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $2, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $1, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $0, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $15, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $14, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $13, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $12, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $11, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $10, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $9, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $8, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $7, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $6, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $5, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $4, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $3, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $2, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $1, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $0, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: movq %rdi, %rax
+; SSE42-NEXT: retq
+;
+; AVX1-LABEL: test_cmp_v32i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpcmpgtw %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpgtw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpcmpgtw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpshufb %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_cmp_v32i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm3
+; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %1 = icmp sgt <32 x i16> %a0, %a1
+ ret <32 x i1> %1
+}
+
+define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
+; SSE2-LABEL: test_cmp_v64i8:
+; SSE2: # BB#0:
+; SSE2-NEXT: pcmpgtb %xmm4, %xmm0
+; SSE2-NEXT: pcmpgtb %xmm5, %xmm1
+; SSE2-NEXT: pcmpgtb %xmm6, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm7, %xmm3
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 6(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 4(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, 2(%rdi)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: andb $1, %al
+; SSE2-NEXT: movb %al, (%rdi)
+; SSE2-NEXT: movq %rdi, %rax
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: test_cmp_v64i8:
+; SSE42: # BB#0:
+; SSE42-NEXT: pcmpgtb %xmm4, %xmm0
+; SSE42-NEXT: pcmpgtb %xmm5, %xmm1
+; SSE42-NEXT: pcmpgtb %xmm6, %xmm2
+; SSE42-NEXT: pcmpgtb %xmm7, %xmm3
+; SSE42-NEXT: pextrb $15, %xmm3, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 6(%rdi)
+; SSE42-NEXT: pextrb $14, %xmm3, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 6(%rdi)
+; SSE42-NEXT: pextrb $13, %xmm3, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 6(%rdi)
+; SSE42-NEXT: pextrb $12, %xmm3, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 6(%rdi)
+; SSE42-NEXT: pextrb $11, %xmm3, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 6(%rdi)
+; SSE42-NEXT: pextrb $10, %xmm3, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 6(%rdi)
+; SSE42-NEXT: pextrb $9, %xmm3, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 6(%rdi)
+; SSE42-NEXT: pextrb $8, %xmm3, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 6(%rdi)
+; SSE42-NEXT: pextrb $7, %xmm3, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 6(%rdi)
+; SSE42-NEXT: pextrb $6, %xmm3, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 6(%rdi)
+; SSE42-NEXT: pextrb $5, %xmm3, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 6(%rdi)
+; SSE42-NEXT: pextrb $4, %xmm3, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 6(%rdi)
+; SSE42-NEXT: pextrb $3, %xmm3, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 6(%rdi)
+; SSE42-NEXT: pextrb $2, %xmm3, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 6(%rdi)
+; SSE42-NEXT: pextrb $1, %xmm3, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 6(%rdi)
+; SSE42-NEXT: pextrb $0, %xmm3, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 6(%rdi)
+; SSE42-NEXT: pextrb $15, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 4(%rdi)
+; SSE42-NEXT: pextrb $14, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 4(%rdi)
+; SSE42-NEXT: pextrb $13, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 4(%rdi)
+; SSE42-NEXT: pextrb $12, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 4(%rdi)
+; SSE42-NEXT: pextrb $11, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 4(%rdi)
+; SSE42-NEXT: pextrb $10, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 4(%rdi)
+; SSE42-NEXT: pextrb $9, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 4(%rdi)
+; SSE42-NEXT: pextrb $8, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 4(%rdi)
+; SSE42-NEXT: pextrb $7, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 4(%rdi)
+; SSE42-NEXT: pextrb $6, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 4(%rdi)
+; SSE42-NEXT: pextrb $5, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 4(%rdi)
+; SSE42-NEXT: pextrb $4, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 4(%rdi)
+; SSE42-NEXT: pextrb $3, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 4(%rdi)
+; SSE42-NEXT: pextrb $2, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 4(%rdi)
+; SSE42-NEXT: pextrb $1, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 4(%rdi)
+; SSE42-NEXT: pextrb $0, %xmm2, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 4(%rdi)
+; SSE42-NEXT: pextrb $15, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $14, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $13, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $12, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $11, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $10, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $9, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $8, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $7, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $6, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $5, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $4, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $3, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $2, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $1, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $0, %xmm1, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, 2(%rdi)
+; SSE42-NEXT: pextrb $15, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $14, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $13, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $12, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $11, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $10, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $9, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $8, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $7, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $6, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $5, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $4, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $3, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $2, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $1, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: pextrb $0, %xmm0, %eax
+; SSE42-NEXT: andb $1, %al
+; SSE42-NEXT: movb %al, (%rdi)
+; SSE42-NEXT: movq %rdi, %rax
+; SSE42-NEXT: retq
+;
+; AVX1-LABEL: test_cmp_v64i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpgtb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpextrb $15, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $14, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $13, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $12, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $11, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $10, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $9, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $8, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $7, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $6, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $5, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $4, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $3, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $2, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $1, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $0, %xmm1, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $15, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $14, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $13, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $12, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $11, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $10, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $9, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $8, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $7, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $6, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $5, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $4, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $3, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $2, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $1, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $0, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, 4(%rdi)
+; AVX1-NEXT: vpextrb $15, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $14, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $13, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $12, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $11, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $10, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $9, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $8, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $7, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $6, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $5, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $4, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $3, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $2, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $1, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $15, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $14, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $13, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $12, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $11, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $10, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $9, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $8, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $7, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $6, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $5, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $4, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $3, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $2, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $1, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: vpextrb $0, %xmm4, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, (%rdi)
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_cmp_v64i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpextrb $15, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $14, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $13, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $12, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $11, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $10, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $9, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $8, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $7, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $6, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $5, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $4, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $3, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $2, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $1, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $0, %xmm2, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $15, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $14, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $13, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $12, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $11, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $10, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $9, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $8, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $7, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $6, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $5, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $4, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $3, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $2, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $1, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vpextrb $0, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, 4(%rdi)
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrb $15, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $14, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $13, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $12, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $11, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $10, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $9, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $8, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $7, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $6, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $5, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $4, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $3, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $2, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $1, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $0, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $15, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $14, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $13, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $12, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $11, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $10, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $9, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $8, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $7, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $6, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $5, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $4, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $3, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $2, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $1, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rdi)
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %1 = icmp sgt <64 x i8> %a0, %a1
+ ret <64 x i1> %1
+}