[llvm] r304171 - [X86] Add tests for (ix bitcast (vxi1 and ...)). NFC.

Zvi Rackover via llvm-commits llvm-commits at lists.llvm.org
Mon May 29 12:00:57 PDT 2017


Author: zvi
Date: Mon May 29 14:00:57 2017
New Revision: 304171

URL: http://llvm.org/viewvc/llvm-project?rev=304171&view=rev
Log:
[X86] Add tests for (ix bitcast (vxi1 and ...)). NFC.

To be improved by D33311.
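
Each test below exercises the same IR shape: two vector compares whose
<N x i1> results are and'ed together, then bitcast to an iN scalar. For
example (illustrative, mirroring the v4i32 case in the first file):

  %x0 = icmp sgt <4 x i32> %a, %b    ; vxi1 setcc
  %x1 = icmp sgt <4 x i32> %c, %d    ; vxi1 setcc
  %y = and <4 x i1> %x0, %x1         ; vxi1 and
  %res = bitcast <4 x i1> %y to i4   ; ix bitcast

The checks capture the current lowering: pre-AVX512 targets scalarize the
bitcast (each lane is extracted, masked to one bit, stored to the stack,
and the packed result reloaded), while AVX512 folds both compares into
mask registers and reads the combined mask with kmovd.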

Added:
    llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-128.ll   (with props)
    llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-256.ll   (with props)

Added: llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-128.ll?rev=304171&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-128.ll (added)
+++ llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-128.ll Mon May 29 14:00:57 2017
@@ -0,0 +1,1155 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+ssse3 < %s | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx < %s | FileCheck %s --check-prefixes=AVX12,AVX1
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefixes=AVX12,AVX2
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefixes=AVX512
+
+define i8 @v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
+; SSE2-SSSE3-LABEL: v8i16:
+; SSE2-SSSE3:       ## BB#0:
+; SSE2-SSSE3-NEXT:    pcmpgtw %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtw %xmm3, %xmm2
+; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm2
+; SSE2-SSSE3-NEXT:    pextrw $7, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pextrw $6, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pextrw $5, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pextrw $4, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pextrw $3, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pextrw $2, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pextrw $1, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movd %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    retq
+;
+; AVX12-LABEL: v8i16:
+; AVX12:       ## BB#0:
+; AVX12-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpcmpgtw %xmm3, %xmm2, %xmm1
+; AVX12-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpextrw $7, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrw $6, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrw $5, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrw $4, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrw $3, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vmovd %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX12-NEXT:    retq
+;
+; AVX512-LABEL: v8i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpcmpgtw %xmm1, %xmm0, %k1
+; AVX512-NEXT:    vpcmpgtw %xmm3, %xmm2, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT:    retq
+  %x0 = icmp sgt <8 x i16> %a, %b
+  %x1 = icmp sgt <8 x i16> %c, %d
+  %y = and <8 x i1> %x0, %x1
+  %res = bitcast <8 x i1> %y to i8
+  ret i8 %res
+}
+
+define i4 @v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
+; SSE2-SSSE3-LABEL: v4i32:
+; SSE2-SSSE3:       ## BB#0:
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm2
+; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm2
+; SSE2-SSSE3-NEXT:    movd %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[3,1,2,3]
+; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
+; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
+; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    retq
+;
+; AVX12-LABEL: v4i32:
+; AVX12:       ## BB#0:
+; AVX12-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpcmpgtd %xmm3, %xmm2, %xmm1
+; AVX12-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vmovd %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX12-NEXT:    retq
+;
+; AVX512-LABEL: v4i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpcmpgtd %xmm1, %xmm0, %k1
+; AVX512-NEXT:    vpcmpgtd %xmm3, %xmm2, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    retq
+  %x0 = icmp sgt <4 x i32> %a, %b
+  %x1 = icmp sgt <4 x i32> %c, %d
+  %y = and <4 x i1> %x0, %x1
+  %res = bitcast <4 x i1> %y to i4
+  ret i4 %res
+}
+
+define i4 @v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d) {
+; SSE2-SSSE3-LABEL: v4f32:
+; SSE2-SSSE3:       ## BB#0:
+; SSE2-SSSE3-NEXT:    cmpltps %xmm0, %xmm1
+; SSE2-SSSE3-NEXT:    cmpltps %xmm2, %xmm3
+; SSE2-SSSE3-NEXT:    andps %xmm1, %xmm3
+; SSE2-SSSE3-NEXT:    movd %xmm3, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[3,1,2,3]
+; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
+; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
+; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    retq
+;
+; AVX12-LABEL: v4f32:
+; AVX12:       ## BB#0:
+; AVX12-NEXT:    vcmpltps %xmm0, %xmm1, %xmm0
+; AVX12-NEXT:    vcmpltps %xmm2, %xmm3, %xmm1
+; AVX12-NEXT:    vandps %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vmovd %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX12-NEXT:    retq
+;
+; AVX512-LABEL: v4f32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vcmpltps %xmm0, %xmm1, %k1
+; AVX512-NEXT:    vcmpltps %xmm2, %xmm3, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    retq
+  %x0 = fcmp ogt <4 x float> %a, %b
+  %x1 = fcmp ogt <4 x float> %c, %d
+  %y = and <4 x i1> %x0, %x1
+  %res = bitcast <4 x i1> %y to i4
+  ret i4 %res
+}
+
+define i16 @v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
+; SSE2-SSSE3-LABEL: v16i8:
+; SSE2-SSSE3:       ## BB#0:
+; SSE2-SSSE3-NEXT:    pcmpgtb %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtb %xmm3, %xmm2
+; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm2
+; SSE2-SSSE3-NEXT:    movdqa %xmm2, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    andb $1, %al
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    andb $1, %al
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    andb $1, %al
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    andb $1, %al
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    andb $1, %al
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    andb $1, %al
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    andb $1, %al
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    andb $1, %al
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    andb $1, %al
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    andb $1, %al
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    andb $1, %al
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    andb $1, %al
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    andb $1, %al
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    andb $1, %al
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-SSSE3-NEXT:    andb $1, %cl
+; SSE2-SSSE3-NEXT:    movb %cl, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    andb $1, %al
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT:    retq
+;
+; AVX12-LABEL: v16i8:
+; AVX12:       ## BB#0:
+; AVX12-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpcmpgtb %xmm3, %xmm2, %xmm1
+; AVX12-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpextrb $15, %xmm0, %eax
+; AVX12-NEXT:    andb $1, %al
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrb $14, %xmm0, %eax
+; AVX12-NEXT:    andb $1, %al
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrb $13, %xmm0, %eax
+; AVX12-NEXT:    andb $1, %al
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrb $12, %xmm0, %eax
+; AVX12-NEXT:    andb $1, %al
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrb $11, %xmm0, %eax
+; AVX12-NEXT:    andb $1, %al
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrb $10, %xmm0, %eax
+; AVX12-NEXT:    andb $1, %al
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrb $9, %xmm0, %eax
+; AVX12-NEXT:    andb $1, %al
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrb $8, %xmm0, %eax
+; AVX12-NEXT:    andb $1, %al
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrb $7, %xmm0, %eax
+; AVX12-NEXT:    andb $1, %al
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrb $6, %xmm0, %eax
+; AVX12-NEXT:    andb $1, %al
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrb $5, %xmm0, %eax
+; AVX12-NEXT:    andb $1, %al
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrb $4, %xmm0, %eax
+; AVX12-NEXT:    andb $1, %al
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrb $3, %xmm0, %eax
+; AVX12-NEXT:    andb $1, %al
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrb $2, %xmm0, %eax
+; AVX12-NEXT:    andb $1, %al
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrb $1, %xmm0, %eax
+; AVX12-NEXT:    andb $1, %al
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX12-NEXT:    andb $1, %al
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX12-NEXT:    retq
+;
+; AVX512-LABEL: v16i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpcmpgtb %xmm1, %xmm0, %k1
+; AVX512-NEXT:    vpcmpgtb %xmm3, %xmm2, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT:    retq
+  %x0 = icmp sgt <16 x i8> %a, %b
+  %x1 = icmp sgt <16 x i8> %c, %d
+  %y = and <16 x i1> %x0, %x1
+  %res = bitcast <16 x i1> %y to i16
+  ret i16 %res
+}
+
+define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
+; SSE2-SSSE3-LABEL: v2i8:
+; SSE2-SSSE3:       ## BB#0:
+; SSE2-SSSE3-NEXT:    psllq $56, %xmm2
+; SSE2-SSSE3-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-SSSE3-NEXT:    psrad $31, %xmm4
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
+; SSE2-SSSE3-NEXT:    psrad $24, %xmm2
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE2-SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-SSSE3-NEXT:    psllq $56, %xmm3
+; SSE2-SSSE3-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-SSSE3-NEXT:    psrad $31, %xmm4
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
+; SSE2-SSSE3-NEXT:    psrad $24, %xmm3
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
+; SSE2-SSSE3-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE2-SSSE3-NEXT:    psllq $56, %xmm0
+; SSE2-SSSE3-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-SSSE3-NEXT:    psrad $31, %xmm4
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
+; SSE2-SSSE3-NEXT:    psrad $24, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; SSE2-SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-SSSE3-NEXT:    psllq $56, %xmm1
+; SSE2-SSSE3-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-SSSE3-NEXT:    psrad $31, %xmm4
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
+; SSE2-SSSE3-NEXT:    psrad $24, %xmm1
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; SSE2-SSSE3-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
+; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm1
+; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm0
+; SSE2-SSSE3-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm5
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pand %xmm6, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
+; SSE2-SSSE3-NEXT:    por %xmm0, %xmm1
+; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm3
+; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm2
+; SSE2-SSSE3-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm3, %xmm2
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pand %xmm4, %xmm2
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT:    por %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    movq %xmm0, %rax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-SSSE3-NEXT:    movq %xmm0, %rax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: v2i8:
+; AVX1:       ## BB#0:
+; AVX1-NEXT:    vpsllq $56, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrad $31, %xmm3, %xmm4
+; AVX1-NEXT:    vpsrad $24, %xmm3, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpsllq $56, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrad $31, %xmm2, %xmm4
+; AVX1-NEXT:    vpsrad $24, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpsllq $56, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm4
+; AVX1-NEXT:    vpsrad $24, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpsllq $56, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm4
+; AVX1-NEXT:    vpsrad $24, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
+; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vmovq %xmm0, %rax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v2i8:
+; AVX2:       ## BB#0:
+; AVX2-NEXT:    vpsllq $56, %xmm3, %xmm3
+; AVX2-NEXT:    vpsrad $31, %xmm3, %xmm4
+; AVX2-NEXT:    vpsrad $24, %xmm3, %xmm3
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3]
+; AVX2-NEXT:    vpsllq $56, %xmm2, %xmm2
+; AVX2-NEXT:    vpsrad $31, %xmm2, %xmm4
+; AVX2-NEXT:    vpsrad $24, %xmm2, %xmm2
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3]
+; AVX2-NEXT:    vpsllq $56, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrad $31, %xmm1, %xmm4
+; AVX2-NEXT:    vpsrad $24, %xmm1, %xmm1
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3]
+; AVX2-NEXT:    vpsllq $56, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm4
+; AVX2-NEXT:    vpsrad $24, %xmm0, %xmm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3]
+; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovq %xmm0, %rax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v2i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllq $56, %xmm3, %xmm3
+; AVX512-NEXT:    vpsraq $56, %xmm3, %xmm3
+; AVX512-NEXT:    vpsllq $56, %xmm2, %xmm2
+; AVX512-NEXT:    vpsraq $56, %xmm2, %xmm2
+; AVX512-NEXT:    vpsllq $56, %xmm1, %xmm1
+; AVX512-NEXT:    vpsraq $56, %xmm1, %xmm1
+; AVX512-NEXT:    vpsllq $56, %xmm0, %xmm0
+; AVX512-NEXT:    vpsraq $56, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpgtq %xmm1, %xmm0, %k1
+; AVX512-NEXT:    vpcmpgtq %xmm3, %xmm2, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    retq
+  %x0 = icmp sgt <2 x i8> %a, %b
+  %x1 = icmp sgt <2 x i8> %c, %d
+  %y = and <2 x i1> %x0, %x1
+  %res = bitcast <2 x i1> %y to i2
+  ret i2 %res
+}
+
+define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
+; SSE2-SSSE3-LABEL: v2i16:
+; SSE2-SSSE3:       ## BB#0:
+; SSE2-SSSE3-NEXT:    psllq $48, %xmm2
+; SSE2-SSSE3-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-SSSE3-NEXT:    psrad $31, %xmm4
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
+; SSE2-SSSE3-NEXT:    psrad $16, %xmm2
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE2-SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-SSSE3-NEXT:    psllq $48, %xmm3
+; SSE2-SSSE3-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-SSSE3-NEXT:    psrad $31, %xmm4
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
+; SSE2-SSSE3-NEXT:    psrad $16, %xmm3
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
+; SSE2-SSSE3-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE2-SSSE3-NEXT:    psllq $48, %xmm0
+; SSE2-SSSE3-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-SSSE3-NEXT:    psrad $31, %xmm4
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
+; SSE2-SSSE3-NEXT:    psrad $16, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; SSE2-SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-SSSE3-NEXT:    psllq $48, %xmm1
+; SSE2-SSSE3-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-SSSE3-NEXT:    psrad $31, %xmm4
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
+; SSE2-SSSE3-NEXT:    psrad $16, %xmm1
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; SSE2-SSSE3-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
+; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm1
+; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm0
+; SSE2-SSSE3-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm5
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pand %xmm6, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
+; SSE2-SSSE3-NEXT:    por %xmm0, %xmm1
+; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm3
+; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm2
+; SSE2-SSSE3-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm3, %xmm2
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pand %xmm4, %xmm2
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT:    por %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    movq %xmm0, %rax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-SSSE3-NEXT:    movq %xmm0, %rax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: v2i16:
+; AVX1:       ## BB#0:
+; AVX1-NEXT:    vpsllq $48, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrad $31, %xmm3, %xmm4
+; AVX1-NEXT:    vpsrad $16, %xmm3, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpsllq $48, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrad $31, %xmm2, %xmm4
+; AVX1-NEXT:    vpsrad $16, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpsllq $48, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm4
+; AVX1-NEXT:    vpsrad $16, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpsllq $48, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm4
+; AVX1-NEXT:    vpsrad $16, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
+; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vmovq %xmm0, %rax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v2i16:
+; AVX2:       ## BB#0:
+; AVX2-NEXT:    vpsllq $48, %xmm3, %xmm3
+; AVX2-NEXT:    vpsrad $31, %xmm3, %xmm4
+; AVX2-NEXT:    vpsrad $16, %xmm3, %xmm3
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3]
+; AVX2-NEXT:    vpsllq $48, %xmm2, %xmm2
+; AVX2-NEXT:    vpsrad $31, %xmm2, %xmm4
+; AVX2-NEXT:    vpsrad $16, %xmm2, %xmm2
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3]
+; AVX2-NEXT:    vpsllq $48, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrad $31, %xmm1, %xmm4
+; AVX2-NEXT:    vpsrad $16, %xmm1, %xmm1
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3]
+; AVX2-NEXT:    vpsllq $48, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm4
+; AVX2-NEXT:    vpsrad $16, %xmm0, %xmm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3]
+; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovq %xmm0, %rax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v2i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllq $48, %xmm3, %xmm3
+; AVX512-NEXT:    vpsraq $48, %xmm3, %xmm3
+; AVX512-NEXT:    vpsllq $48, %xmm2, %xmm2
+; AVX512-NEXT:    vpsraq $48, %xmm2, %xmm2
+; AVX512-NEXT:    vpsllq $48, %xmm1, %xmm1
+; AVX512-NEXT:    vpsraq $48, %xmm1, %xmm1
+; AVX512-NEXT:    vpsllq $48, %xmm0, %xmm0
+; AVX512-NEXT:    vpsraq $48, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpgtq %xmm1, %xmm0, %k1
+; AVX512-NEXT:    vpcmpgtq %xmm3, %xmm2, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    retq
+  %x0 = icmp sgt <2 x i16> %a, %b
+  %x1 = icmp sgt <2 x i16> %c, %d
+  %y = and <2 x i1> %x0, %x1
+  %res = bitcast <2 x i1> %y to i2
+  ret i2 %res
+}
+
+define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
+; SSE2-SSSE3-LABEL: v2i32:
+; SSE2-SSSE3:       ## BB#0:
+; SSE2-SSSE3-NEXT:    psllq $32, %xmm2
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[1,3,2,3]
+; SSE2-SSSE3-NEXT:    psrad $31, %xmm2
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE2-SSSE3-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE2-SSSE3-NEXT:    psllq $32, %xmm3
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,3,2,3]
+; SSE2-SSSE3-NEXT:    psrad $31, %xmm3
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
+; SSE2-SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE2-SSSE3-NEXT:    psllq $32, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; SSE2-SSSE3-NEXT:    psrad $31, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; SSE2-SSSE3-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; SSE2-SSSE3-NEXT:    psllq $32, %xmm1
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
+; SSE2-SSSE3-NEXT:    psrad $31, %xmm1
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-SSSE3-NEXT:    movdqa {{.*#+}} xmm1 = [2147483648,0,2147483648,0]
+; SSE2-SSSE3-NEXT:    pxor %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    pxor %xmm1, %xmm3
+; SSE2-SSSE3-NEXT:    movdqa %xmm3, %xmm5
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm0, %xmm5
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm0, %xmm3
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pand %xmm6, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
+; SSE2-SSSE3-NEXT:    por %xmm0, %xmm3
+; SSE2-SSSE3-NEXT:    pxor %xmm1, %xmm2
+; SSE2-SSSE3-NEXT:    pxor %xmm1, %xmm4
+; SSE2-SSSE3-NEXT:    movdqa %xmm4, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm2, %xmm4
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm2
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT:    por %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    pand %xmm3, %xmm0
+; SSE2-SSSE3-NEXT:    movq %xmm0, %rax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-SSSE3-NEXT:    movq %xmm0, %rax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: v2i32:
+; AVX1:       ## BB#0:
+; AVX1-NEXT:    vpsllq $32, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrad $31, %xmm3, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrad $31, %xmm2, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
+; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vmovq %xmm0, %rax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: v2i32:
+; AVX2:       ## BB#0:
+; AVX2-NEXT:    vpsllq $32, %xmm3, %xmm3
+; AVX2-NEXT:    vpsrad $31, %xmm3, %xmm4
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3]
+; AVX2-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX2-NEXT:    vpsrad $31, %xmm2, %xmm4
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3]
+; AVX2-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrad $31, %xmm1, %xmm4
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3]
+; AVX2-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm4
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3]
+; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovq %xmm0, %rax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v2i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllq $32, %xmm3, %xmm3
+; AVX512-NEXT:    vpsraq $32, %xmm3, %xmm3
+; AVX512-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX512-NEXT:    vpsraq $32, %xmm2, %xmm2
+; AVX512-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX512-NEXT:    vpsraq $32, %xmm1, %xmm1
+; AVX512-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX512-NEXT:    vpsraq $32, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpgtq %xmm1, %xmm0, %k1
+; AVX512-NEXT:    vpcmpgtq %xmm3, %xmm2, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    retq
+  %x0 = icmp sgt <2 x i32> %a, %b
+  %x1 = icmp sgt <2 x i32> %c, %d
+  %y = and <2 x i1> %x0, %x1
+  %res = bitcast <2 x i1> %y to i2
+  ret i2 %res
+}
+
+define i2 @v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i64> %d) {
+; SSE2-SSSE3-LABEL: v2i64:
+; SSE2-SSSE3:       ## BB#0:
+; SSE2-SSSE3-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,0,2147483648,0]
+; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm1
+; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm0
+; SSE2-SSSE3-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm5
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pand %xmm6, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
+; SSE2-SSSE3-NEXT:    por %xmm0, %xmm1
+; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm3
+; SSE2-SSSE3-NEXT:    pxor %xmm4, %xmm2
+; SSE2-SSSE3-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm0
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm3, %xmm2
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-SSSE3-NEXT:    pand %xmm4, %xmm2
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT:    por %xmm2, %xmm0
+; SSE2-SSSE3-NEXT:    pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    movq %xmm0, %rax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-SSSE3-NEXT:    movq %xmm0, %rax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    retq
+;
+; AVX12-LABEL: v2i64:
+; AVX12:       ## BB#0:
+; AVX12-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm1
+; AVX12-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vmovq %xmm0, %rax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX12-NEXT:    retq
+;
+; AVX512-LABEL: v2i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpcmpgtq %xmm1, %xmm0, %k1
+; AVX512-NEXT:    vpcmpgtq %xmm3, %xmm2, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    retq
+  %x0 = icmp sgt <2 x i64> %a, %b
+  %x1 = icmp sgt <2 x i64> %c, %d
+  %y = and <2 x i1> %x0, %x1
+  %res = bitcast <2 x i1> %y to i2
+  ret i2 %res
+}
+
+define i2 @v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %d) {
+; SSE2-SSSE3-LABEL: v2f64:
+; SSE2-SSSE3:       ## BB#0:
+; SSE2-SSSE3-NEXT:    cmpltpd %xmm0, %xmm1
+; SSE2-SSSE3-NEXT:    cmpltpd %xmm2, %xmm3
+; SSE2-SSSE3-NEXT:    andpd %xmm1, %xmm3
+; SSE2-SSSE3-NEXT:    movq %xmm3, %rax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
+; SSE2-SSSE3-NEXT:    movq %xmm0, %rax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    retq
+;
+; AVX12-LABEL: v2f64:
+; AVX12:       ## BB#0:
+; AVX12-NEXT:    vcmpltpd %xmm0, %xmm1, %xmm0
+; AVX12-NEXT:    vcmpltpd %xmm2, %xmm3, %xmm1
+; AVX12-NEXT:    vandpd %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vmovq %xmm0, %rax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX12-NEXT:    retq
+;
+; AVX512-LABEL: v2f64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vcmpltpd %xmm0, %xmm1, %k1
+; AVX512-NEXT:    vcmpltpd %xmm2, %xmm3, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    retq
+  %x0 = fcmp ogt <2 x double> %a, %b
+  %x1 = fcmp ogt <2 x double> %c, %d
+  %y = and <2 x i1> %x0, %x1
+  %res = bitcast <2 x i1> %y to i2
+  ret i2 %res
+}
+
+define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
+; SSE2-SSSE3-LABEL: v4i8:
+; SSE2-SSSE3:       ## BB#0:
+; SSE2-SSSE3-NEXT:    pslld $24, %xmm3
+; SSE2-SSSE3-NEXT:    psrad $24, %xmm3
+; SSE2-SSSE3-NEXT:    pslld $24, %xmm2
+; SSE2-SSSE3-NEXT:    psrad $24, %xmm2
+; SSE2-SSSE3-NEXT:    pslld $24, %xmm1
+; SSE2-SSSE3-NEXT:    psrad $24, %xmm1
+; SSE2-SSSE3-NEXT:    pslld $24, %xmm0
+; SSE2-SSSE3-NEXT:    psrad $24, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm2
+; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm2
+; SSE2-SSSE3-NEXT:    movd %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[3,1,2,3]
+; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
+; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
+; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    retq
+;
+; AVX12-LABEL: v4i8:
+; AVX12:       ## BB#0:
+; AVX12-NEXT:    vpslld $24, %xmm3, %xmm3
+; AVX12-NEXT:    vpsrad $24, %xmm3, %xmm3
+; AVX12-NEXT:    vpslld $24, %xmm2, %xmm2
+; AVX12-NEXT:    vpsrad $24, %xmm2, %xmm2
+; AVX12-NEXT:    vpslld $24, %xmm1, %xmm1
+; AVX12-NEXT:    vpsrad $24, %xmm1, %xmm1
+; AVX12-NEXT:    vpslld $24, %xmm0, %xmm0
+; AVX12-NEXT:    vpsrad $24, %xmm0, %xmm0
+; AVX12-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpcmpgtd %xmm3, %xmm2, %xmm1
+; AVX12-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vmovd %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX12-NEXT:    retq
+;
+; AVX512-LABEL: v4i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpslld $24, %xmm3, %xmm3
+; AVX512-NEXT:    vpsrad $24, %xmm3, %xmm3
+; AVX512-NEXT:    vpslld $24, %xmm2, %xmm2
+; AVX512-NEXT:    vpsrad $24, %xmm2, %xmm2
+; AVX512-NEXT:    vpslld $24, %xmm1, %xmm1
+; AVX512-NEXT:    vpsrad $24, %xmm1, %xmm1
+; AVX512-NEXT:    vpslld $24, %xmm0, %xmm0
+; AVX512-NEXT:    vpsrad $24, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpgtd %xmm1, %xmm0, %k1
+; AVX512-NEXT:    vpcmpgtd %xmm3, %xmm2, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    retq
+  %x0 = icmp sgt <4 x i8> %a, %b
+  %x1 = icmp sgt <4 x i8> %c, %d
+  %y = and <4 x i1> %x0, %x1
+  %res = bitcast <4 x i1> %y to i4
+  ret i4 %res
+}
+
+define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
+; SSE2-SSSE3-LABEL: v4i16:
+; SSE2-SSSE3:       ## BB#0:
+; SSE2-SSSE3-NEXT:    pslld $16, %xmm3
+; SSE2-SSSE3-NEXT:    psrad $16, %xmm3
+; SSE2-SSSE3-NEXT:    pslld $16, %xmm2
+; SSE2-SSSE3-NEXT:    psrad $16, %xmm2
+; SSE2-SSSE3-NEXT:    pslld $16, %xmm1
+; SSE2-SSSE3-NEXT:    psrad $16, %xmm1
+; SSE2-SSSE3-NEXT:    pslld $16, %xmm0
+; SSE2-SSSE3-NEXT:    psrad $16, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm2
+; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm2
+; SSE2-SSSE3-NEXT:    movd %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[3,1,2,3]
+; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
+; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,2,3]
+; SSE2-SSSE3-NEXT:    movd %xmm0, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    retq
+;
+; AVX12-LABEL: v4i16:
+; AVX12:       ## BB#0:
+; AVX12-NEXT:    vpslld $16, %xmm3, %xmm3
+; AVX12-NEXT:    vpsrad $16, %xmm3, %xmm3
+; AVX12-NEXT:    vpslld $16, %xmm2, %xmm2
+; AVX12-NEXT:    vpsrad $16, %xmm2, %xmm2
+; AVX12-NEXT:    vpslld $16, %xmm1, %xmm1
+; AVX12-NEXT:    vpsrad $16, %xmm1, %xmm1
+; AVX12-NEXT:    vpslld $16, %xmm0, %xmm0
+; AVX12-NEXT:    vpsrad $16, %xmm0, %xmm0
+; AVX12-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpcmpgtd %xmm3, %xmm2, %xmm1
+; AVX12-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vmovd %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX12-NEXT:    retq
+;
+; AVX512-LABEL: v4i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpslld $16, %xmm3, %xmm3
+; AVX512-NEXT:    vpsrad $16, %xmm3, %xmm3
+; AVX512-NEXT:    vpslld $16, %xmm2, %xmm2
+; AVX512-NEXT:    vpsrad $16, %xmm2, %xmm2
+; AVX512-NEXT:    vpslld $16, %xmm1, %xmm1
+; AVX512-NEXT:    vpsrad $16, %xmm1, %xmm1
+; AVX512-NEXT:    vpslld $16, %xmm0, %xmm0
+; AVX512-NEXT:    vpsrad $16, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpgtd %xmm1, %xmm0, %k1
+; AVX512-NEXT:    vpcmpgtd %xmm3, %xmm2, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    retq
+  %x0 = icmp sgt <4 x i16> %a, %b
+  %x1 = icmp sgt <4 x i16> %c, %d
+  %y = and <4 x i1> %x0, %x1
+  %res = bitcast <4 x i1> %y to i4
+  ret i4 %res
+}
+
+define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
+; SSE2-SSSE3-LABEL: v8i8:
+; SSE2-SSSE3:       ## BB#0:
+; SSE2-SSSE3-NEXT:    psllw $8, %xmm3
+; SSE2-SSSE3-NEXT:    psraw $8, %xmm3
+; SSE2-SSSE3-NEXT:    psllw $8, %xmm2
+; SSE2-SSSE3-NEXT:    psraw $8, %xmm2
+; SSE2-SSSE3-NEXT:    psllw $8, %xmm1
+; SSE2-SSSE3-NEXT:    psraw $8, %xmm1
+; SSE2-SSSE3-NEXT:    psllw $8, %xmm0
+; SSE2-SSSE3-NEXT:    psraw $8, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtw %xmm1, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtw %xmm3, %xmm2
+; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm2
+; SSE2-SSSE3-NEXT:    pextrw $7, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pextrw $6, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pextrw $5, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pextrw $4, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pextrw $3, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pextrw $2, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    pextrw $1, %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movd %xmm2, %eax
+; SSE2-SSSE3-NEXT:    andl $1, %eax
+; SSE2-SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT:    retq
+;
+; AVX12-LABEL: v8i8:
+; AVX12:       ## BB#0:
+; AVX12-NEXT:    vpsllw $8, %xmm3, %xmm3
+; AVX12-NEXT:    vpsraw $8, %xmm3, %xmm3
+; AVX12-NEXT:    vpsllw $8, %xmm2, %xmm2
+; AVX12-NEXT:    vpsraw $8, %xmm2, %xmm2
+; AVX12-NEXT:    vpsllw $8, %xmm1, %xmm1
+; AVX12-NEXT:    vpsraw $8, %xmm1, %xmm1
+; AVX12-NEXT:    vpsllw $8, %xmm0, %xmm0
+; AVX12-NEXT:    vpsraw $8, %xmm0, %xmm0
+; AVX12-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpcmpgtw %xmm3, %xmm2, %xmm1
+; AVX12-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX12-NEXT:    vpextrw $7, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrw $6, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrw $5, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrw $4, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrw $3, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    vmovd %xmm0, %eax
+; AVX12-NEXT:    andl $1, %eax
+; AVX12-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX12-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX12-NEXT:    retq
+;
+; AVX512-LABEL: v8i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllw $8, %xmm3, %xmm3
+; AVX512-NEXT:    vpsraw $8, %xmm3, %xmm3
+; AVX512-NEXT:    vpsllw $8, %xmm2, %xmm2
+; AVX512-NEXT:    vpsraw $8, %xmm2, %xmm2
+; AVX512-NEXT:    vpsllw $8, %xmm1, %xmm1
+; AVX512-NEXT:    vpsraw $8, %xmm1, %xmm1
+; AVX512-NEXT:    vpsllw $8, %xmm0, %xmm0
+; AVX512-NEXT:    vpsraw $8, %xmm0, %xmm0
+; AVX512-NEXT:    vpcmpgtw %xmm1, %xmm0, %k1
+; AVX512-NEXT:    vpcmpgtw %xmm3, %xmm2, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT:    retq
+  %x0 = icmp sgt <8 x i8> %a, %b
+  %x1 = icmp sgt <8 x i8> %c, %d
+  %y = and <8 x i1> %x0, %x1
+  %res = bitcast <8 x i1> %y to i8
+  ret i8 %res
+}

Propchange: llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-128.ll
------------------------------------------------------------------------------
    svn:eol-style = native
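
(Note: per the log message, D33311 is expected to replace the per-lane
stack-store sequences above with a single mask extraction. A hypothetical
sketch of the improved SSE2 lowering for the v4i32 test, assuming the
combine selects MOVMSKPS; the checks committed here do not yet reflect
this:

  pcmpgtd  %xmm1, %xmm0
  pcmpgtd  %xmm3, %xmm2
  pand     %xmm0, %xmm2
  movmskps %xmm2, %eax   ; sign bit of each of the 4 lanes -> low 4 bits of eax
  retq
)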

Added: llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-256.ll?rev=304171&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-256.ll (added)
+++ llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-256.ll Mon May 29 14:00:57 2017
@@ -0,0 +1,403 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX2
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefix=AVX512
+
+define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
+; AVX2-LABEL: v4i64:
+; AVX2:       ## BB#0:
+; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm2, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v4i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpcmpgtq %ymm1, %ymm0, %k1
+; AVX512-NEXT:    vpcmpgtq %ymm3, %ymm2, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %x0 = icmp sgt <4 x i64> %a, %b
+  %x1 = icmp sgt <4 x i64> %c, %d
+  %y = and <4 x i1> %x0, %x1
+  %res = bitcast <4 x i1> %y to i4
+  ret i4 %res
+}
+
+define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double> %d) {
+; AVX2-LABEL: v4f64:
+; AVX2:       ## BB#0:
+; AVX2-NEXT:    vcmpltpd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vcmpltpd %ymm2, %ymm3, %ymm1
+; AVX2-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v4f64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vcmpltpd %ymm0, %ymm1, %k1
+; AVX512-NEXT:    vcmpltpd %ymm2, %ymm3, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %x0 = fcmp ogt <4 x double> %a, %b
+  %x1 = fcmp ogt <4 x double> %c, %d
+  %y = and <4 x i1> %x0, %x1
+  %res = bitcast <4 x i1> %y to i4
+  ret i4 %res
+}
+
+define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
+; AVX2-LABEL: v16i16:
+; AVX2:       ## BB#0:
+; AVX2-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpgtw %ymm3, %ymm2, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrb $15, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $14, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $13, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $12, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $11, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $10, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $9, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $8, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $7, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $6, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $5, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $4, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $3, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $2, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $1, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v16i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpcmpgtw %ymm1, %ymm0, %k1
+; AVX512-NEXT:    vpcmpgtw %ymm3, %ymm2, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %x0 = icmp sgt <16 x i16> %a, %b
+  %x1 = icmp sgt <16 x i16> %c, %d
+  %y = and <16 x i1> %x0, %x1
+  %res = bitcast <16 x i1> %y to i16
+  ret i16 %res
+}
+
+define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
+; AVX2-LABEL: v8i32:
+; AVX2:       ## BB#0:
+; AVX2-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpgtd %ymm3, %ymm2, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrw $7, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $6, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $5, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $4, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $3, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v8i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpcmpgtd %ymm1, %ymm0, %k1
+; AVX512-NEXT:    vpcmpgtd %ymm3, %ymm2, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %x0 = icmp sgt <8 x i32> %a, %b
+  %x1 = icmp sgt <8 x i32> %c, %d
+  %y = and <8 x i1> %x0, %x1
+  %res = bitcast <8 x i1> %y to i8
+  ret i8 %res
+}
+
+define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d) {
+; AVX2-LABEL: v8f32:
+; AVX2:       ## BB#0:
+; AVX2-NEXT:    vcmpltps %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vcmpltps %ymm2, %ymm3, %ymm1
+; AVX2-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrw $7, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $6, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $5, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $4, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $3, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v8f32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vcmpltps %ymm0, %ymm1, %k1
+; AVX512-NEXT:    vcmpltps %ymm2, %ymm3, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %x0 = fcmp ogt <8 x float> %a, %b
+  %x1 = fcmp ogt <8 x float> %c, %d
+  %y = and <8 x i1> %x0, %x1
+  %res = bitcast <8 x i1> %y to i8
+  ret i8 %res
+}
+
+define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
+; AVX2-LABEL: v32i8:
+; AVX2:       ## BB#0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:  Lcfi0:
+; AVX2-NEXT:    .cfi_def_cfa_offset 16
+; AVX2-NEXT:  Lcfi1:
+; AVX2-NEXT:    .cfi_offset %rbp, -16
+; AVX2-NEXT:    movq %rsp, %rbp
+; AVX2-NEXT:  Lcfi2:
+; AVX2-NEXT:    .cfi_def_cfa_register %rbp
+; AVX2-NEXT:    andq $-32, %rsp
+; AVX2-NEXT:    subq $32, %rsp
+; AVX2-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpcmpgtb %ymm3, %ymm2, %ymm1
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpextrb $15, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $14, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $13, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $12, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $11, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $10, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $9, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $8, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $7, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $6, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $5, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $4, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $3, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $2, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $1, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $0, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $15, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $14, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $13, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $12, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $11, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $10, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $9, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $8, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $7, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $6, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $5, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $4, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $3, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $2, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $1, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    movl (%rsp), %eax
+; AVX2-NEXT:    movq %rbp, %rsp
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v32i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpcmpgtb %ymm1, %ymm0, %k1
+; AVX512-NEXT:    vpcmpgtb %ymm3, %ymm2, %k0 {%k1}
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %x0 = icmp sgt <32 x i8> %a, %b
+  %x1 = icmp sgt <32 x i8> %c, %d
+  %y = and <32 x i1> %x0, %x1
+  %res = bitcast <32 x i1> %y to i32
+  ret i32 %res
+}
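
Aside, not part of the committed test file: every function above has the
same shape -- two lane-wise vector compares, an and of the two <N x i1>
results, and a bitcast of that mask vector to an iN scalar. A minimal
distillation of the pattern, adapted from the v8i32 case above (the
function name is illustrative only):

define i8 @two_cmps_to_mask(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
  ; produce two <8 x i1> masks with lane-wise signed compares
  %x0 = icmp sgt <8 x i32> %a, %b
  %x1 = icmp sgt <8 x i32> %c, %d
  ; combine the masks, then reinterpret the eight bits as a scalar i8
  %y = and <8 x i1> %x0, %x1
  %res = bitcast <8 x i1> %y to i8
  ret i8 %res
}

As the CHECK lines make explicit, AVX512 folds the entire pattern into
mask registers (two vpcmpgt instructions writing %k registers, then a
single kmovd), while the AVX2 path extracts every lane to %eax and
spills it to the stack a byte at a time before reloading the packed
result.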

Propchange: llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-256.ll
------------------------------------------------------------------------------
    svn:eol-style = native
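
A closing note on how to read the scalar return values in these tests:
a vector-to-integer bitcast leaves the bit pattern unchanged, so element
0 of the <N x i1> vector ends up in bit 0 (the least significant bit) of
the iN result on these x86-64 targets. A tiny worked example
(hypothetical, not from this commit) that constant-folds to 0x81, i.e.
bits 0 and 7 set:

define i8 @bit_order() {
  ; element 0 -> bit 0 (LSB), element 7 -> bit 7 (MSB)
  %m = bitcast <8 x i1> <i1 true, i1 false, i1 false, i1 false,
                         i1 false, i1 false, i1 false, i1 true> to i8
  ret i8 %m
}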



