[llvm] r303328 - [X86] Adding tests for scalar bitcasts from vsetcc. NFC.

Zvi Rackover via llvm-commits <llvm-commits at lists.llvm.org>
Thu May 18 00:04:48 PDT 2017


Author: zvi
Date: Thu May 18 02:04:48 2017
New Revision: 303328

URL: http://llvm.org/viewvc/llvm-project?rev=303328&view=rev
Log:
[X86] Adding tests for scalar bitcasts from vsetcc. NFC.

Added:
    llvm/trunk/test/CodeGen/X86/bitcast-setcc-128.ll   (with props)
    llvm/trunk/test/CodeGen/X86/bitcast-setcc-256.ll   (with props)
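
Every test in these files follows the same shape: compare two vector arguments, then bitcast the <N x i1> result of the setcc to an N-bit scalar. As a minimal illustration, the body of the v8i16 test below is:

    %x = icmp sgt <8 x i16> %a, %b
    %res = bitcast <8 x i1> %x to i8
    ret i8 %res

The autogenerated checks record the current lowerings: the AVX512 runs (with +avx512vl,+avx512bw) compare straight into a mask register and read it back with kmovd, while the SSE2/SSSE3/AVX1/AVX2 runs scalarize each lane through the stack.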

Added: llvm/trunk/test/CodeGen/X86/bitcast-setcc-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bitcast-setcc-128.ll?rev=303328&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bitcast-setcc-128.ll (added)
+++ llvm/trunk/test/CodeGen/X86/bitcast-setcc-128.ll Thu May 18 02:04:48 2017
@@ -0,0 +1,553 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: llc -mcpu=x86-64 -mattr=+ssse3 < %s | FileCheck %s --check-prefixes=CHECK,SSSE3
+; RUN: llc -mcpu=x86-64 -mattr=+avx < %s | FileCheck %s --check-prefixes=CHECK,AVX1
+; RUN: llc -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefixes=CHECK,AVX512
+
+define i8 @v8i16(<8 x i16> %a, <8 x i16> %b) {
+; SSE2-LABEL: v8i16:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    pcmpgtw %xmm1, %xmm0
+; SSE2-NEXT:    pextrw $7, %xmm0, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    pextrw $6, %xmm0, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    pextrw $5, %xmm0, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    pextrw $4, %xmm0, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    pextrw $3, %xmm0, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    pextrw $2, %xmm0, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    pextrw $1, %xmm0, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v8i16:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    pcmpgtw %xmm1, %xmm0
+; SSSE3-NEXT:    pextrw $7, %xmm0, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    pextrw $6, %xmm0, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    pextrw $5, %xmm0, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    pextrw $4, %xmm0, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    pextrw $3, %xmm0, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    pextrw $2, %xmm0, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    pextrw $1, %xmm0, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movd %xmm0, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: v8i16:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpextrw $7, %xmm0, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrw $6, %xmm0, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrw $5, %xmm0, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrw $4, %xmm0, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrw $3, %xmm0, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: v8i16:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpcmpgtw %xmm1, %xmm0, %k0
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT:    retq
+  %x = icmp sgt <8 x i16> %a, %b
+  %res = bitcast <8 x i1> %x to i8
+  ret i8 %res
+}
+
+define i4 @v4i32(<4 x i32> %a, <4 x i32> %b) {
+; SSE2-LABEL: v4i32:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v4i32:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm0
+; SSSE3-NEXT:    movd %xmm0, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSSE3-NEXT:    movd %xmm1, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSSE3-NEXT:    movd %xmm1, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSSE3-NEXT:    movd %xmm0, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: v4i32:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: v4i32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpcmpgtd %xmm1, %xmm0, %k0
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    retq
+  %x = icmp sgt <4 x i32> %a, %b
+  %res = bitcast <4 x i1> %x to i4
+  ret i4 %res
+}
+
+define i4 @v4f32(<4 x float> %a, <4 x float> %b) {
+; SSE2-LABEL: v4f32:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    cmpltps %xmm0, %xmm1
+; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movaps %xmm1, %xmm0
+; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT:    movd %xmm0, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE2-NEXT:    movd %xmm1, %eax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v4f32:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    cmpltps %xmm0, %xmm1
+; SSSE3-NEXT:    movd %xmm1, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movaps %xmm1, %xmm0
+; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSSE3-NEXT:    movd %xmm0, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSSE3-NEXT:    movd %xmm0, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSSE3-NEXT:    movd %xmm1, %eax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: v4f32:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vcmpltps %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vextractps $3, %xmm0, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vextractps $2, %xmm0, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vextractps $1, %xmm0, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vextractps $0, %xmm0, %eax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: v4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vcmpltps %xmm0, %xmm1, %k0
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    retq
+  %x = fcmp ogt <4 x float> %a, %b
+  %res = bitcast <4 x i1> %x to i4
+  ret i4 %res
+}
+
+define i16 @v16i8(<16 x i8> %a, <16 x i8> %b) {
+; SSE2-LABEL: v16i8:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm0
+; SSE2-NEXT:    movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    andb $1, %al
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    andb $1, %al
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    andb $1, %al
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    andb $1, %al
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    andb $1, %al
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    andb $1, %al
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    andb $1, %al
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    andb $1, %al
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    andb $1, %al
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    andb $1, %al
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    andb $1, %al
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    andb $1, %al
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    andb $1, %al
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    andb $1, %al
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-NEXT:    andb $1, %cl
+; SSE2-NEXT:    movb %cl, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    andb $1, %al
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v16i8:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    pcmpgtb %xmm1, %xmm0
+; SSSE3-NEXT:    movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    andb $1, %al
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    andb $1, %al
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    andb $1, %al
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    andb $1, %al
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    andb $1, %al
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    andb $1, %al
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    andb $1, %al
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    andb $1, %al
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    andb $1, %al
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    andb $1, %al
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    andb $1, %al
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    andb $1, %al
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    andb $1, %al
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    andb $1, %al
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %cl
+; SSSE3-NEXT:    andb $1, %cl
+; SSSE3-NEXT:    movb %cl, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    andb $1, %al
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movzwl -{{[0-9]+}}(%rsp), %eax
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: v16i8:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpextrb $15, %xmm0, %eax
+; AVX1-NEXT:    andb $1, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrb $14, %xmm0, %eax
+; AVX1-NEXT:    andb $1, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrb $13, %xmm0, %eax
+; AVX1-NEXT:    andb $1, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrb $12, %xmm0, %eax
+; AVX1-NEXT:    andb $1, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrb $11, %xmm0, %eax
+; AVX1-NEXT:    andb $1, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrb $10, %xmm0, %eax
+; AVX1-NEXT:    andb $1, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrb $9, %xmm0, %eax
+; AVX1-NEXT:    andb $1, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrb $8, %xmm0, %eax
+; AVX1-NEXT:    andb $1, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrb $7, %xmm0, %eax
+; AVX1-NEXT:    andb $1, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrb $6, %xmm0, %eax
+; AVX1-NEXT:    andb $1, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrb $5, %xmm0, %eax
+; AVX1-NEXT:    andb $1, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrb $4, %xmm0, %eax
+; AVX1-NEXT:    andb $1, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrb $3, %xmm0, %eax
+; AVX1-NEXT:    andb $1, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrb $2, %xmm0, %eax
+; AVX1-NEXT:    andb $1, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrb $1, %xmm0, %eax
+; AVX1-NEXT:    andb $1, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX1-NEXT:    andb $1, %al
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: v16i8:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpcmpgtb %xmm1, %xmm0, %k0
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT:    retq
+  %x = icmp sgt <16 x i8> %a, %b
+  %res = bitcast <16 x i1> %x to i16
+  ret i16 %res
+}
+
+define i2 @v2i64(<2 x i64> %a, <2 x i64> %b) {
+; SSE2-LABEL: v2i64:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
+; SSE2-NEXT:    pxor %xmm2, %xmm1
+; SSE2-NEXT:    pxor %xmm2, %xmm0
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE2-NEXT:    por %xmm0, %xmm1
+; SSE2-NEXT:    movq %xmm1, %rax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT:    movq %xmm0, %rax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v2i64:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
+; SSSE3-NEXT:    pxor %xmm2, %xmm1
+; SSSE3-NEXT:    pxor %xmm2, %xmm0
+; SSSE3-NEXT:    movdqa %xmm0, %xmm2
+; SSSE3-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSSE3-NEXT:    pcmpeqd %xmm1, %xmm0
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSSE3-NEXT:    pand %xmm3, %xmm0
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSSE3-NEXT:    por %xmm0, %xmm1
+; SSSE3-NEXT:    movq %xmm1, %rax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSSE3-NEXT:    movq %xmm0, %rax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: v2i64:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vmovq %xmm0, %rax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: v2i64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpcmpgtq %xmm1, %xmm0, %k0
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    retq
+  %x = icmp sgt <2 x i64> %a, %b
+  %res = bitcast <2 x i1> %x to i2
+  ret i2 %res
+}
+
+define i2 @v2f64(<2 x double> %a, <2 x double> %b) {
+; SSE2-LABEL: v2f64:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    cmpltpd %xmm0, %xmm1
+; SSE2-NEXT:    movq %xmm1, %rax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT:    movq %xmm0, %rax
+; SSE2-NEXT:    andl $1, %eax
+; SSE2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: v2f64:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    cmpltpd %xmm0, %xmm1
+; SSSE3-NEXT:    movq %xmm1, %rax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSSE3-NEXT:    movq %xmm0, %rax
+; SSSE3-NEXT:    andl $1, %eax
+; SSSE3-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: v2f64:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vcmpltpd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    vmovq %xmm0, %rax
+; AVX1-NEXT:    andl $1, %eax
+; AVX1-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT:    retq
+;
+; AVX512-LABEL: v2f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vcmpltpd %xmm0, %xmm1, %k0
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    retq
+  %x = fcmp ogt <2 x double> %a, %b
+  %res = bitcast <2 x i1> %x to i2
+  ret i2 %res
+}

Propchange: llvm/trunk/test/CodeGen/X86/bitcast-setcc-128.ll
------------------------------------------------------------------------------
    svn:eol-style = native

Added: llvm/trunk/test/CodeGen/X86/bitcast-setcc-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bitcast-setcc-256.ll?rev=303328&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bitcast-setcc-256.ll (added)
+++ llvm/trunk/test/CodeGen/X86/bitcast-setcc-256.ll Thu May 18 02:04:48 2017
@@ -0,0 +1,363 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX2
+; RUN: llc -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefix=AVX512
+
+define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) {
+; AVX2-LABEL: v16i16:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrb $15, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $14, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $13, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $12, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $11, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $10, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $9, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $8, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $7, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $6, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $5, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $4, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $3, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $2, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $1, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v16i16:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpcmpgtw %ymm1, %ymm0, %k0
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %x = icmp sgt <16 x i16> %a, %b
+  %res = bitcast <16 x i1> %x to i16
+  ret i16 %res
+}
+
+define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) {
+; AVX2-LABEL: v8i32:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrw $7, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $6, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $5, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $4, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $3, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v8i32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpcmpgtd %ymm1, %ymm0, %k0
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %x = icmp sgt <8 x i32> %a, %b
+  %res = bitcast <8 x i1> %x to i8
+  ret i8 %res
+}
+
+define i8 @v8f32(<8 x float> %a, <8 x float> %b) {
+; AVX2-LABEL: v8f32:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vcmpltps %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrw $7, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $6, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $5, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $4, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $3, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v8f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vcmpltps %ymm0, %ymm1, %k0
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %x = fcmp ogt <8 x float> %a, %b
+  %res = bitcast <8 x i1> %x to i8
+  ret i8 %res
+}
+
+define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) {
+; AVX2-LABEL: v32i8:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:  .Lcfi0:
+; AVX2-NEXT:    .cfi_def_cfa_offset 16
+; AVX2-NEXT:  .Lcfi1:
+; AVX2-NEXT:    .cfi_offset %rbp, -16
+; AVX2-NEXT:    movq %rsp, %rbp
+; AVX2-NEXT:  .Lcfi2:
+; AVX2-NEXT:    .cfi_def_cfa_register %rbp
+; AVX2-NEXT:    andq $-32, %rsp
+; AVX2-NEXT:    subq $32, %rsp
+; AVX2-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpextrb $15, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $14, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $13, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $12, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $11, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $10, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $9, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $8, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $7, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $6, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $5, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $4, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $3, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $2, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $1, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $0, %xmm1, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $15, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $14, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $13, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $12, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $11, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $10, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $9, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $8, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $7, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $6, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $5, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $4, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $3, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $2, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $1, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX2-NEXT:    andb $1, %al
+; AVX2-NEXT:    movb %al, (%rsp)
+; AVX2-NEXT:    movl (%rsp), %eax
+; AVX2-NEXT:    movq %rbp, %rsp
+; AVX2-NEXT:    popq %rbp
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v32i8:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpcmpgtb %ymm1, %ymm0, %k0
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %x = icmp sgt <32 x i8> %a, %b
+  %res = bitcast <32 x i1> %x to i32
+  ret i32 %res
+}
+
+define i4 @v4i64(<4 x i64> %a, <4 x i64> %b) {
+; AVX2-LABEL: v4i64:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v4i64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpcmpgtq %ymm1, %ymm0, %k0
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %x = icmp sgt <4 x i64> %a, %b
+  %res = bitcast <4 x i1> %x to i4
+  ret i4 %res
+}
+
+define i4 @v4f64(<4 x double> %a, <4 x double> %b) {
+; AVX2-LABEL: v4f64:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vcmpltpd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    andl $1, %eax
+; AVX2-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: v4f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vcmpltpd %ymm0, %ymm1, %k0
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT:    movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %x = fcmp ogt <4 x double> %a, %b
+  %res = bitcast <4 x i1> %x to i4
+  ret i4 %res
+}

Propchange: llvm/trunk/test/CodeGen/X86/bitcast-setcc-256.ll
------------------------------------------------------------------------------
    svn:eol-style = native
