[llvm] r359116 - [X86][SSE] Add tests for bitcasting vXi1 bool vectors to non-simple types.
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 24 10:25:45 PDT 2019
Author: rksimon
Date: Wed Apr 24 10:25:45 2019
New Revision: 359116
URL: http://llvm.org/viewvc/llvm-project?rev=359116&view=rev
Log:
[X86][SSE] Add tests for bitcasting vXi1 bool vectors to non-simple types.
Added:
llvm/trunk/test/CodeGen/X86/bitcast-vector-bool.ll
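
For reference, every test in this file follows the same shape: compare a
vector against zero to produce a vXi1 bool mask, bitcast that mask to a
two-element vector whose element type (i2, i4, i8, i16) generally has no
simple (MVT-backed) type, then extract and add both elements so the bitcast
stays live through codegen. A minimal sketch of the recurring pattern (the
function name is illustrative; it restates the <4 x i1> -> <2 x i2> case
from bitcast_v4i32_to_v2i2 below):

  ; Sketch only - mirrors the pattern used by the tests in this file.
  define i2 @sketch_v4i32_to_v2i2(<4 x i32> %a0) nounwind {
    ; Build a <4 x i1> sign mask by comparing against zero.
    %mask = icmp slt <4 x i32> %a0, zeroinitializer
    ; Reinterpret the 4 mask bits as two i2 lanes; <2 x i2> is not a
    ; simple type, which is the case these tests exercise.
    %bits = bitcast <4 x i1> %mask to <2 x i2>
    ; Extract and add both lanes so the bitcast cannot be folded away.
    %lo = extractelement <2 x i2> %bits, i32 0
    %hi = extractelement <2 x i2> %bits, i32 1
    %sum = add i2 %lo, %hi
    ret i2 %sum
  }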
Added: llvm/trunk/test/CodeGen/X86/bitcast-vector-bool.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/bitcast-vector-bool.ll?rev=359116&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/bitcast-vector-bool.ll (added)
+++ llvm/trunk/test/CodeGen/X86/bitcast-vector-bool.ll Wed Apr 24 10:25:45 2019
@@ -0,0 +1,2319 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512
+
+;
+; 128-bit vectors
+;
+
+define i1 @bitcast_v2i64_to_v2i1(<2 x i64> %a0) nounwind {
+; SSE2-SSSE3-LABEL: bitcast_v2i64_to_v2i1:
+; SSE2-SSSE3: # %bb.0:
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [2147483648,2147483648]
+; SSE2-SSSE3-NEXT: pxor %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX12-LABEL: bitcast_v2i64_to_v2i1:
+; AVX12: # %bb.0:
+; AVX12-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX12-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
+; AVX12-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX12-NEXT: vpextrb $8, %xmm0, %eax
+; AVX12-NEXT: addb %cl, %al
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: bitcast_v2i64_to_v2i1:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpcmpgtq %xmm0, %xmm1, %k0
+; AVX512-NEXT: kshiftrw $1, %k0, %k1
+; AVX512-NEXT: kmovd %k1, %ecx
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: addb %cl, %al
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: retq
+ %1 = icmp slt <2 x i64> %a0, zeroinitializer
+ %2 = bitcast <2 x i1> %1 to <2 x i1>
+ %3 = extractelement <2 x i1> %2, i32 0
+ %4 = extractelement <2 x i1> %2, i32 1
+ %5 = add i1 %3, %4
+ ret i1 %5
+}
+
+define i2 @bitcast_v4i32_to_v2i2(<4 x i32> %a0) nounwind {
+; SSE2-SSSE3-LABEL: bitcast_v4i32_to_v2i2:
+; SSE2-SSSE3: # %bb.0:
+; SSE2-SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: movd %xmm1, %eax
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE2-SSSE3-NEXT: movd %xmm0, %ecx
+; SSE2-SSSE3-NEXT: andb $1, %cl
+; SSE2-SSSE3-NEXT: addb %cl, %cl
+; SSE2-SSSE3-NEXT: subb %al, %cl
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: shlb $2, %al
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSE2-SSSE3-NEXT: movd %xmm0, %edx
+; SSE2-SSSE3-NEXT: shlb $3, %dl
+; SSE2-SSSE3-NEXT: orb %al, %dl
+; SSE2-SSSE3-NEXT: orb %cl, %dl
+; SSE2-SSSE3-NEXT: andb $15, %dl
+; SSE2-SSSE3-NEXT: movzbl %dl, %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: movq %rcx, %xmm0
+; SSE2-SSSE3-NEXT: andl $3, %eax
+; SSE2-SSSE3-NEXT: movq %rax, %xmm1
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX12-LABEL: bitcast_v4i32_to_v2i2:
+; AVX12: # %bb.0:
+; AVX12-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX12-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; AVX12-NEXT: vmovd %xmm0, %eax
+; AVX12-NEXT: vpextrd $1, %xmm0, %ecx
+; AVX12-NEXT: andb $1, %cl
+; AVX12-NEXT: addb %cl, %cl
+; AVX12-NEXT: subb %al, %cl
+; AVX12-NEXT: vpextrd $2, %xmm0, %eax
+; AVX12-NEXT: andb $1, %al
+; AVX12-NEXT: shlb $2, %al
+; AVX12-NEXT: vpextrd $3, %xmm0, %edx
+; AVX12-NEXT: shlb $3, %dl
+; AVX12-NEXT: orb %al, %dl
+; AVX12-NEXT: orb %cl, %dl
+; AVX12-NEXT: andb $15, %dl
+; AVX12-NEXT: movzbl %dl, %eax
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $2, %ecx
+; AVX12-NEXT: vmovd %ecx, %xmm0
+; AVX12-NEXT: andl $3, %eax
+; AVX12-NEXT: vmovd %eax, %xmm1
+; AVX12-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX12-NEXT: vpextrb $0, %xmm0, %eax
+; AVX12-NEXT: addb %cl, %al
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: bitcast_v4i32_to_v2i2:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpcmpgtd %xmm0, %xmm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movzbl %al, %ecx
+; AVX512-NEXT: shrl $2, %ecx
+; AVX512-NEXT: andl $3, %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm0
+; AVX512-NEXT: andl $3, %eax
+; AVX512-NEXT: vmovd %eax, %xmm1
+; AVX512-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX512-NEXT: vpextrb $0, %xmm0, %eax
+; AVX512-NEXT: addb %cl, %al
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: retq
+ %1 = icmp slt <4 x i32> %a0, zeroinitializer
+ %2 = bitcast <4 x i1> %1 to <2 x i2>
+ %3 = extractelement <2 x i2> %2, i32 0
+ %4 = extractelement <2 x i2> %2, i32 1
+ %5 = add i2 %3, %4
+ ret i2 %5
+}
+
+define i4 @bitcast_v8i16_to_v2i4(<8 x i16> %a0) nounwind {
+; SSE2-SSSE3-LABEL: bitcast_v8i16_to_v2i4:
+; SSE2-SSSE3: # %bb.0:
+; SSE2-SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSE2-SSSE3-NEXT: pcmpgtw %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: movd %xmm1, %ecx
+; SSE2-SSSE3-NEXT: pextrw $1, %xmm1, %eax
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: addb %al, %al
+; SSE2-SSSE3-NEXT: subb %cl, %al
+; SSE2-SSSE3-NEXT: pextrw $2, %xmm1, %ecx
+; SSE2-SSSE3-NEXT: andb $1, %cl
+; SSE2-SSSE3-NEXT: shlb $2, %cl
+; SSE2-SSSE3-NEXT: pextrw $3, %xmm1, %edx
+; SSE2-SSSE3-NEXT: andb $1, %dl
+; SSE2-SSSE3-NEXT: shlb $3, %dl
+; SSE2-SSSE3-NEXT: orb %cl, %dl
+; SSE2-SSSE3-NEXT: pextrw $4, %xmm1, %ecx
+; SSE2-SSSE3-NEXT: andb $1, %cl
+; SSE2-SSSE3-NEXT: shlb $4, %cl
+; SSE2-SSSE3-NEXT: orb %dl, %cl
+; SSE2-SSSE3-NEXT: pextrw $5, %xmm1, %edx
+; SSE2-SSSE3-NEXT: andb $1, %dl
+; SSE2-SSSE3-NEXT: shlb $5, %dl
+; SSE2-SSSE3-NEXT: orb %cl, %dl
+; SSE2-SSSE3-NEXT: pextrw $6, %xmm1, %ecx
+; SSE2-SSSE3-NEXT: andb $1, %cl
+; SSE2-SSSE3-NEXT: shlb $6, %cl
+; SSE2-SSSE3-NEXT: orb %dl, %cl
+; SSE2-SSSE3-NEXT: pextrw $7, %xmm1, %edx
+; SSE2-SSSE3-NEXT: shlb $7, %dl
+; SSE2-SSSE3-NEXT: orb %cl, %dl
+; SSE2-SSSE3-NEXT: orb %al, %dl
+; SSE2-SSSE3-NEXT: movzbl %dl, %eax
+; SSE2-SSSE3-NEXT: movq %rax, %rcx
+; SSE2-SSSE3-NEXT: shrq $4, %rcx
+; SSE2-SSSE3-NEXT: movq %rcx, %xmm0
+; SSE2-SSSE3-NEXT: andl $15, %eax
+; SSE2-SSSE3-NEXT: movq %rax, %xmm1
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX12-LABEL: bitcast_v8i16_to_v2i4:
+; AVX12: # %bb.0:
+; AVX12-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX12-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; AVX12-NEXT: vmovd %xmm0, %ecx
+; AVX12-NEXT: vpextrw $1, %xmm0, %eax
+; AVX12-NEXT: andb $1, %al
+; AVX12-NEXT: addb %al, %al
+; AVX12-NEXT: subb %cl, %al
+; AVX12-NEXT: vpextrw $2, %xmm0, %ecx
+; AVX12-NEXT: andb $1, %cl
+; AVX12-NEXT: shlb $2, %cl
+; AVX12-NEXT: vpextrw $3, %xmm0, %edx
+; AVX12-NEXT: andb $1, %dl
+; AVX12-NEXT: shlb $3, %dl
+; AVX12-NEXT: orb %cl, %dl
+; AVX12-NEXT: vpextrw $4, %xmm0, %ecx
+; AVX12-NEXT: andb $1, %cl
+; AVX12-NEXT: shlb $4, %cl
+; AVX12-NEXT: orb %dl, %cl
+; AVX12-NEXT: vpextrw $5, %xmm0, %edx
+; AVX12-NEXT: andb $1, %dl
+; AVX12-NEXT: shlb $5, %dl
+; AVX12-NEXT: orb %cl, %dl
+; AVX12-NEXT: vpextrw $6, %xmm0, %ecx
+; AVX12-NEXT: andb $1, %cl
+; AVX12-NEXT: shlb $6, %cl
+; AVX12-NEXT: orb %dl, %cl
+; AVX12-NEXT: vpextrw $7, %xmm0, %edx
+; AVX12-NEXT: shlb $7, %dl
+; AVX12-NEXT: orb %cl, %dl
+; AVX12-NEXT: orb %al, %dl
+; AVX12-NEXT: movzbl %dl, %eax
+; AVX12-NEXT: movl %eax, %ecx
+; AVX12-NEXT: shrl $4, %ecx
+; AVX12-NEXT: vmovd %ecx, %xmm0
+; AVX12-NEXT: andl $15, %eax
+; AVX12-NEXT: vmovd %eax, %xmm1
+; AVX12-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX12-NEXT: vpextrb $0, %xmm0, %eax
+; AVX12-NEXT: addb %cl, %al
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: bitcast_v8i16_to_v2i4:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovw2m %xmm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movzbl %al, %ecx
+; AVX512-NEXT: shrl $4, %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm0
+; AVX512-NEXT: andl $15, %eax
+; AVX512-NEXT: vmovd %eax, %xmm1
+; AVX512-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX512-NEXT: vpextrb $0, %xmm0, %eax
+; AVX512-NEXT: addb %cl, %al
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: retq
+ %1 = icmp slt <8 x i16> %a0, zeroinitializer
+ %2 = bitcast <8 x i1> %1 to <2 x i4>
+ %3 = extractelement <2 x i4> %2, i32 0
+ %4 = extractelement <2 x i4> %2, i32 1
+ %5 = add i4 %3, %4
+ ret i4 %5
+}
+
+define i8 @bitcast_v16i8_to_v2i8(<16 x i8> %a0) nounwind {
+; SSE2-LABEL: bitcast_v16i8_to_v2i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: andl $1, %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: leal (%rcx,%rax,2), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: leal (%rax,%rcx,4), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: leal (%rax,%rcx,8), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $4, %ecx
+; SSE2-NEXT: orl %eax, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: andl $1, %eax
+; SSE2-NEXT: shll $5, %eax
+; SSE2-NEXT: orl %ecx, %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $6, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: andl $1, %edx
+; SSE2-NEXT: shll $7, %edx
+; SSE2-NEXT: orl %ecx, %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $8, %ecx
+; SSE2-NEXT: orl %edx, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: andl $1, %edx
+; SSE2-NEXT: shll $9, %edx
+; SSE2-NEXT: orl %ecx, %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $10, %ecx
+; SSE2-NEXT: orl %edx, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: andl $1, %edx
+; SSE2-NEXT: shll $11, %edx
+; SSE2-NEXT: orl %ecx, %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $12, %ecx
+; SSE2-NEXT: orl %edx, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: andl $1, %edx
+; SSE2-NEXT: shll $13, %edx
+; SSE2-NEXT: orl %ecx, %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $14, %ecx
+; SSE2-NEXT: orl %edx, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: shll $15, %edx
+; SSE2-NEXT: orl %ecx, %edx
+; SSE2-NEXT: orl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: addb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: bitcast_v16i8_to_v2i8:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: pcmpgtb %xmm0, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSSE3-NEXT: andl $1, %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: leal (%rcx,%rax,2), %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: leal (%rax,%rcx,4), %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: leal (%rax,%rcx,8), %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $4, %ecx
+; SSSE3-NEXT: orl %eax, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSSE3-NEXT: andl $1, %eax
+; SSSE3-NEXT: shll $5, %eax
+; SSSE3-NEXT: orl %ecx, %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $6, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: andl $1, %edx
+; SSSE3-NEXT: shll $7, %edx
+; SSSE3-NEXT: orl %ecx, %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $8, %ecx
+; SSSE3-NEXT: orl %edx, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: andl $1, %edx
+; SSSE3-NEXT: shll $9, %edx
+; SSSE3-NEXT: orl %ecx, %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $10, %ecx
+; SSSE3-NEXT: orl %edx, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: andl $1, %edx
+; SSSE3-NEXT: shll $11, %edx
+; SSSE3-NEXT: orl %ecx, %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $12, %ecx
+; SSSE3-NEXT: orl %edx, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: andl $1, %edx
+; SSSE3-NEXT: shll $13, %edx
+; SSSE3-NEXT: orl %ecx, %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $14, %ecx
+; SSSE3-NEXT: orl %edx, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: shll $15, %edx
+; SSSE3-NEXT: orl %ecx, %edx
+; SSSE3-NEXT: orl %eax, %edx
+; SSSE3-NEXT: movd %edx, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT: retq
+;
+; AVX12-LABEL: bitcast_v16i8_to_v2i8:
+; AVX12: # %bb.0:
+; AVX12-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX12-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; AVX12-NEXT: vpextrb $1, %xmm0, %eax
+; AVX12-NEXT: andl $1, %eax
+; AVX12-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: leal (%rcx,%rax,2), %eax
+; AVX12-NEXT: vpextrb $2, %xmm0, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: leal (%rax,%rcx,4), %eax
+; AVX12-NEXT: vpextrb $3, %xmm0, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: leal (%rax,%rcx,8), %eax
+; AVX12-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: shll $4, %ecx
+; AVX12-NEXT: orl %eax, %ecx
+; AVX12-NEXT: vpextrb $5, %xmm0, %eax
+; AVX12-NEXT: andl $1, %eax
+; AVX12-NEXT: shll $5, %eax
+; AVX12-NEXT: orl %ecx, %eax
+; AVX12-NEXT: vpextrb $6, %xmm0, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: shll $6, %ecx
+; AVX12-NEXT: vpextrb $7, %xmm0, %edx
+; AVX12-NEXT: andl $1, %edx
+; AVX12-NEXT: shll $7, %edx
+; AVX12-NEXT: orl %ecx, %edx
+; AVX12-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: shll $8, %ecx
+; AVX12-NEXT: orl %edx, %ecx
+; AVX12-NEXT: vpextrb $9, %xmm0, %edx
+; AVX12-NEXT: andl $1, %edx
+; AVX12-NEXT: shll $9, %edx
+; AVX12-NEXT: orl %ecx, %edx
+; AVX12-NEXT: vpextrb $10, %xmm0, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: shll $10, %ecx
+; AVX12-NEXT: orl %edx, %ecx
+; AVX12-NEXT: vpextrb $11, %xmm0, %edx
+; AVX12-NEXT: andl $1, %edx
+; AVX12-NEXT: shll $11, %edx
+; AVX12-NEXT: orl %ecx, %edx
+; AVX12-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: shll $12, %ecx
+; AVX12-NEXT: orl %edx, %ecx
+; AVX12-NEXT: vpextrb $13, %xmm0, %edx
+; AVX12-NEXT: andl $1, %edx
+; AVX12-NEXT: shll $13, %edx
+; AVX12-NEXT: orl %ecx, %edx
+; AVX12-NEXT: vpextrb $14, %xmm0, %ecx
+; AVX12-NEXT: andl $1, %ecx
+; AVX12-NEXT: shll $14, %ecx
+; AVX12-NEXT: orl %edx, %ecx
+; AVX12-NEXT: vpextrb $15, %xmm0, %edx
+; AVX12-NEXT: shll $15, %edx
+; AVX12-NEXT: orl %ecx, %edx
+; AVX12-NEXT: orl %eax, %edx
+; AVX12-NEXT: vmovd %edx, %xmm0
+; AVX12-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX12-NEXT: vpextrb $1, %xmm0, %eax
+; AVX12-NEXT: addb %cl, %al
+; AVX12-NEXT: # kill: def $al killed $al killed $eax
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: bitcast_v16i8_to_v2i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovb2m %xmm0, %k0
+; AVX512-NEXT: kmovw %k0, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX512-NEXT: vpextrb $1, %xmm0, %eax
+; AVX512-NEXT: addb %cl, %al
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: retq
+ %1 = icmp slt <16 x i8> %a0, zeroinitializer
+ %2 = bitcast <16 x i1> %1 to <2 x i8>
+ %3 = extractelement <2 x i8> %2, i32 0
+ %4 = extractelement <2 x i8> %2, i32 1
+ %5 = add i8 %3, %4
+ ret i8 %5
+}
+
+;
+; 256-bit vectors
+;
+
+define i2 @bitcast_v4i64_to_v2i2(<4 x i64> %a0) nounwind {
+; SSE2-SSSE3-LABEL: bitcast_v4i64_to_v2i2:
+; SSE2-SSSE3: # %bb.0:
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
+; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm3
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm3
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm5
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSE2-SSSE3-NEXT: por %xmm5, %xmm1
+; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm3
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
+; SSE2-SSSE3-NEXT: por %xmm0, %xmm2
+; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,2],xmm1[0,2]
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: addb %al, %al
+; SSE2-SSSE3-NEXT: movd %xmm2, %ecx
+; SSE2-SSSE3-NEXT: andb $1, %cl
+; SSE2-SSSE3-NEXT: orb %al, %cl
+; SSE2-SSSE3-NEXT: movd %xmm1, %eax
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: shlb $2, %al
+; SSE2-SSSE3-NEXT: orb %cl, %al
+; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
+; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; SSE2-SSSE3-NEXT: movd %xmm2, %ecx
+; SSE2-SSSE3-NEXT: shlb $3, %cl
+; SSE2-SSSE3-NEXT: orb %al, %cl
+; SSE2-SSSE3-NEXT: andb $15, %cl
+; SSE2-SSSE3-NEXT: movzbl %cl, %eax
+; SSE2-SSSE3-NEXT: movl %eax, %ecx
+; SSE2-SSSE3-NEXT: shrl $2, %ecx
+; SSE2-SSSE3-NEXT: movq %rcx, %xmm0
+; SSE2-SSSE3-NEXT: andl $3, %eax
+; SSE2-SSSE3-NEXT: movq %rax, %xmm1
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: bitcast_v4i64_to_v2i2:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: vpextrd $1, %xmm0, %ecx
+; AVX1-NEXT: andb $1, %cl
+; AVX1-NEXT: addb %cl, %cl
+; AVX1-NEXT: orb %al, %cl
+; AVX1-NEXT: vpextrd $2, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: shlb $2, %al
+; AVX1-NEXT: orb %cl, %al
+; AVX1-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX1-NEXT: shlb $3, %cl
+; AVX1-NEXT: orb %al, %cl
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movzbl %cl, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $2, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: andl $3, %eax
+; AVX1-NEXT: vmovd %eax, %xmm1
+; AVX1-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_v4i64_to_v2i2:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: vpextrd $1, %xmm0, %ecx
+; AVX2-NEXT: andb $1, %cl
+; AVX2-NEXT: addb %cl, %cl
+; AVX2-NEXT: orb %al, %cl
+; AVX2-NEXT: vpextrd $2, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: shlb $2, %al
+; AVX2-NEXT: orb %cl, %al
+; AVX2-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX2-NEXT: shlb $3, %cl
+; AVX2-NEXT: orb %al, %cl
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movzbl %cl, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $2, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm0
+; AVX2-NEXT: andl $3, %eax
+; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: bitcast_v4i64_to_v2i2:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpcmpgtq %ymm0, %ymm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movzbl %al, %ecx
+; AVX512-NEXT: shrl $2, %ecx
+; AVX512-NEXT: andl $3, %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm0
+; AVX512-NEXT: andl $3, %eax
+; AVX512-NEXT: vmovd %eax, %xmm1
+; AVX512-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX512-NEXT: vpextrb $0, %xmm0, %eax
+; AVX512-NEXT: addb %cl, %al
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <4 x i64> %a0, zeroinitializer
+ %2 = bitcast <4 x i1> %1 to <2 x i2>
+ %3 = extractelement <2 x i2> %2, i32 0
+ %4 = extractelement <2 x i2> %2, i32 1
+ %5 = add i2 %3, %4
+ ret i2 %5
+}
+
+define i4 @bitcast_v8i32_to_v2i4(<8 x i32> %a0) nounwind {
+; SSE2-SSSE3-LABEL: bitcast_v8i32_to_v2i4:
+; SSE2-SSSE3: # %bb.0:
+; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSE2-SSSE3-NEXT: pxor %xmm3, %xmm3
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE2-SSSE3-NEXT: pextrw $0, %xmm3, %ecx
+; SSE2-SSSE3-NEXT: pextrw $2, %xmm3, %eax
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: addb %al, %al
+; SSE2-SSSE3-NEXT: subb %cl, %al
+; SSE2-SSSE3-NEXT: pextrw $4, %xmm3, %ecx
+; SSE2-SSSE3-NEXT: andb $1, %cl
+; SSE2-SSSE3-NEXT: shlb $2, %cl
+; SSE2-SSSE3-NEXT: pextrw $6, %xmm3, %edx
+; SSE2-SSSE3-NEXT: andb $1, %dl
+; SSE2-SSSE3-NEXT: shlb $3, %dl
+; SSE2-SSSE3-NEXT: orb %cl, %dl
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: pextrw $0, %xmm2, %ecx
+; SSE2-SSSE3-NEXT: andb $1, %cl
+; SSE2-SSSE3-NEXT: shlb $4, %cl
+; SSE2-SSSE3-NEXT: orb %dl, %cl
+; SSE2-SSSE3-NEXT: pextrw $2, %xmm2, %edx
+; SSE2-SSSE3-NEXT: andb $1, %dl
+; SSE2-SSSE3-NEXT: shlb $5, %dl
+; SSE2-SSSE3-NEXT: orb %cl, %dl
+; SSE2-SSSE3-NEXT: pextrw $4, %xmm2, %ecx
+; SSE2-SSSE3-NEXT: andb $1, %cl
+; SSE2-SSSE3-NEXT: shlb $6, %cl
+; SSE2-SSSE3-NEXT: orb %dl, %cl
+; SSE2-SSSE3-NEXT: pextrw $6, %xmm2, %edx
+; SSE2-SSSE3-NEXT: shlb $7, %dl
+; SSE2-SSSE3-NEXT: orb %cl, %dl
+; SSE2-SSSE3-NEXT: orb %al, %dl
+; SSE2-SSSE3-NEXT: movzbl %dl, %eax
+; SSE2-SSSE3-NEXT: movq %rax, %rcx
+; SSE2-SSSE3-NEXT: shrq $4, %rcx
+; SSE2-SSSE3-NEXT: movq %rcx, %xmm0
+; SSE2-SSSE3-NEXT: andl $15, %eax
+; SSE2-SSSE3-NEXT: movq %rax, %xmm1
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: bitcast_v8i32_to_v2i4:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm2
+; AVX1-NEXT: vpextrw $0, %xmm2, %ecx
+; AVX1-NEXT: vpextrw $2, %xmm2, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: addb %al, %al
+; AVX1-NEXT: subb %cl, %al
+; AVX1-NEXT: vpextrw $4, %xmm2, %ecx
+; AVX1-NEXT: andb $1, %cl
+; AVX1-NEXT: shlb $2, %cl
+; AVX1-NEXT: vpextrw $6, %xmm2, %edx
+; AVX1-NEXT: andb $1, %dl
+; AVX1-NEXT: shlb $3, %dl
+; AVX1-NEXT: orb %cl, %dl
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpextrw $0, %xmm0, %ecx
+; AVX1-NEXT: andb $1, %cl
+; AVX1-NEXT: shlb $4, %cl
+; AVX1-NEXT: orb %dl, %cl
+; AVX1-NEXT: vpextrw $2, %xmm0, %edx
+; AVX1-NEXT: andb $1, %dl
+; AVX1-NEXT: shlb $5, %dl
+; AVX1-NEXT: orb %cl, %dl
+; AVX1-NEXT: vpextrw $4, %xmm0, %ecx
+; AVX1-NEXT: andb $1, %cl
+; AVX1-NEXT: shlb $6, %cl
+; AVX1-NEXT: orb %dl, %cl
+; AVX1-NEXT: vpextrw $6, %xmm0, %edx
+; AVX1-NEXT: shlb $7, %dl
+; AVX1-NEXT: orb %cl, %dl
+; AVX1-NEXT: orb %al, %dl
+; AVX1-NEXT: movzbl %dl, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: vmovd %eax, %xmm1
+; AVX1-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_v8i32_to_v2i4:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpextrw $0, %xmm0, %ecx
+; AVX2-NEXT: vpextrw $2, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: addb %al, %al
+; AVX2-NEXT: subb %cl, %al
+; AVX2-NEXT: vpextrw $4, %xmm0, %ecx
+; AVX2-NEXT: andb $1, %cl
+; AVX2-NEXT: shlb $2, %cl
+; AVX2-NEXT: vpextrw $6, %xmm0, %edx
+; AVX2-NEXT: andb $1, %dl
+; AVX2-NEXT: shlb $3, %dl
+; AVX2-NEXT: orb %cl, %dl
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpextrw $0, %xmm0, %ecx
+; AVX2-NEXT: andb $1, %cl
+; AVX2-NEXT: shlb $4, %cl
+; AVX2-NEXT: orb %dl, %cl
+; AVX2-NEXT: vpextrw $2, %xmm0, %edx
+; AVX2-NEXT: andb $1, %dl
+; AVX2-NEXT: shlb $5, %dl
+; AVX2-NEXT: orb %cl, %dl
+; AVX2-NEXT: vpextrw $4, %xmm0, %ecx
+; AVX2-NEXT: andb $1, %cl
+; AVX2-NEXT: shlb $6, %cl
+; AVX2-NEXT: orb %dl, %cl
+; AVX2-NEXT: vpextrw $6, %xmm0, %edx
+; AVX2-NEXT: shlb $7, %dl
+; AVX2-NEXT: orb %cl, %dl
+; AVX2-NEXT: orb %al, %dl
+; AVX2-NEXT: movzbl %dl, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm0
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: bitcast_v8i32_to_v2i4:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpcmpgtd %ymm0, %ymm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movzbl %al, %ecx
+; AVX512-NEXT: shrl $4, %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm0
+; AVX512-NEXT: andl $15, %eax
+; AVX512-NEXT: vmovd %eax, %xmm1
+; AVX512-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX512-NEXT: vpextrb $0, %xmm0, %eax
+; AVX512-NEXT: addb %cl, %al
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <8 x i32> %a0, zeroinitializer
+ %2 = bitcast <8 x i1> %1 to <2 x i4>
+ %3 = extractelement <2 x i4> %2, i32 0
+ %4 = extractelement <2 x i4> %2, i32 1
+ %5 = add i4 %3, %4
+ ret i4 %5
+}
+
+define i8 @bitcast_v16i16_to_v2i8(<16 x i16> %a0) nounwind {
+; SSE2-LABEL: bitcast_v16i16_to_v2i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtw %xmm1, %xmm3
+; SSE2-NEXT: pcmpgtw %xmm0, %xmm2
+; SSE2-NEXT: packsswb %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: andl $1, %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: leal (%rcx,%rax,2), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: leal (%rax,%rcx,4), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: leal (%rax,%rcx,8), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $4, %ecx
+; SSE2-NEXT: orl %eax, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: andl $1, %eax
+; SSE2-NEXT: shll $5, %eax
+; SSE2-NEXT: orl %ecx, %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $6, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: andl $1, %edx
+; SSE2-NEXT: shll $7, %edx
+; SSE2-NEXT: orl %ecx, %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $8, %ecx
+; SSE2-NEXT: orl %edx, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: andl $1, %edx
+; SSE2-NEXT: shll $9, %edx
+; SSE2-NEXT: orl %ecx, %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $10, %ecx
+; SSE2-NEXT: orl %edx, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: andl $1, %edx
+; SSE2-NEXT: shll $11, %edx
+; SSE2-NEXT: orl %ecx, %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $12, %ecx
+; SSE2-NEXT: orl %edx, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: andl $1, %edx
+; SSE2-NEXT: shll $13, %edx
+; SSE2-NEXT: orl %ecx, %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $14, %ecx
+; SSE2-NEXT: orl %edx, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: shll $15, %edx
+; SSE2-NEXT: orl %ecx, %edx
+; SSE2-NEXT: orl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: addb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: bitcast_v16i16_to_v2i8:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: pxor %xmm3, %xmm3
+; SSSE3-NEXT: pcmpgtw %xmm1, %xmm3
+; SSSE3-NEXT: pcmpgtw %xmm0, %xmm2
+; SSSE3-NEXT: packsswb %xmm3, %xmm2
+; SSSE3-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSSE3-NEXT: andl $1, %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: leal (%rcx,%rax,2), %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: leal (%rax,%rcx,4), %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: leal (%rax,%rcx,8), %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $4, %ecx
+; SSSE3-NEXT: orl %eax, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSSE3-NEXT: andl $1, %eax
+; SSSE3-NEXT: shll $5, %eax
+; SSSE3-NEXT: orl %ecx, %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $6, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: andl $1, %edx
+; SSSE3-NEXT: shll $7, %edx
+; SSSE3-NEXT: orl %ecx, %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $8, %ecx
+; SSSE3-NEXT: orl %edx, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: andl $1, %edx
+; SSSE3-NEXT: shll $9, %edx
+; SSSE3-NEXT: orl %ecx, %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $10, %ecx
+; SSSE3-NEXT: orl %edx, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: andl $1, %edx
+; SSSE3-NEXT: shll $11, %edx
+; SSSE3-NEXT: orl %ecx, %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $12, %ecx
+; SSSE3-NEXT: orl %edx, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: andl $1, %edx
+; SSSE3-NEXT: shll $13, %edx
+; SSSE3-NEXT: orl %ecx, %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $14, %ecx
+; SSSE3-NEXT: orl %edx, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: shll $15, %edx
+; SSSE3-NEXT: orl %ecx, %edx
+; SSSE3-NEXT: orl %eax, %edx
+; SSSE3-NEXT: movd %edx, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT: retq
+;
+; AVX1-LABEL: bitcast_v16i16_to_v2i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm2
+; AVX1-NEXT: vpextrb $2, %xmm2, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpextrb $0, %xmm2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: leal (%rcx,%rax,2), %eax
+; AVX1-NEXT: vpextrb $4, %xmm2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: leal (%rax,%rcx,4), %eax
+; AVX1-NEXT: vpextrb $6, %xmm2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: leal (%rax,%rcx,8), %eax
+; AVX1-NEXT: vpextrb $8, %xmm2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $4, %ecx
+; AVX1-NEXT: orl %eax, %ecx
+; AVX1-NEXT: vpextrb $10, %xmm2, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: shll $5, %eax
+; AVX1-NEXT: orl %ecx, %eax
+; AVX1-NEXT: vpextrb $12, %xmm2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $6, %ecx
+; AVX1-NEXT: vpextrb $14, %xmm2, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $7, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpgtw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $8, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $2, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $9, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $10, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $6, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $11, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $12, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $10, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $13, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $14, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $14, %xmm0, %edx
+; AVX1-NEXT: shll $15, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: orl %eax, %edx
+; AVX1-NEXT: vmovd %edx, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX1-NEXT: vpextrb $1, %xmm0, %eax
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_v16i16_to_v2i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpextrb $2, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: leal (%rcx,%rax,2), %eax
+; AVX2-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: leal (%rax,%rcx,4), %eax
+; AVX2-NEXT: vpextrb $6, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: leal (%rax,%rcx,8), %eax
+; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $4, %ecx
+; AVX2-NEXT: orl %eax, %ecx
+; AVX2-NEXT: vpextrb $10, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: shll $5, %eax
+; AVX2-NEXT: orl %ecx, %eax
+; AVX2-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $6, %ecx
+; AVX2-NEXT: vpextrb $14, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $7, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $8, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $2, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $9, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $10, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $6, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $11, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $12, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $10, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $13, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $14, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $14, %xmm0, %edx
+; AVX2-NEXT: shll $15, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: orl %eax, %edx
+; AVX2-NEXT: vmovd %edx, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: vpextrb $1, %xmm0, %eax
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: bitcast_v16i16_to_v2i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovw2m %ymm0, %k0
+; AVX512-NEXT: kmovw %k0, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX512-NEXT: vpextrb $1, %xmm0, %eax
+; AVX512-NEXT: addb %cl, %al
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <16 x i16> %a0, zeroinitializer
+ %2 = bitcast <16 x i1> %1 to <2 x i8>
+ %3 = extractelement <2 x i8> %2, i32 0
+ %4 = extractelement <2 x i8> %2, i32 1
+ %5 = add i8 %3, %4
+ ret i8 %5
+}
+
+define i16 @bitcast_v32i8_to_v2i16(<32 x i8> %a0) nounwind {
+; SSE2-SSSE3-LABEL: bitcast_v32i8_to_v2i16:
+; SSE2-SSSE3: # %bb.0:
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: bitcast_v32i8_to_v2i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $32, %rsp
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm2
+; AVX1-NEXT: vpextrb $1, %xmm2, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpextrb $0, %xmm2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: leal (%rcx,%rax,2), %eax
+; AVX1-NEXT: vpextrb $2, %xmm2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: leal (%rax,%rcx,4), %eax
+; AVX1-NEXT: vpextrb $3, %xmm2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: leal (%rax,%rcx,8), %eax
+; AVX1-NEXT: vpextrb $4, %xmm2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $4, %ecx
+; AVX1-NEXT: orl %eax, %ecx
+; AVX1-NEXT: vpextrb $5, %xmm2, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: shll $5, %eax
+; AVX1-NEXT: orl %ecx, %eax
+; AVX1-NEXT: vpextrb $6, %xmm2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $6, %ecx
+; AVX1-NEXT: vpextrb $7, %xmm2, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $7, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $8, %xmm2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $8, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $9, %xmm2, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $9, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $10, %xmm2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $10, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $11, %xmm2, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $11, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $12, %xmm2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $12, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $13, %xmm2, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $13, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $14, %xmm2, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $14, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $15, %xmm2, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $15, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $16, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $1, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $17, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $2, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $18, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $3, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $19, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $20, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $5, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $21, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $6, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $22, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $7, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $23, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $24, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $9, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $25, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $10, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $26, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $11, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $27, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $28, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $13, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $29, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $14, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $30, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $15, %xmm0, %edx
+; AVX1-NEXT: shll $31, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: orl %eax, %edx
+; AVX1-NEXT: vmovd %edx, %xmm0
+; AVX1-NEXT: vpextrw $0, %xmm0, %ecx
+; AVX1-NEXT: vpextrw $1, %xmm0, %eax
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_v32i8_to_v2i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $32, %rsp
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpextrb $1, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: leal (%rcx,%rax,2), %eax
+; AVX2-NEXT: vpextrb $2, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: leal (%rax,%rcx,4), %eax
+; AVX2-NEXT: vpextrb $3, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: leal (%rax,%rcx,8), %eax
+; AVX2-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $4, %ecx
+; AVX2-NEXT: orl %eax, %ecx
+; AVX2-NEXT: vpextrb $5, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: shll $5, %eax
+; AVX2-NEXT: orl %ecx, %eax
+; AVX2-NEXT: vpextrb $6, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $6, %ecx
+; AVX2-NEXT: vpextrb $7, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $7, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $8, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $9, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $9, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $10, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $10, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $11, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $11, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $12, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $13, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $13, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $14, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $14, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $15, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $15, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $16, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $1, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $17, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $2, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $18, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $3, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $19, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $20, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $5, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $21, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $6, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $22, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $7, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $23, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $24, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $9, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $25, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $10, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $26, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $11, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $27, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $28, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $13, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $29, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $14, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $30, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $15, %xmm0, %edx
+; AVX2-NEXT: shll $31, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: orl %eax, %edx
+; AVX2-NEXT: vmovd %edx, %xmm0
+; AVX2-NEXT: vpextrw $0, %xmm0, %ecx
+; AVX2-NEXT: vpextrw $1, %xmm0, %eax
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX2-NEXT: movq %rbp, %rsp
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: bitcast_v32i8_to_v2i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: movq %rsp, %rbp
+; AVX512-NEXT: andq $-32, %rsp
+; AVX512-NEXT: subq $32, %rsp
+; AVX512-NEXT: vpmovb2m %ymm0, %k0
+; AVX512-NEXT: kmovd %k0, (%rsp)
+; AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: vpextrw $0, %xmm0, %ecx
+; AVX512-NEXT: vpextrw $1, %xmm0, %eax
+; AVX512-NEXT: addl %ecx, %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512-NEXT: movq %rbp, %rsp
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <32 x i8> %a0, zeroinitializer
+ %2 = bitcast <32 x i1> %1 to <2 x i16>
+ %3 = extractelement <2 x i16> %2, i32 0
+ %4 = extractelement <2 x i16> %2, i32 1
+ %5 = add i16 %3, %4
+ ret i16 %5
+}
+
+;
+; 512-bit vectors
+;
+
+define i4 @bitcast_v8i64_to_v2i4(<8 x i64> %a0) nounwind {
+; SSE2-SSSE3-LABEL: bitcast_v8i64_to_v2i4:
+; SSE2-SSSE3: # %bb.0:
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
+; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm5
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm0, %xmm5
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm6, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-SSSE3-NEXT: por %xmm0, %xmm5
+; SSE2-SSSE3-NEXT: pextrw $0, %xmm5, %eax
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: pextrw $4, %xmm5, %ecx
+; SSE2-SSSE3-NEXT: andb $1, %cl
+; SSE2-SSSE3-NEXT: addb %cl, %cl
+; SSE2-SSSE3-NEXT: orb %al, %cl
+; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm5, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT: por %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pextrw $0, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: shlb $2, %al
+; SSE2-SSSE3-NEXT: orb %cl, %al
+; SSE2-SSSE3-NEXT: pextrw $4, %xmm0, %ecx
+; SSE2-SSSE3-NEXT: andb $1, %cl
+; SSE2-SSSE3-NEXT: shlb $3, %cl
+; SSE2-SSSE3-NEXT: orb %al, %cl
+; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: pextrw $0, %xmm0, %edx
+; SSE2-SSSE3-NEXT: andb $1, %dl
+; SSE2-SSSE3-NEXT: shlb $4, %dl
+; SSE2-SSSE3-NEXT: orb %cl, %dl
+; SSE2-SSSE3-NEXT: pextrw $4, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: shlb $5, %al
+; SSE2-SSSE3-NEXT: orb %dl, %al
+; SSE2-SSSE3-NEXT: pxor %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: movdqa %xmm4, %xmm0
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm3, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm4, %xmm3
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: pextrw $0, %xmm0, %ecx
+; SSE2-SSSE3-NEXT: andb $1, %cl
+; SSE2-SSSE3-NEXT: shlb $6, %cl
+; SSE2-SSSE3-NEXT: pextrw $4, %xmm0, %edx
+; SSE2-SSSE3-NEXT: shlb $7, %dl
+; SSE2-SSSE3-NEXT: orb %cl, %dl
+; SSE2-SSSE3-NEXT: orb %al, %dl
+; SSE2-SSSE3-NEXT: movzbl %dl, %eax
+; SSE2-SSSE3-NEXT: movq %rax, %rcx
+; SSE2-SSSE3-NEXT: shrq $4, %rcx
+; SSE2-SSSE3-NEXT: movq %rcx, %xmm0
+; SSE2-SSSE3-NEXT: andl $15, %eax
+; SSE2-SSSE3-NEXT: movq %rax, %xmm1
+; SSE2-SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-SSSE3-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: bitcast_v8i64_to_v2i4:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: vpextrw $1, %xmm0, %ecx
+; AVX1-NEXT: andb $1, %cl
+; AVX1-NEXT: addb %cl, %cl
+; AVX1-NEXT: orb %al, %cl
+; AVX1-NEXT: vpextrw $2, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: shlb $2, %al
+; AVX1-NEXT: orb %cl, %al
+; AVX1-NEXT: vpextrw $3, %xmm0, %ecx
+; AVX1-NEXT: andb $1, %cl
+; AVX1-NEXT: shlb $3, %cl
+; AVX1-NEXT: orb %al, %cl
+; AVX1-NEXT: vpextrw $4, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: shlb $4, %al
+; AVX1-NEXT: orb %cl, %al
+; AVX1-NEXT: vpextrw $5, %xmm0, %ecx
+; AVX1-NEXT: andb $1, %cl
+; AVX1-NEXT: shlb $5, %cl
+; AVX1-NEXT: orb %al, %cl
+; AVX1-NEXT: vpextrw $6, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: shlb $6, %al
+; AVX1-NEXT: vpextrw $7, %xmm0, %edx
+; AVX1-NEXT: shlb $7, %dl
+; AVX1-NEXT: orb %al, %dl
+; AVX1-NEXT: orb %cl, %dl
+; AVX1-NEXT: movzbl %dl, %eax
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $4, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: vmovd %eax, %xmm1
+; AVX1-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_v8i64_to_v2i4:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: vpextrw $1, %xmm0, %ecx
+; AVX2-NEXT: andb $1, %cl
+; AVX2-NEXT: addb %cl, %cl
+; AVX2-NEXT: orb %al, %cl
+; AVX2-NEXT: vpextrw $2, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: shlb $2, %al
+; AVX2-NEXT: orb %cl, %al
+; AVX2-NEXT: vpextrw $3, %xmm0, %ecx
+; AVX2-NEXT: andb $1, %cl
+; AVX2-NEXT: shlb $3, %cl
+; AVX2-NEXT: orb %al, %cl
+; AVX2-NEXT: vpextrw $4, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: shlb $4, %al
+; AVX2-NEXT: orb %cl, %al
+; AVX2-NEXT: vpextrw $5, %xmm0, %ecx
+; AVX2-NEXT: andb $1, %cl
+; AVX2-NEXT: shlb $5, %cl
+; AVX2-NEXT: orb %al, %cl
+; AVX2-NEXT: vpextrw $6, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: shlb $6, %al
+; AVX2-NEXT: vpextrw $7, %xmm0, %edx
+; AVX2-NEXT: shlb $7, %dl
+; AVX2-NEXT: orb %al, %dl
+; AVX2-NEXT: orb %cl, %dl
+; AVX2-NEXT: movzbl %dl, %eax
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $4, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm0
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: bitcast_v8i64_to_v2i4:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpcmpgtq %zmm0, %zmm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movzbl %al, %ecx
+; AVX512-NEXT: shrl $4, %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm0
+; AVX512-NEXT: andl $15, %eax
+; AVX512-NEXT: vmovd %eax, %xmm1
+; AVX512-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX512-NEXT: vpextrb $0, %xmm0, %eax
+; AVX512-NEXT: addb %cl, %al
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <8 x i64> %a0, zeroinitializer
+ %2 = bitcast <8 x i1> %1 to <2 x i4>
+ %3 = extractelement <2 x i4> %2, i32 0
+ %4 = extractelement <2 x i4> %2, i32 1
+ %5 = add i4 %3, %4
+ ret i4 %5
+}
+
+define i8 @bitcast_v16i32_to_v2i8(<16 x i32> %a0) nounwind {
+; SSE2-LABEL: bitcast_v16i32_to_v2i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pxor %xmm5, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255]
+; SSE2-NEXT: pand %xmm3, %xmm5
+; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm6
+; SSE2-NEXT: pand %xmm3, %xmm6
+; SSE2-NEXT: packuswb %xmm5, %xmm6
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: packuswb %xmm2, %xmm4
+; SSE2-NEXT: packuswb %xmm6, %xmm4
+; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: andl $1, %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: leal (%rcx,%rax,2), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: leal (%rax,%rcx,4), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: leal (%rax,%rcx,8), %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $4, %ecx
+; SSE2-NEXT: orl %eax, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: andl $1, %eax
+; SSE2-NEXT: shll $5, %eax
+; SSE2-NEXT: orl %ecx, %eax
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $6, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: andl $1, %edx
+; SSE2-NEXT: shll $7, %edx
+; SSE2-NEXT: orl %ecx, %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $8, %ecx
+; SSE2-NEXT: orl %edx, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: andl $1, %edx
+; SSE2-NEXT: shll $9, %edx
+; SSE2-NEXT: orl %ecx, %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $10, %ecx
+; SSE2-NEXT: orl %edx, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: andl $1, %edx
+; SSE2-NEXT: shll $11, %edx
+; SSE2-NEXT: orl %ecx, %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $12, %ecx
+; SSE2-NEXT: orl %edx, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: andl $1, %edx
+; SSE2-NEXT: shll $13, %edx
+; SSE2-NEXT: orl %ecx, %edx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE2-NEXT: andl $1, %ecx
+; SSE2-NEXT: shll $14, %ecx
+; SSE2-NEXT: orl %edx, %ecx
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE2-NEXT: shll $15, %edx
+; SSE2-NEXT: orl %ecx, %edx
+; SSE2-NEXT: orl %eax, %edx
+; SSE2-NEXT: movd %edx, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: addb -{{[0-9]+}}(%rsp), %al
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: bitcast_v16i32_to_v2i8:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: pxor %xmm4, %xmm4
+; SSSE3-NEXT: pxor %xmm5, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm5
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; SSSE3-NEXT: pshufb %xmm1, %xmm5
+; SSSE3-NEXT: pxor %xmm6, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm6
+; SSSE3-NEXT: pshufb %xmm1, %xmm6
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; SSSE3-NEXT: pxor %xmm0, %xmm0
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
+; SSSE3-NEXT: pshufb %xmm1, %xmm0
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4
+; SSSE3-NEXT: pshufb %xmm1, %xmm4
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
+; SSSE3-NEXT: movsd {{.*#+}} xmm4 = xmm6[0],xmm4[1]
+; SSSE3-NEXT: movapd %xmm4, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSSE3-NEXT: andl $1, %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: leal (%rcx,%rax,2), %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: leal (%rax,%rcx,4), %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: leal (%rax,%rcx,8), %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $4, %ecx
+; SSSE3-NEXT: orl %eax, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSSE3-NEXT: andl $1, %eax
+; SSSE3-NEXT: shll $5, %eax
+; SSSE3-NEXT: orl %ecx, %eax
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $6, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: andl $1, %edx
+; SSSE3-NEXT: shll $7, %edx
+; SSSE3-NEXT: orl %ecx, %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $8, %ecx
+; SSSE3-NEXT: orl %edx, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: andl $1, %edx
+; SSSE3-NEXT: shll $9, %edx
+; SSSE3-NEXT: orl %ecx, %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $10, %ecx
+; SSSE3-NEXT: orl %edx, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: andl $1, %edx
+; SSSE3-NEXT: shll $11, %edx
+; SSSE3-NEXT: orl %ecx, %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $12, %ecx
+; SSSE3-NEXT: orl %edx, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: andl $1, %edx
+; SSSE3-NEXT: shll $13, %edx
+; SSSE3-NEXT: orl %ecx, %edx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: shll $14, %ecx
+; SSSE3-NEXT: orl %edx, %ecx
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSSE3-NEXT: shll $15, %edx
+; SSSE3-NEXT: orl %ecx, %edx
+; SSSE3-NEXT: orl %eax, %edx
+; SSSE3-NEXT: movd %edx, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al
+; SSSE3-NEXT: retq
+;
+; AVX1-LABEL: bitcast_v16i32_to_v2i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtd %xmm0, %xmm2, %xmm3
+; AVX1-NEXT: vpextrb $4, %xmm3, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpextrb $0, %xmm3, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: leal (%rcx,%rax,2), %eax
+; AVX1-NEXT: vpextrb $8, %xmm3, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: leal (%rax,%rcx,4), %eax
+; AVX1-NEXT: vpextrb $12, %xmm3, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: leal (%rax,%rcx,8), %eax
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpgtd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $4, %ecx
+; AVX1-NEXT: orl %eax, %ecx
+; AVX1-NEXT: vpextrb $4, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: shll $5, %eax
+; AVX1-NEXT: orl %ecx, %eax
+; AVX1-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $6, %ecx
+; AVX1-NEXT: vpextrb $12, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $7, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpcmpgtd %xmm1, %xmm2, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $8, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $4, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $9, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $10, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $12, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $11, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
+; AVX1-NEXT: vpcmpgtd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $12, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $4, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $13, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $14, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $12, %xmm0, %edx
+; AVX1-NEXT: shll $15, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: orl %eax, %edx
+; AVX1-NEXT: vmovd %edx, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX1-NEXT: vpextrb $1, %xmm0, %eax
+; AVX1-NEXT: addb %cl, %al
+; AVX1-NEXT: # kill: def $al killed $al killed $eax
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_v16i32_to_v2i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpgtd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpextrb $4, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: leal (%rcx,%rax,2), %eax
+; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: leal (%rax,%rcx,4), %eax
+; AVX2-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: leal (%rax,%rcx,8), %eax
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $4, %ecx
+; AVX2-NEXT: orl %eax, %ecx
+; AVX2-NEXT: vpextrb $4, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: shll $5, %eax
+; AVX2-NEXT: orl %ecx, %eax
+; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $6, %ecx
+; AVX2-NEXT: vpextrb $12, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $7, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpcmpgtd %ymm1, %ymm2, %ymm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $8, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $4, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $9, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $10, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $12, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $11, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $12, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $4, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $13, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $14, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $12, %xmm0, %edx
+; AVX2-NEXT: shll $15, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: orl %eax, %edx
+; AVX2-NEXT: vmovd %edx, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: vpextrb $1, %xmm0, %eax
+; AVX2-NEXT: addb %cl, %al
+; AVX2-NEXT: # kill: def $al killed $al killed $eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: bitcast_v16i32_to_v2i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpcmpgtd %zmm0, %zmm1, %k0
+; AVX512-NEXT: kmovw %k0, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX512-NEXT: vpextrb $1, %xmm0, %eax
+; AVX512-NEXT: addb %cl, %al
+; AVX512-NEXT: # kill: def $al killed $al killed $eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <16 x i32> %a0, zeroinitializer
+ %2 = bitcast <16 x i1> %1 to <2 x i8>
+ %3 = extractelement <2 x i8> %2, i32 0
+ %4 = extractelement <2 x i8> %2, i32 1
+ %5 = add i8 %3, %4
+ ret i8 %5
+}
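
The AVX512 path for this test spills the 16-bit compare mask with kmovw and reloads it as a vector just to split it into two byte lanes. Semantically the <2 x i8> view is nothing more than the low and high bytes of that i16 mask; here is a scalar sketch of the equivalent computation (placeholder name, and assuming LLVM's usual layout where vector element 0 maps to the low mask bit):

; Sketch only -- equivalent to bitcast_v16i32_to_v2i8 without the vector detour.
define i8 @sketch_split_i16_mask(<16 x i32> %a0) nounwind {
  %cmp  = icmp slt <16 x i32> %a0, zeroinitializer
  %mask = bitcast <16 x i1> %cmp to i16  ; 16 sign bits packed into an i16
  %lo   = trunc i16 %mask to i8          ; element 0 of the <2 x i8> view
  %hib  = lshr i16 %mask, 8
  %hi   = trunc i16 %hib to i8           ; element 1 of the <2 x i8> view
  %sum  = add i8 %lo, %hi
  ret i8 %sum
}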
+
+define i16 @bitcast_v32i16_to_v2i16(<32 x i16> %a0) nounwind {
+; SSE2-SSSE3-LABEL: bitcast_v32i16_to_v2i16:
+; SSE2-SSSE3: # %bb.0:
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: bitcast_v32i16_to_v2i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $32, %rsp
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtw %xmm0, %xmm2, %xmm3
+; AVX1-NEXT: vpextrb $2, %xmm3, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpextrb $0, %xmm3, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: leal (%rcx,%rax,2), %eax
+; AVX1-NEXT: vpextrb $4, %xmm3, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: leal (%rax,%rcx,4), %eax
+; AVX1-NEXT: vpextrb $6, %xmm3, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: leal (%rax,%rcx,8), %eax
+; AVX1-NEXT: vpextrb $8, %xmm3, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $4, %ecx
+; AVX1-NEXT: orl %eax, %ecx
+; AVX1-NEXT: vpextrb $10, %xmm3, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: shll $5, %eax
+; AVX1-NEXT: orl %ecx, %eax
+; AVX1-NEXT: vpextrb $12, %xmm3, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $6, %ecx
+; AVX1-NEXT: vpextrb $14, %xmm3, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $7, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpgtw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $8, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $2, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $9, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $10, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $6, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $11, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $12, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $10, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $13, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $14, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $14, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $15, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpcmpgtw %xmm1, %xmm2, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $16, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $2, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $17, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $18, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $6, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $19, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $20, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $10, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $21, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $22, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $14, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $23, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
+; AVX1-NEXT: vpcmpgtw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $24, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $2, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $25, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $26, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $6, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $27, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $28, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $10, %xmm0, %edx
+; AVX1-NEXT: andl $1, %edx
+; AVX1-NEXT: shll $29, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: shll $30, %ecx
+; AVX1-NEXT: orl %edx, %ecx
+; AVX1-NEXT: vpextrb $14, %xmm0, %edx
+; AVX1-NEXT: shll $31, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: orl %eax, %edx
+; AVX1-NEXT: vmovd %edx, %xmm0
+; AVX1-NEXT: vpextrw $0, %xmm0, %ecx
+; AVX1-NEXT: vpextrw $1, %xmm0, %eax
+; AVX1-NEXT: addl %ecx, %eax
+; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: bitcast_v32i16_to_v2i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $32, %rsp
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpextrb $2, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: leal (%rcx,%rax,2), %eax
+; AVX2-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: leal (%rax,%rcx,4), %eax
+; AVX2-NEXT: vpextrb $6, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: leal (%rax,%rcx,8), %eax
+; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $4, %ecx
+; AVX2-NEXT: orl %eax, %ecx
+; AVX2-NEXT: vpextrb $10, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: shll $5, %eax
+; AVX2-NEXT: orl %ecx, %eax
+; AVX2-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $6, %ecx
+; AVX2-NEXT: vpextrb $14, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $7, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $8, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $2, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $9, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $10, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $6, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $11, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $12, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $10, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $13, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $14, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $14, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $15, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpcmpgtw %ymm1, %ymm2, %ymm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $16, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $2, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $17, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $18, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $6, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $19, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $20, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $10, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $21, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $22, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $14, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $23, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $24, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $2, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $25, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $4, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $26, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $6, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $27, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $8, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $28, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $10, %xmm0, %edx
+; AVX2-NEXT: andl $1, %edx
+; AVX2-NEXT: shll $29, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpextrb $12, %xmm0, %ecx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: shll $30, %ecx
+; AVX2-NEXT: orl %edx, %ecx
+; AVX2-NEXT: vpextrb $14, %xmm0, %edx
+; AVX2-NEXT: shll $31, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: orl %eax, %edx
+; AVX2-NEXT: vmovd %edx, %xmm0
+; AVX2-NEXT: vpextrw $0, %xmm0, %ecx
+; AVX2-NEXT: vpextrw $1, %xmm0, %eax
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX2-NEXT: movq %rbp, %rsp
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: bitcast_v32i16_to_v2i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: movq %rsp, %rbp
+; AVX512-NEXT: andq $-32, %rsp
+; AVX512-NEXT: subq $32, %rsp
+; AVX512-NEXT: vpmovw2m %zmm0, %k0
+; AVX512-NEXT: kmovd %k0, (%rsp)
+; AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: vpextrw $0, %xmm0, %ecx
+; AVX512-NEXT: vpextrw $1, %xmm0, %eax
+; AVX512-NEXT: addl %ecx, %eax
+; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX512-NEXT: movq %rbp, %rsp
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <32 x i16> %a0, zeroinitializer
+ %2 = bitcast <32 x i1> %1 to <2 x i16>
+ %3 = extractelement <2 x i16> %2, i32 0
+ %4 = extractelement <2 x i16> %2, i32 1
+ %5 = add i16 %3, %4
+ ret i16 %5
+}
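
The bare retq in the SSE2-SSSE3 block above is the interesting part: <2 x i16> is not a simple type, and the result of the bitcast appears to be dropped entirely before lowering, so %ax comes back as garbage. A standalone repro sketch (same body as the test, placeholder name) that makes the behavior easy to bisect outside the FileCheck harness:

; Sketch only -- reproduces the empty SSE2 output in isolation:
;   llc < repro.ll -mtriple=x86_64-unknown-unknown -mattr=+sse2
define i16 @repro_v32i16_to_v2i16(<32 x i16> %a0) nounwind {
  %cmp = icmp slt <32 x i16> %a0, zeroinitializer
  %v   = bitcast <32 x i1> %cmp to <2 x i16>     ; non-simple result type
  %e0  = extractelement <2 x i16> %v, i32 0
  %e1  = extractelement <2 x i16> %v, i32 1
  %r   = add i16 %e0, %e1
  ret i16 %r
}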
+
+define i32 @bitcast_v64i8_to_v2i32(<64 x i8> %a0) nounwind {
+; SSE2-SSSE3-LABEL: bitcast_v64i8_to_v2i32:
+; SSE2-SSSE3: # %bb.0:
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX12-LABEL: bitcast_v64i8_to_v2i32:
+; AVX12: # %bb.0:
+; AVX12-NEXT: retq
+;
+; AVX512-LABEL: bitcast_v64i8_to_v2i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovb2m %zmm0, %k0
+; AVX512-NEXT: kmovq %k0, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: vmovd %xmm0, %ecx
+; AVX512-NEXT: vpextrd $1, %xmm0, %eax
+; AVX512-NEXT: addl %ecx, %eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <64 x i8> %a0, zeroinitializer
+ %2 = bitcast <64 x i1> %1 to <2 x i32>
+ %3 = extractelement <2 x i32> %2, i32 0
+ %4 = extractelement <2 x i32> %2, i32 1
+ %5 = add i32 %3, %4
+ ret i32 %5
+}
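
Same story at 64 bits (the SSE and AVX1/2 blocks again collapse to a bare retq): AVX512 round-trips the mask through a kmovq stack spill and a vmovq reload just to read the two i32 halves back out. A scalar sketch of the equivalent split (placeholder name, same little-endian bit-layout assumption as above), which needs only a shift and two truncs once the mask is in a GPR:

; Sketch only -- equivalent to bitcast_v64i8_to_v2i32 without the stack round-trip.
define i32 @sketch_split_i64_mask(<64 x i8> %a0) nounwind {
  %cmp  = icmp slt <64 x i8> %a0, zeroinitializer
  %mask = bitcast <64 x i1> %cmp to i64  ; 64 sign bits packed into an i64
  %lo   = trunc i64 %mask to i32         ; element 0 of the <2 x i32> view
  %hib  = lshr i64 %mask, 32
  %hi   = trunc i64 %hib to i32          ; element 1 of the <2 x i32> view
  %sum  = add i32 %lo, %hi
  ret i32 %sum
}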