[llvm] r294296 - [X86][SSE] Added 256-bit vector test cases
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 7 04:01:36 PST 2017
Author: rksimon
Date: Tue Feb 7 06:01:36 2017
New Revision: 294296
URL: http://llvm.org/viewvc/llvm-project?rev=294296&view=rev
Log:
[X86][SSE] Added 256-bit vector test cases
Exposes some poor codegen with identity shuffles, due to a bad interaction between insert_subvector(extract_subvector) and concat_subvectors patterns
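For reference, a minimal sketch of the kind of pattern involved (an illustrative reduction, not part of this commit; the function name is made up): extracting both 128-bit halves of a 256-bit vector and concatenating them straight back together is an identity shuffle, which should fold away entirely instead of surviving as a redundant vblendps/vpblendd on AVX targets.

; Illustrative only: %cat is identical to %a, so ideally no shuffle
; instruction is emitted at all.
define <4 x i64> @identity_concat(<4 x i64> %a) nounwind {
  %lo = shufflevector <4 x i64> %a, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
  %hi = shufflevector <4 x i64> %a, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
  %cat = shufflevector <2 x i64> %lo, <2 x i64> %hi, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i64> %cat
}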
Modified:
llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll
Modified: llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll?rev=294296&r1=294295&r2=294296&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll (original)
+++ llvm/trunk/test/CodeGen/X86/clear_upper_vector_element_bits.ll Tue Feb 7 06:01:36 2017
@@ -35,6 +35,47 @@ define <2 x i64> @_clearupper2xi64a(<2 x
ret <2 x i64> %v1
}
+; FIXME: Unnecessary vblendps/vpblendd on AVX targets
+define <4 x i64> @_clearupper4xi64a(<4 x i64>) nounwind {
+; SSE-LABEL: _clearupper4xi64a:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [4294967295,4294967295]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper4xi64a:
+; AVX1: # BB#0:
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper4xi64a:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-NEXT: retq
+ %x0 = extractelement <4 x i64> %0, i32 0
+ %x1 = extractelement <4 x i64> %0, i32 1
+ %x2 = extractelement <4 x i64> %0, i32 2
+ %x3 = extractelement <4 x i64> %0, i32 3
+ %trunc0 = trunc i64 %x0 to i32
+ %trunc1 = trunc i64 %x1 to i32
+ %trunc2 = trunc i64 %x2 to i32
+ %trunc3 = trunc i64 %x3 to i32
+ %ext0 = zext i32 %trunc0 to i64
+ %ext1 = zext i32 %trunc1 to i64
+ %ext2 = zext i32 %trunc2 to i64
+ %ext3 = zext i32 %trunc3 to i64
+ %v0 = insertelement <4 x i64> undef, i64 %ext0, i32 0
+ %v1 = insertelement <4 x i64> %v0, i64 %ext1, i32 1
+ %v2 = insertelement <4 x i64> %v1, i64 %ext2, i32 2
+ %v3 = insertelement <4 x i64> %v2, i64 %ext3, i32 3
+ ret <4 x i64> %v3
+}
+
define <4 x i32> @_clearupper4xi32a(<4 x i32>) nounwind {
; SSE-LABEL: _clearupper4xi32a:
; SSE: # BB#0:
@@ -65,6 +106,63 @@ define <4 x i32> @_clearupper4xi32a(<4 x
ret <4 x i32> %v3
}
+; FIXME: Unnecessary vblendps on AVX1 target
+; FIXME: Missed vpblendw on AVX2 target
+define <8 x i32> @_clearupper8xi32a(<8 x i32>) nounwind {
+; SSE-LABEL: _clearupper8xi32a:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [65535,65535,65535,65535]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper8xi32a:
+; AVX1: # BB#0:
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper8xi32a:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %x0 = extractelement <8 x i32> %0, i32 0
+ %x1 = extractelement <8 x i32> %0, i32 1
+ %x2 = extractelement <8 x i32> %0, i32 2
+ %x3 = extractelement <8 x i32> %0, i32 3
+ %x4 = extractelement <8 x i32> %0, i32 4
+ %x5 = extractelement <8 x i32> %0, i32 5
+ %x6 = extractelement <8 x i32> %0, i32 6
+ %x7 = extractelement <8 x i32> %0, i32 7
+ %trunc0 = trunc i32 %x0 to i16
+ %trunc1 = trunc i32 %x1 to i16
+ %trunc2 = trunc i32 %x2 to i16
+ %trunc3 = trunc i32 %x3 to i16
+ %trunc4 = trunc i32 %x4 to i16
+ %trunc5 = trunc i32 %x5 to i16
+ %trunc6 = trunc i32 %x6 to i16
+ %trunc7 = trunc i32 %x7 to i16
+ %ext0 = zext i16 %trunc0 to i32
+ %ext1 = zext i16 %trunc1 to i32
+ %ext2 = zext i16 %trunc2 to i32
+ %ext3 = zext i16 %trunc3 to i32
+ %ext4 = zext i16 %trunc4 to i32
+ %ext5 = zext i16 %trunc5 to i32
+ %ext6 = zext i16 %trunc6 to i32
+ %ext7 = zext i16 %trunc7 to i32
+ %v0 = insertelement <8 x i32> undef, i32 %ext0, i32 0
+ %v1 = insertelement <8 x i32> %v0, i32 %ext1, i32 1
+ %v2 = insertelement <8 x i32> %v1, i32 %ext2, i32 2
+ %v3 = insertelement <8 x i32> %v2, i32 %ext3, i32 3
+ %v4 = insertelement <8 x i32> %v3, i32 %ext4, i32 4
+ %v5 = insertelement <8 x i32> %v4, i32 %ext5, i32 5
+ %v6 = insertelement <8 x i32> %v5, i32 %ext6, i32 6
+ %v7 = insertelement <8 x i32> %v6, i32 %ext7, i32 7
+ ret <8 x i32> %v7
+}
+
define <8 x i16> @_clearupper8xi16a(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16a:
; SSE: # BB#0:
@@ -131,6 +229,145 @@ define <8 x i16> @_clearupper8xi16a(<8 x
ret <8 x i16> %v7
}
+; FIXME: Unnecessary vblendps/vpblendd on AVX targets
+define <16 x i16> @_clearupper16xi16a(<16 x i16>) nounwind {
+; SSE-LABEL: _clearupper16xi16a:
+; SSE: # BB#0:
+; SSE-NEXT: pushq %rbp
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %r12
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: pextrw $1, %xmm0, %edi
+; SSE-NEXT: pextrw $2, %xmm0, %eax
+; SSE-NEXT: pextrw $3, %xmm0, %ecx
+; SSE-NEXT: pextrw $4, %xmm0, %edx
+; SSE-NEXT: pextrw $5, %xmm0, %esi
+; SSE-NEXT: pextrw $6, %xmm0, %ebx
+; SSE-NEXT: pextrw $7, %xmm0, %ebp
+; SSE-NEXT: pextrw $1, %xmm1, %r10d
+; SSE-NEXT: pextrw $2, %xmm1, %r9d
+; SSE-NEXT: pextrw $3, %xmm1, %r14d
+; SSE-NEXT: pextrw $4, %xmm1, %r8d
+; SSE-NEXT: pextrw $5, %xmm1, %r15d
+; SSE-NEXT: pextrw $6, %xmm1, %r11d
+; SSE-NEXT: pextrw $7, %xmm1, %r12d
+; SSE-NEXT: movd %ebp, %xmm2
+; SSE-NEXT: movd %ecx, %xmm3
+; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE-NEXT: movd %esi, %xmm2
+; SSE-NEXT: movd %edi, %xmm4
+; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE-NEXT: movd %ebx, %xmm2
+; SSE-NEXT: movd %eax, %xmm3
+; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE-NEXT: movd %edx, %xmm2
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: movd %r12d, %xmm3
+; SSE-NEXT: movd %r14d, %xmm4
+; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE-NEXT: movd %r15d, %xmm3
+; SSE-NEXT: movd %r10d, %xmm5
+; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; SSE-NEXT: movd %r11d, %xmm3
+; SSE-NEXT: movd %r9d, %xmm4
+; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE-NEXT: movd %r8d, %xmm3
+; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r12
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: popq %rbp
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper16xi16a:
+; AVX1: # BB#0:
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper16xi16a:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,7]
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %x0 = extractelement <16 x i16> %0, i32 0
+ %x1 = extractelement <16 x i16> %0, i32 1
+ %x2 = extractelement <16 x i16> %0, i32 2
+ %x3 = extractelement <16 x i16> %0, i32 3
+ %x4 = extractelement <16 x i16> %0, i32 4
+ %x5 = extractelement <16 x i16> %0, i32 5
+ %x6 = extractelement <16 x i16> %0, i32 6
+ %x7 = extractelement <16 x i16> %0, i32 7
+ %x8 = extractelement <16 x i16> %0, i32 8
+ %x9 = extractelement <16 x i16> %0, i32 9
+ %x10 = extractelement <16 x i16> %0, i32 10
+ %x11 = extractelement <16 x i16> %0, i32 11
+ %x12 = extractelement <16 x i16> %0, i32 12
+ %x13 = extractelement <16 x i16> %0, i32 13
+ %x14 = extractelement <16 x i16> %0, i32 14
+ %x15 = extractelement <16 x i16> %0, i32 15
+ %trunc0 = trunc i16 %x0 to i8
+ %trunc1 = trunc i16 %x1 to i8
+ %trunc2 = trunc i16 %x2 to i8
+ %trunc3 = trunc i16 %x3 to i8
+ %trunc4 = trunc i16 %x4 to i8
+ %trunc5 = trunc i16 %x5 to i8
+ %trunc6 = trunc i16 %x6 to i8
+ %trunc7 = trunc i16 %x7 to i8
+ %trunc8 = trunc i16 %x8 to i8
+ %trunc9 = trunc i16 %x9 to i8
+ %trunc10 = trunc i16 %x10 to i8
+ %trunc11 = trunc i16 %x11 to i8
+ %trunc12 = trunc i16 %x12 to i8
+ %trunc13 = trunc i16 %x13 to i8
+ %trunc14 = trunc i16 %x14 to i8
+ %trunc15 = trunc i16 %x15 to i8
+ %ext0 = zext i8 %trunc0 to i16
+ %ext1 = zext i8 %trunc1 to i16
+ %ext2 = zext i8 %trunc2 to i16
+ %ext3 = zext i8 %trunc3 to i16
+ %ext4 = zext i8 %trunc4 to i16
+ %ext5 = zext i8 %trunc5 to i16
+ %ext6 = zext i8 %trunc6 to i16
+ %ext7 = zext i8 %trunc7 to i16
+ %ext8 = zext i8 %trunc8 to i16
+ %ext9 = zext i8 %trunc9 to i16
+ %ext10 = zext i8 %trunc10 to i16
+ %ext11 = zext i8 %trunc11 to i16
+ %ext12 = zext i8 %trunc12 to i16
+ %ext13 = zext i8 %trunc13 to i16
+ %ext14 = zext i8 %trunc14 to i16
+ %ext15 = zext i8 %trunc15 to i16
+ %v0 = insertelement <16 x i16> undef, i16 %ext0, i32 0
+ %v1 = insertelement <16 x i16> %v0, i16 %ext1, i32 1
+ %v2 = insertelement <16 x i16> %v1, i16 %ext2, i32 2
+ %v3 = insertelement <16 x i16> %v2, i16 %ext3, i32 3
+ %v4 = insertelement <16 x i16> %v3, i16 %ext4, i32 4
+ %v5 = insertelement <16 x i16> %v4, i16 %ext5, i32 5
+ %v6 = insertelement <16 x i16> %v5, i16 %ext6, i32 6
+ %v7 = insertelement <16 x i16> %v6, i16 %ext7, i32 7
+ %v8 = insertelement <16 x i16> %v7, i16 %ext8, i32 8
+ %v9 = insertelement <16 x i16> %v8, i16 %ext9, i32 9
+ %v10 = insertelement <16 x i16> %v9, i16 %ext10, i32 10
+ %v11 = insertelement <16 x i16> %v10, i16 %ext11, i32 11
+ %v12 = insertelement <16 x i16> %v11, i16 %ext12, i32 12
+ %v13 = insertelement <16 x i16> %v12, i16 %ext13, i32 13
+ %v14 = insertelement <16 x i16> %v13, i16 %ext14, i32 14
+ %v15 = insertelement <16 x i16> %v14, i16 %ext15, i32 15
+ ret <16 x i16> %v15
+}
+
define <16 x i8> @_clearupper16xi8a(<16 x i8>) nounwind {
; SSE-LABEL: _clearupper16xi8a:
; SSE: # BB#0:
@@ -257,6 +494,266 @@ define <16 x i8> @_clearupper16xi8a(<16
ret <16 x i8> %v15
}
+define <32 x i8> @_clearupper32xi8a(<32 x i8>) nounwind {
+; SSE-LABEL: _clearupper32xi8a:
+; SSE: # BB#0:
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm0
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE-NEXT: movd %esi, %xmm0
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE-NEXT: movd %ecx, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE-NEXT: movd %edx, %xmm0
+; SSE-NEXT: movd %esi, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE-NEXT: movd %edi, %xmm0
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE-NEXT: movd %edx, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE-NEXT: movd %r9d, %xmm0
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE-NEXT: movd %r8d, %xmm0
+; SSE-NEXT: movd %ecx, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm1
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movd %eax, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE-NEXT: movd %esi, %xmm1
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE-NEXT: movd %ecx, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; SSE-NEXT: movd %edx, %xmm1
+; SSE-NEXT: movd %esi, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE-NEXT: movd %edi, %xmm1
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSE-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
+; SSE-NEXT: movd %edx, %xmm5
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3],xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; SSE-NEXT: movd %r9d, %xmm1
+; SSE-NEXT: movd %eax, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE-NEXT: movd %r8d, %xmm1
+; SSE-NEXT: movd %ecx, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; SSE-NEXT: movd {{.*#+}} xmm6 = mem[0],zero,zero,zero
+; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper32xi8a:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: vpextrb $1, %xmm0, %ecx
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpextrb $0, %xmm1, %edx
+; AVX1-NEXT: vpextrb $1, %xmm1, %esi
+; AVX1-NEXT: vmovd %edx, %xmm2
+; AVX1-NEXT: vpinsrb $1, %esi, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3,4,5,6,7]
+; AVX1-NEXT: vmovd %eax, %xmm2
+; AVX1-NEXT: vpinsrb $1, %ecx, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5,6,7]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper32xi8a:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: vpextrb $1, %xmm0, %ecx
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrb $0, %xmm1, %edx
+; AVX2-NEXT: vpextrb $1, %xmm1, %esi
+; AVX2-NEXT: vmovd %edx, %xmm2
+; AVX2-NEXT: vpinsrb $1, %esi, %xmm2, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3,4,5,6,7]
+; AVX2-NEXT: vmovd %eax, %xmm2
+; AVX2-NEXT: vpinsrb $1, %ecx, %xmm2, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5,6,7]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %x0 = extractelement <32 x i8> %0, i32 0
+ %x1 = extractelement <32 x i8> %0, i32 1
+ %x2 = extractelement <32 x i8> %0, i32 2
+ %x3 = extractelement <32 x i8> %0, i32 3
+ %x4 = extractelement <32 x i8> %0, i32 4
+ %x5 = extractelement <32 x i8> %0, i32 5
+ %x6 = extractelement <32 x i8> %0, i32 6
+ %x7 = extractelement <32 x i8> %0, i32 7
+ %x8 = extractelement <32 x i8> %0, i32 8
+ %x9 = extractelement <32 x i8> %0, i32 9
+ %x10 = extractelement <32 x i8> %0, i32 10
+ %x11 = extractelement <32 x i8> %0, i32 11
+ %x12 = extractelement <32 x i8> %0, i32 12
+ %x13 = extractelement <32 x i8> %0, i32 13
+ %x14 = extractelement <32 x i8> %0, i32 14
+ %x15 = extractelement <32 x i8> %0, i32 15
+ %x16 = extractelement <32 x i8> %0, i32 16
+ %x17 = extractelement <32 x i8> %0, i32 17
+ %x18 = extractelement <32 x i8> %0, i32 18
+ %x19 = extractelement <32 x i8> %0, i32 19
+ %x20 = extractelement <32 x i8> %0, i32 20
+ %x21 = extractelement <32 x i8> %0, i32 21
+ %x22 = extractelement <32 x i8> %0, i32 22
+ %x23 = extractelement <32 x i8> %0, i32 23
+ %x24 = extractelement <32 x i8> %0, i32 24
+ %x25 = extractelement <32 x i8> %0, i32 25
+ %x26 = extractelement <32 x i8> %0, i32 26
+ %x27 = extractelement <32 x i8> %0, i32 27
+ %x28 = extractelement <32 x i8> %0, i32 28
+ %x29 = extractelement <32 x i8> %0, i32 29
+ %x30 = extractelement <32 x i8> %0, i32 30
+ %x31 = extractelement <32 x i8> %0, i32 31
+ %trunc0 = trunc i8 %x0 to i4
+ %trunc1 = trunc i8 %x1 to i4
+ %trunc2 = trunc i8 %x2 to i4
+ %trunc3 = trunc i8 %x3 to i4
+ %trunc4 = trunc i8 %x4 to i4
+ %trunc5 = trunc i8 %x5 to i4
+ %trunc6 = trunc i8 %x6 to i4
+ %trunc7 = trunc i8 %x7 to i4
+ %trunc8 = trunc i8 %x8 to i4
+ %trunc9 = trunc i8 %x9 to i4
+ %trunc10 = trunc i8 %x10 to i4
+ %trunc11 = trunc i8 %x11 to i4
+ %trunc12 = trunc i8 %x12 to i4
+ %trunc13 = trunc i8 %x13 to i4
+ %trunc14 = trunc i8 %x14 to i4
+ %trunc15 = trunc i8 %x15 to i4
+ %trunc16 = trunc i8 %x16 to i4
+ %trunc17 = trunc i8 %x17 to i4
+ %trunc18 = trunc i8 %x18 to i4
+ %trunc19 = trunc i8 %x19 to i4
+ %trunc20 = trunc i8 %x20 to i4
+ %trunc21 = trunc i8 %x21 to i4
+ %trunc22 = trunc i8 %x22 to i4
+ %trunc23 = trunc i8 %x23 to i4
+ %trunc24 = trunc i8 %x24 to i4
+ %trunc25 = trunc i8 %x25 to i4
+ %trunc26 = trunc i8 %x26 to i4
+ %trunc27 = trunc i8 %x27 to i4
+ %trunc28 = trunc i8 %x28 to i4
+ %trunc29 = trunc i8 %x29 to i4
+ %trunc30 = trunc i8 %x30 to i4
+ %trunc31 = trunc i8 %x31 to i4
+ %ext0 = zext i4 %trunc0 to i8
+ %ext1 = zext i4 %trunc1 to i8
+ %ext2 = zext i4 %trunc2 to i8
+ %ext3 = zext i4 %trunc3 to i8
+ %ext4 = zext i4 %trunc4 to i8
+ %ext5 = zext i4 %trunc5 to i8
+ %ext6 = zext i4 %trunc6 to i8
+ %ext7 = zext i4 %trunc7 to i8
+ %ext8 = zext i4 %trunc8 to i8
+ %ext9 = zext i4 %trunc9 to i8
+ %ext10 = zext i4 %trunc10 to i8
+ %ext11 = zext i4 %trunc11 to i8
+ %ext12 = zext i4 %trunc12 to i8
+ %ext13 = zext i4 %trunc13 to i8
+ %ext14 = zext i4 %trunc14 to i8
+ %ext15 = zext i4 %trunc15 to i8
+ %ext16 = zext i4 %trunc16 to i8
+ %ext17 = zext i4 %trunc17 to i8
+ %ext18 = zext i4 %trunc18 to i8
+ %ext19 = zext i4 %trunc19 to i8
+ %ext20 = zext i4 %trunc20 to i8
+ %ext21 = zext i4 %trunc21 to i8
+ %ext22 = zext i4 %trunc22 to i8
+ %ext23 = zext i4 %trunc23 to i8
+ %ext24 = zext i4 %trunc24 to i8
+ %ext25 = zext i4 %trunc25 to i8
+ %ext26 = zext i4 %trunc26 to i8
+ %ext27 = zext i4 %trunc27 to i8
+ %ext28 = zext i4 %trunc28 to i8
+ %ext29 = zext i4 %trunc29 to i8
+ %ext30 = zext i4 %trunc30 to i8
+ %ext31 = zext i4 %trunc31 to i8
+ %v0 = insertelement <32 x i8> undef, i8 %ext0, i32 0
+ %v1 = insertelement <32 x i8> %v0, i8 %ext1, i32 1
+ %v2 = insertelement <32 x i8> %v1, i8 %ext2, i32 2
+ %v3 = insertelement <32 x i8> %v2, i8 %ext3, i32 3
+ %v4 = insertelement <32 x i8> %v3, i8 %ext4, i32 4
+ %v5 = insertelement <32 x i8> %v4, i8 %ext5, i32 5
+ %v6 = insertelement <32 x i8> %v5, i8 %ext6, i32 6
+ %v7 = insertelement <32 x i8> %v6, i8 %ext7, i32 7
+ %v8 = insertelement <32 x i8> %v7, i8 %ext8, i32 8
+ %v9 = insertelement <32 x i8> %v8, i8 %ext9, i32 9
+ %v10 = insertelement <32 x i8> %v9, i8 %ext10, i32 10
+ %v11 = insertelement <32 x i8> %v10, i8 %ext11, i32 11
+ %v12 = insertelement <32 x i8> %v11, i8 %ext12, i32 12
+ %v13 = insertelement <32 x i8> %v12, i8 %ext13, i32 13
+ %v14 = insertelement <32 x i8> %v13, i8 %ext14, i32 14
+ %v15 = insertelement <32 x i8> %v14, i8 %ext15, i32 15
+ %v16 = insertelement <32 x i8> %v15, i8 %ext16, i32 16
+ %v17 = insertelement <32 x i8> %v16, i8 %ext17, i32 17
+ %v18 = insertelement <32 x i8> %v17, i8 %ext18, i32 18
+ %v19 = insertelement <32 x i8> %v18, i8 %ext19, i32 19
+ %v20 = insertelement <32 x i8> %v19, i8 %ext20, i32 20
+ %v21 = insertelement <32 x i8> %v20, i8 %ext21, i32 21
+ %v22 = insertelement <32 x i8> %v21, i8 %ext22, i32 22
+ %v23 = insertelement <32 x i8> %v22, i8 %ext23, i32 23
+ %v24 = insertelement <32 x i8> %v23, i8 %ext24, i32 24
+ %v25 = insertelement <32 x i8> %v24, i8 %ext25, i32 25
+ %v26 = insertelement <32 x i8> %v25, i8 %ext26, i32 26
+ %v27 = insertelement <32 x i8> %v26, i8 %ext27, i32 27
+ %v28 = insertelement <32 x i8> %v27, i8 %ext28, i32 28
+ %v29 = insertelement <32 x i8> %v28, i8 %ext29, i32 29
+ %v30 = insertelement <32 x i8> %v29, i8 %ext30, i32 30
+ %v31 = insertelement <32 x i8> %v30, i8 %ext31, i32 31
+ ret <32 x i8> %v31
+}
+
define <2 x i64> @_clearupper2xi64b(<2 x i64>) nounwind {
; SSE-LABEL: _clearupper2xi64b:
; SSE: # BB#0:
@@ -281,6 +778,34 @@ define <2 x i64> @_clearupper2xi64b(<2 x
ret <2 x i64> %r
}
+define <4 x i64> @_clearupper4xi64b(<4 x i64>) nounwind {
+; SSE-LABEL: _clearupper4xi64b:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper4xi64b:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper4xi64b:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-NEXT: retq
+ %x32 = bitcast <4 x i64> %0 to <8 x i32>
+ %r0 = insertelement <8 x i32> %x32, i32 zeroinitializer, i32 1
+ %r1 = insertelement <8 x i32> %r0, i32 zeroinitializer, i32 3
+ %r2 = insertelement <8 x i32> %r1, i32 zeroinitializer, i32 5
+ %r3 = insertelement <8 x i32> %r2, i32 zeroinitializer, i32 7
+ %r = bitcast <8 x i32> %r3 to <4 x i64>
+ ret <4 x i64> %r
+}
+
define <4 x i32> @_clearupper4xi32b(<4 x i32>) nounwind {
; SSE-LABEL: _clearupper4xi32b:
; SSE: # BB#0:
@@ -301,6 +826,48 @@ define <4 x i32> @_clearupper4xi32b(<4 x
ret <4 x i32> %r
}
+define <8 x i32> @_clearupper8xi32b(<8 x i32>) nounwind {
+; SSE-LABEL: _clearupper8xi32b:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper8xi32b:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6],xmm1[7]
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper8xi32b:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6],xmm1[7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %x16 = bitcast <8 x i32> %0 to <16 x i16>
+ %r0 = insertelement <16 x i16> %x16, i16 zeroinitializer, i32 1
+ %r1 = insertelement <16 x i16> %r0, i16 zeroinitializer, i32 3
+ %r2 = insertelement <16 x i16> %r1, i16 zeroinitializer, i32 5
+ %r3 = insertelement <16 x i16> %r2, i16 zeroinitializer, i32 7
+ %r4 = insertelement <16 x i16> %r3, i16 zeroinitializer, i32 9
+ %r5 = insertelement <16 x i16> %r4, i16 zeroinitializer, i32 11
+ %r6 = insertelement <16 x i16> %r5, i16 zeroinitializer, i32 13
+ %r7 = insertelement <16 x i16> %r6, i16 zeroinitializer, i32 15
+ %r = bitcast <16 x i16> %r7 to <8 x i32>
+ ret <8 x i32> %r
+}
+
define <8 x i16> @_clearupper8xi16b(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16b:
; SSE: # BB#0:
@@ -324,6 +891,56 @@ define <8 x i16> @_clearupper8xi16b(<8 x
ret <8 x i16> %r
}
+define <16 x i16> @_clearupper16xi16b(<16 x i16>) nounwind {
+; SSE-LABEL: _clearupper16xi16b:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper16xi16b:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: xorl %eax, %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vandpd {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper16xi16b:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+ %x8 = bitcast <16 x i16> %0 to <32 x i8>
+ %r0 = insertelement <32 x i8> %x8, i8 zeroinitializer, i32 1
+ %r1 = insertelement <32 x i8> %r0, i8 zeroinitializer, i32 3
+ %r2 = insertelement <32 x i8> %r1, i8 zeroinitializer, i32 5
+ %r3 = insertelement <32 x i8> %r2, i8 zeroinitializer, i32 7
+ %r4 = insertelement <32 x i8> %r3, i8 zeroinitializer, i32 9
+ %r5 = insertelement <32 x i8> %r4, i8 zeroinitializer, i32 11
+ %r6 = insertelement <32 x i8> %r5, i8 zeroinitializer, i32 13
+ %r7 = insertelement <32 x i8> %r6, i8 zeroinitializer, i32 15
+ %r8 = insertelement <32 x i8> %r7, i8 zeroinitializer, i32 17
+ %r9 = insertelement <32 x i8> %r8, i8 zeroinitializer, i32 19
+ %r10 = insertelement <32 x i8> %r9, i8 zeroinitializer, i32 21
+ %r11 = insertelement <32 x i8> %r10, i8 zeroinitializer, i32 23
+ %r12 = insertelement <32 x i8> %r11, i8 zeroinitializer, i32 25
+ %r13 = insertelement <32 x i8> %r12, i8 zeroinitializer, i32 27
+ %r14 = insertelement <32 x i8> %r13, i8 zeroinitializer, i32 29
+ %r15 = insertelement <32 x i8> %r14, i8 zeroinitializer, i32 31
+ %r = bitcast <32 x i8> %r15 to <16 x i16>
+ ret <16 x i16> %r
+}
+
define <16 x i8> @_clearupper16xi8b(<16 x i8>) nounwind {
; SSE-LABEL: _clearupper16xi8b:
; SSE: # BB#0:
@@ -502,6 +1119,463 @@ define <16 x i8> @_clearupper16xi8b(<16
ret <16 x i8> %r
}
+define <32 x i8> @_clearupper32xi8b(<32 x i8>) nounwind {
+; SSE-LABEL: _clearupper32xi8b:
+; SSE: # BB#0:
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE-NEXT: movd %xmm0, %rcx
+; SSE-NEXT: movq %rcx, %r8
+; SSE-NEXT: movq %rcx, %r9
+; SSE-NEXT: movq %rcx, %r10
+; SSE-NEXT: movq %rcx, %rax
+; SSE-NEXT: movq %rcx, %rdx
+; SSE-NEXT: movq %rcx, %rsi
+; SSE-NEXT: movq %rcx, %rdi
+; SSE-NEXT: andb $15, %cl
+; SSE-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movd %xmm2, %rcx
+; SSE-NEXT: shrq $56, %rdi
+; SSE-NEXT: andb $15, %dil
+; SSE-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movq %rcx, %r11
+; SSE-NEXT: shrq $48, %rsi
+; SSE-NEXT: andb $15, %sil
+; SSE-NEXT: movb %sil, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movq %rcx, %r14
+; SSE-NEXT: shrq $40, %rdx
+; SSE-NEXT: andb $15, %dl
+; SSE-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movq %rcx, %rdx
+; SSE-NEXT: shrq $32, %rax
+; SSE-NEXT: andb $15, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movq %rcx, %rax
+; SSE-NEXT: shrq $24, %r10
+; SSE-NEXT: andb $15, %r10b
+; SSE-NEXT: movb %r10b, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movq %rcx, %rdi
+; SSE-NEXT: shrq $16, %r9
+; SSE-NEXT: andb $15, %r9b
+; SSE-NEXT: movb %r9b, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movq %rcx, %rsi
+; SSE-NEXT: shrq $8, %r8
+; SSE-NEXT: andb $15, %r8b
+; SSE-NEXT: movb %r8b, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movq %rcx, %rbx
+; SSE-NEXT: movb $0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andb $15, %cl
+; SSE-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: shrq $56, %rbx
+; SSE-NEXT: andb $15, %bl
+; SSE-NEXT: movb %bl, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: shrq $48, %rsi
+; SSE-NEXT: andb $15, %sil
+; SSE-NEXT: movb %sil, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: shrq $40, %rdi
+; SSE-NEXT: andb $15, %dil
+; SSE-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: shrq $32, %rax
+; SSE-NEXT: andb $15, %al
+; SSE-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: shrq $24, %rdx
+; SSE-NEXT: andb $15, %dl
+; SSE-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: shrq $16, %r14
+; SSE-NEXT: andb $15, %r14b
+; SSE-NEXT: movb %r14b, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: shrq $8, %r11
+; SSE-NEXT: andb $15, %r11b
+; SSE-NEXT: movb %r11b, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movb $0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r14
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper32xi8b:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: pushq %r15
+; AVX1-NEXT: pushq %r14
+; AVX1-NEXT: pushq %r13
+; AVX1-NEXT: pushq %r12
+; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: vpextrq $1, %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rcx
+; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rdx
+; AVX1-NEXT: movq %rcx, %r8
+; AVX1-NEXT: movq %rcx, %r9
+; AVX1-NEXT: movq %rcx, %r10
+; AVX1-NEXT: movq %rcx, %r11
+; AVX1-NEXT: movq %rcx, %r14
+; AVX1-NEXT: movq %rcx, %r15
+; AVX1-NEXT: movq %rdx, %r12
+; AVX1-NEXT: movq %rdx, %r13
+; AVX1-NEXT: movq %rdx, %rdi
+; AVX1-NEXT: movq %rdx, %rax
+; AVX1-NEXT: movq %rdx, %rsi
+; AVX1-NEXT: movq %rdx, %rbx
+; AVX1-NEXT: movq %rdx, %rbp
+; AVX1-NEXT: andb $15, %dl
+; AVX1-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movq %rcx, %rdx
+; AVX1-NEXT: andb $15, %cl
+; AVX1-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $56, %rbp
+; AVX1-NEXT: andb $15, %bpl
+; AVX1-NEXT: movb %bpl, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $48, %rbx
+; AVX1-NEXT: andb $15, %bl
+; AVX1-NEXT: movb %bl, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $40, %rsi
+; AVX1-NEXT: andb $15, %sil
+; AVX1-NEXT: movb %sil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $32, %rax
+; AVX1-NEXT: andb $15, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $24, %rdi
+; AVX1-NEXT: andb $15, %dil
+; AVX1-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $16, %r13
+; AVX1-NEXT: andb $15, %r13b
+; AVX1-NEXT: movb %r13b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $8, %r12
+; AVX1-NEXT: andb $15, %r12b
+; AVX1-NEXT: movb %r12b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: shrq $8, %r8
+; AVX1-NEXT: shrq $16, %r9
+; AVX1-NEXT: shrq $24, %r10
+; AVX1-NEXT: shrq $32, %r11
+; AVX1-NEXT: shrq $40, %r14
+; AVX1-NEXT: shrq $48, %r15
+; AVX1-NEXT: shrq $56, %rdx
+; AVX1-NEXT: andb $15, %dl
+; AVX1-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %r15b
+; AVX1-NEXT: movb %r15b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %r14b
+; AVX1-NEXT: movb %r14b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %r11b
+; AVX1-NEXT: movb %r11b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %r10b
+; AVX1-NEXT: movb %r10b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %r9b
+; AVX1-NEXT: movb %r9b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: andb $15, %r8b
+; AVX1-NEXT: movb %r8b, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: movq %rax, %rdx
+; AVX1-NEXT: movq %rax, %rsi
+; AVX1-NEXT: movq %rax, %rdi
+; AVX1-NEXT: movl %eax, %ebp
+; AVX1-NEXT: movl %eax, %ebx
+; AVX1-NEXT: vmovd %eax, %xmm1
+; AVX1-NEXT: shrl $8, %eax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX1-NEXT: shrl $16, %ebx
+; AVX1-NEXT: vpinsrb $2, %ebx, %xmm1, %xmm1
+; AVX1-NEXT: shrl $24, %ebp
+; AVX1-NEXT: vpinsrb $3, %ebp, %xmm1, %xmm1
+; AVX1-NEXT: shrq $32, %rdi
+; AVX1-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1
+; AVX1-NEXT: shrq $40, %rsi
+; AVX1-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
+; AVX1-NEXT: movb $0, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm2
+; AVX1-NEXT: shrq $48, %rdx
+; AVX1-NEXT: vpinsrb $6, %edx, %xmm1, %xmm1
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: shrq $56, %rcx
+; AVX1-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $8, %ecx
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $16, %ecx
+; AVX1-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $24, %ecx
+; AVX1-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $32, %rcx
+; AVX1-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $40, %rcx
+; AVX1-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $48, %rcx
+; AVX1-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; AVX1-NEXT: vmovq %xmm2, %rcx
+; AVX1-NEXT: shrq $56, %rax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX1-NEXT: movl %ecx, %eax
+; AVX1-NEXT: shrl $8, %eax
+; AVX1-NEXT: vmovd %ecx, %xmm1
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %ecx, %eax
+; AVX1-NEXT: shrl $16, %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movl %ecx, %eax
+; AVX1-NEXT: shrl $24, %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rcx, %rax
+; AVX1-NEXT: shrq $32, %rax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rcx, %rax
+; AVX1-NEXT: shrq $40, %rax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX1-NEXT: movq %rcx, %rax
+; AVX1-NEXT: shrq $48, %rax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpextrq $1, %xmm2, %rax
+; AVX1-NEXT: shrq $56, %rcx
+; AVX1-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $8, %ecx
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrb $9, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $16, %ecx
+; AVX1-NEXT: vpinsrb $10, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $24, %ecx
+; AVX1-NEXT: vpinsrb $11, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $32, %rcx
+; AVX1-NEXT: vpinsrb $12, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $40, %rcx
+; AVX1-NEXT: vpinsrb $13, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $48, %rcx
+; AVX1-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1
+; AVX1-NEXT: shrq $56, %rax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: popq %rbx
+; AVX1-NEXT: popq %r12
+; AVX1-NEXT: popq %r13
+; AVX1-NEXT: popq %r14
+; AVX1-NEXT: popq %r15
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper32xi8b:
+; AVX2: # BB#0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: vpextrq $1, %xmm0, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vmovq %xmm0, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx
+; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rdx
+; AVX2-NEXT: movq %rcx, %r8
+; AVX2-NEXT: movq %rcx, %r9
+; AVX2-NEXT: movq %rcx, %r10
+; AVX2-NEXT: movq %rcx, %r11
+; AVX2-NEXT: movq %rcx, %r14
+; AVX2-NEXT: movq %rcx, %r15
+; AVX2-NEXT: movq %rdx, %r12
+; AVX2-NEXT: movq %rdx, %r13
+; AVX2-NEXT: movq %rdx, %rdi
+; AVX2-NEXT: movq %rdx, %rax
+; AVX2-NEXT: movq %rdx, %rsi
+; AVX2-NEXT: movq %rdx, %rbx
+; AVX2-NEXT: movq %rdx, %rbp
+; AVX2-NEXT: andb $15, %dl
+; AVX2-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movq %rcx, %rdx
+; AVX2-NEXT: andb $15, %cl
+; AVX2-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $56, %rbp
+; AVX2-NEXT: andb $15, %bpl
+; AVX2-NEXT: movb %bpl, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $48, %rbx
+; AVX2-NEXT: andb $15, %bl
+; AVX2-NEXT: movb %bl, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $40, %rsi
+; AVX2-NEXT: andb $15, %sil
+; AVX2-NEXT: movb %sil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $32, %rax
+; AVX2-NEXT: andb $15, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $24, %rdi
+; AVX2-NEXT: andb $15, %dil
+; AVX2-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $16, %r13
+; AVX2-NEXT: andb $15, %r13b
+; AVX2-NEXT: movb %r13b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $8, %r12
+; AVX2-NEXT: andb $15, %r12b
+; AVX2-NEXT: movb %r12b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: shrq $8, %r8
+; AVX2-NEXT: shrq $16, %r9
+; AVX2-NEXT: shrq $24, %r10
+; AVX2-NEXT: shrq $32, %r11
+; AVX2-NEXT: shrq $40, %r14
+; AVX2-NEXT: shrq $48, %r15
+; AVX2-NEXT: shrq $56, %rdx
+; AVX2-NEXT: andb $15, %dl
+; AVX2-NEXT: movb %dl, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %r15b
+; AVX2-NEXT: movb %r15b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %r14b
+; AVX2-NEXT: movb %r14b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %r11b
+; AVX2-NEXT: movb %r11b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %r10b
+; AVX2-NEXT: movb %r10b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %r9b
+; AVX2-NEXT: movb %r9b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: andb $15, %r8b
+; AVX2-NEXT: movb %r8b, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: movq %rax, %rdx
+; AVX2-NEXT: movq %rax, %rsi
+; AVX2-NEXT: movq %rax, %rdi
+; AVX2-NEXT: movl %eax, %ebp
+; AVX2-NEXT: movl %eax, %ebx
+; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: shrl $8, %eax
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX2-NEXT: shrl $16, %ebx
+; AVX2-NEXT: vpinsrb $2, %ebx, %xmm1, %xmm1
+; AVX2-NEXT: shrl $24, %ebp
+; AVX2-NEXT: vpinsrb $3, %ebp, %xmm1, %xmm1
+; AVX2-NEXT: shrq $32, %rdi
+; AVX2-NEXT: vpinsrb $4, %edi, %xmm1, %xmm1
+; AVX2-NEXT: shrq $40, %rsi
+; AVX2-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
+; AVX2-NEXT: movb $0, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm2
+; AVX2-NEXT: shrq $48, %rdx
+; AVX2-NEXT: vpinsrb $6, %edx, %xmm1, %xmm1
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: shrq $56, %rcx
+; AVX2-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $8, %ecx
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $16, %ecx
+; AVX2-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $24, %ecx
+; AVX2-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $32, %rcx
+; AVX2-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $40, %rcx
+; AVX2-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $48, %rcx
+; AVX2-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
+; AVX2-NEXT: vmovq %xmm2, %rcx
+; AVX2-NEXT: shrq $56, %rax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX2-NEXT: movl %ecx, %eax
+; AVX2-NEXT: shrl $8, %eax
+; AVX2-NEXT: vmovd %ecx, %xmm1
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %ecx, %eax
+; AVX2-NEXT: shrl $16, %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movl %ecx, %eax
+; AVX2-NEXT: shrl $24, %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rcx, %rax
+; AVX2-NEXT: shrq $32, %rax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rcx, %rax
+; AVX2-NEXT: shrq $40, %rax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX2-NEXT: movq %rcx, %rax
+; AVX2-NEXT: shrq $48, %rax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpextrq $1, %xmm2, %rax
+; AVX2-NEXT: shrq $56, %rcx
+; AVX2-NEXT: vpinsrb $7, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $8, %ecx
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrb $9, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $16, %ecx
+; AVX2-NEXT: vpinsrb $10, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $24, %ecx
+; AVX2-NEXT: vpinsrb $11, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $32, %rcx
+; AVX2-NEXT: vpinsrb $12, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $40, %rcx
+; AVX2-NEXT: vpinsrb $13, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $48, %rcx
+; AVX2-NEXT: vpinsrb $14, %ecx, %xmm1, %xmm1
+; AVX2-NEXT: shrq $56, %rax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+ %x4 = bitcast <32 x i8> %0 to <64 x i4>
+ %r0 = insertelement <64 x i4> %x4, i4 zeroinitializer, i32 1
+ %r1 = insertelement <64 x i4> %r0, i4 zeroinitializer, i32 3
+ %r2 = insertelement <64 x i4> %r1, i4 zeroinitializer, i32 5
+ %r3 = insertelement <64 x i4> %r2, i4 zeroinitializer, i32 7
+ %r4 = insertelement <64 x i4> %r3, i4 zeroinitializer, i32 9
+ %r5 = insertelement <64 x i4> %r4, i4 zeroinitializer, i32 11
+ %r6 = insertelement <64 x i4> %r5, i4 zeroinitializer, i32 13
+ %r7 = insertelement <64 x i4> %r6, i4 zeroinitializer, i32 15
+ %r8 = insertelement <64 x i4> %r7, i4 zeroinitializer, i32 17
+ %r9 = insertelement <64 x i4> %r8, i4 zeroinitializer, i32 19
+ %r10 = insertelement <64 x i4> %r9, i4 zeroinitializer, i32 21
+ %r11 = insertelement <64 x i4> %r10, i4 zeroinitializer, i32 23
+ %r12 = insertelement <64 x i4> %r11, i4 zeroinitializer, i32 25
+ %r13 = insertelement <64 x i4> %r12, i4 zeroinitializer, i32 27
+ %r14 = insertelement <64 x i4> %r13, i4 zeroinitializer, i32 29
+ %r15 = insertelement <64 x i4> %r14, i4 zeroinitializer, i32 31
+ %r16 = insertelement <64 x i4> %r15, i4 zeroinitializer, i32 33
+ %r17 = insertelement <64 x i4> %r16, i4 zeroinitializer, i32 35
+ %r18 = insertelement <64 x i4> %r17, i4 zeroinitializer, i32 37
+ %r19 = insertelement <64 x i4> %r18, i4 zeroinitializer, i32 39
+ %r20 = insertelement <64 x i4> %r19, i4 zeroinitializer, i32 41
+ %r21 = insertelement <64 x i4> %r20, i4 zeroinitializer, i32 43
+ %r22 = insertelement <64 x i4> %r21, i4 zeroinitializer, i32 45
+ %r23 = insertelement <64 x i4> %r22, i4 zeroinitializer, i32 47
+ %r24 = insertelement <64 x i4> %r23, i4 zeroinitializer, i32 49
+ %r25 = insertelement <64 x i4> %r24, i4 zeroinitializer, i32 51
+ %r26 = insertelement <64 x i4> %r25, i4 zeroinitializer, i32 53
+ %r27 = insertelement <64 x i4> %r26, i4 zeroinitializer, i32 55
+ %r28 = insertelement <64 x i4> %r27, i4 zeroinitializer, i32 57
+ %r29 = insertelement <64 x i4> %r28, i4 zeroinitializer, i32 59
+ %r30 = insertelement <64 x i4> %r29, i4 zeroinitializer, i32 61
+ %r31 = insertelement <64 x i4> %r30, i4 zeroinitializer, i32 63
+ %r = bitcast <64 x i4> %r15 to <32 x i8>
+ ret <32 x i8> %r
+}
+
define <2 x i64> @_clearupper2xi64c(<2 x i64>) nounwind {
; SSE-LABEL: _clearupper2xi64c:
; SSE: # BB#0:
@@ -523,6 +1597,29 @@ define <2 x i64> @_clearupper2xi64c(<2 x
ret <2 x i64> %r
}
+define <4 x i64> @_clearupper4xi64c(<4 x i64>) nounwind {
+; SSE-LABEL: _clearupper4xi64c:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [4294967295,0,4294967295,0]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper4xi64c:
+; AVX1: # BB#0:
+; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper4xi64c:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-NEXT: retq
+ %r = and <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>, %0
+ ret <4 x i64> %r
+}
+
define <4 x i32> @_clearupper4xi32c(<4 x i32>) nounwind {
; SSE-LABEL: _clearupper4xi32c:
; SSE: # BB#0:
@@ -538,6 +1635,28 @@ define <4 x i32> @_clearupper4xi32c(<4 x
ret <4 x i32> %r
}
+define <8 x i32> @_clearupper8xi32c(<8 x i32>) nounwind {
+; SSE-LABEL: _clearupper8xi32c:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: _clearupper8xi32c:
+; AVX1: # BB#0:
+; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper8xi32c:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
+; AVX2-NEXT: retq
+ %r = and <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>, %0
+ ret <8 x i32> %r
+}
+
define <8 x i16> @_clearupper8xi16c(<8 x i16>) nounwind {
; SSE-LABEL: _clearupper8xi16c:
; SSE: # BB#0:
@@ -552,6 +1671,22 @@ define <8 x i16> @_clearupper8xi16c(<8 x
ret <8 x i16> %r
}
+define <16 x i16> @_clearupper16xi16c(<16 x i16>) nounwind {
+; SSE-LABEL: _clearupper16xi16c:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: _clearupper16xi16c:
+; AVX: # BB#0:
+; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: retq
+ %r = and <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>, %0
+ ret <16 x i16> %r
+}
+
define <16 x i8> @_clearupper16xi8c(<16 x i8>) nounwind {
; SSE-LABEL: _clearupper16xi8c:
; SSE: # BB#0:
@@ -565,3 +1700,19 @@ define <16 x i8> @_clearupper16xi8c(<16
%r = and <16 x i8> <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>, %0
ret <16 x i8> %r
}
+
+define <32 x i8> @_clearupper32xi8c(<32 x i8>) nounwind {
+; SSE-LABEL: _clearupper32xi8c:
+; SSE: # BB#0:
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: _clearupper32xi8c:
+; AVX: # BB#0:
+; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: retq
+ %r = and <32 x i8> <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>, %0
+ ret <32 x i8> %r
+}