[llvm] r346745 - [X86] Add more tests for -x86-experimental-vector-widening-legalization
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 12 23:47:52 PST 2018
Added: llvm/trunk/test/CodeGen/X86/vector-trunc-math-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-math-widen.ll?rev=346745&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-math-widen.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-math-widen.ll Mon Nov 12 23:47:52 2018
@@ -0,0 +1,5690 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=ALL,SSE,SSE2
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=ALL,AVX,AVX1
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX,AVX2,AVX2-SLOW
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=ALL,AVX,AVX2,AVX2-FAST
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=ALL,AVX,AVX512,AVX512F
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-shuffle | FileCheck %s --check-prefixes=ALL,AVX,AVX512,AVX512BW
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+fast-variable-shuffle | FileCheck %s --check-prefixes=ALL,AVX,AVX512,AVX512DQ
+
+;
+; add
+;
+
+define <4 x i32> @trunc_add_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_add_v4i64_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: paddq %xmm3, %xmm1
+; SSE-NEXT: paddq %xmm2, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_add_v4i64_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_add_v4i64_v4i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_add_v4i64_v4i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_add_v4i64_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = add <4 x i64> %a0, %a1
+ %2 = trunc <4 x i64> %1 to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i16> @trunc_add_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_add_v8i64_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: paddq %xmm6, %xmm2
+; SSE-NEXT: paddq %xmm7, %xmm3
+; SSE-NEXT: paddq %xmm4, %xmm0
+; SSE-NEXT: paddq %xmm5, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_add_v8i64_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3],xmm1[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3],xmm0[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0],xmm3[1,2,3],xmm4[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_add_v8i64_v8i16:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpaddq %ymm3, %ymm1, %ymm1
+; AVX2-SLOW-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_add_v8i64_v8i16:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpaddq %ymm3, %ymm1, %ymm1
+; AVX2-FAST-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_add_v8i64_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = add <8 x i64> %a0, %a1
+ %2 = trunc <8 x i64> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <8 x i16> @trunc_add_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
+; SSE-LABEL: trunc_add_v8i32_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: paddd %xmm3, %xmm1
+; SSE-NEXT: pslld $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: pslld $16, %xmm0
+; SSE-NEXT: psrad $16, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_add_v8i32_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_add_v8i32_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_add_v8i32_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = add <8 x i32> %a0, %a1
+ %2 = trunc <8 x i32> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @trunc_add_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_add_v16i64_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: paddq {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: paddq {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: paddq {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: paddq {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: paddq {{[0-9]+}}(%rsp), %xmm4
+; SSE-NEXT: paddq {{[0-9]+}}(%rsp), %xmm5
+; SSE-NEXT: paddq {{[0-9]+}}(%rsp), %xmm6
+; SSE-NEXT: paddq {{[0-9]+}}(%rsp), %xmm7
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE-NEXT: pand %xmm8, %xmm7
+; SSE-NEXT: pand %xmm8, %xmm6
+; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: pand %xmm8, %xmm4
+; SSE-NEXT: packuswb %xmm5, %xmm4
+; SSE-NEXT: packuswb %xmm6, %xmm4
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm4, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_add_v16i64_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpaddq %xmm4, %xmm0, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm5, %xmm1, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm6, %xmm2, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm7, %xmm3, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vmovddup {{.*#+}} xmm7 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm7 = mem[0,0]
+; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpackusdw %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5
+; AVX1-NEXT: vpackusdw %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm7, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm3
+; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpand %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm3
+; AVX1-NEXT: vpackusdw %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_add_v16i64_v16i8:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpaddq %ymm5, %ymm1, %ymm1
+; AVX2-SLOW-NEXT: vpaddq %ymm4, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpaddq %ymm7, %ymm3, %ymm3
+; AVX2-SLOW-NEXT: vpaddq %ymm6, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_add_v16i64_v16i8:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpaddq %ymm5, %ymm1, %ymm1
+; AVX2-FAST-NEXT: vpaddq %ymm4, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpaddq %ymm7, %ymm3, %ymm3
+; AVX2-FAST-NEXT: vpaddq %ymm6, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm4, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_add_v16i64_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpaddq %zmm3, %zmm1, %zmm1
+; AVX512-NEXT: vpaddq %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = add <16 x i64> %a0, %a1
+ %2 = trunc <16 x i64> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_add_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
+; SSE-LABEL: trunc_add_v16i32_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: paddd %xmm4, %xmm0
+; SSE-NEXT: paddd %xmm5, %xmm1
+; SSE-NEXT: paddd %xmm6, %xmm2
+; SSE-NEXT: paddd %xmm7, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_add_v16i32_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm2
+; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_add_v16i32_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_add_v16i32_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = add <16 x i32> %a0, %a1
+ %2 = trunc <16 x i32> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
+; SSE-LABEL: trunc_add_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: paddw %xmm2, %xmm0
+; SSE-NEXT: paddw %xmm3, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_add_v16i16_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_add_v16i16_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_add_v16i16_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_add_v16i16_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: trunc_add_v16i16_v16i8:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+ %1 = add <16 x i16> %a0, %a1
+ %2 = trunc <16 x i16> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @trunc_add_v8i32_v8i16_sext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
+; SSE-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pslld $16, %xmm2
+; SSE-NEXT: psrad $16, %xmm2
+; SSE-NEXT: pslld $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: packssdw %xmm2, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: psraw $8, %xmm0
+; SSE-NEXT: paddw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %2 = sext <8 x i8> %1 to <8 x i32>
+ %3 = add <8 x i32> %2, %a1
+ %4 = trunc <8 x i32> %3 to <8 x i16>
+ ret <8 x i16> %4
+}
+
+;
+; add to constant
+;
+
+define <4 x i32> @trunc_add_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_add_const_v4i64_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_add_const_v4i64_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_add_const_v4i64_v4i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_add_const_v4i64_v4i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_add_const_v4i64_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = add <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
+ %2 = trunc <4 x i64> %1 to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i16> @trunc_add_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_add_const_v8i64_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; SSE-NEXT: paddw {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_add_const_v8i64_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3],xmm1[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3],xmm0[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_add_const_v8i64_v8i16:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_add_const_v8i64_v8i16:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_add_const_v8i64_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = add <8 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
+ %2 = trunc <8 x i64> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <8 x i16> @trunc_add_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
+; SSE-LABEL: trunc_add_const_v8i32_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pslld $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: pslld $16, %xmm0
+; SSE-NEXT: psrad $16, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: paddw {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_add_const_v8i32_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_add_const_v8i32_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_add_const_v8i32_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = add <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %2 = trunc <8 x i32> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @trunc_add_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_add_const_v16i64_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE-NEXT: pand %xmm8, %xmm7
+; SSE-NEXT: pand %xmm8, %xmm6
+; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: pand %xmm8, %xmm4
+; SSE-NEXT: packuswb %xmm5, %xmm4
+; SSE-NEXT: packuswb %xmm6, %xmm4
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm4, %xmm0
+; SSE-NEXT: paddb {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_add_const_v16i64_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm5 = mem[0,0]
+; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_add_const_v16i64_v16i8:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-SLOW-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_add_const_v16i64_v16i8:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm4, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-FAST-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_add_const_v16i64_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = add <16 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
+ %2 = trunc <16 x i64> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_add_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
+; SSE-LABEL: trunc_add_const_v16i32_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: paddb {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_add_const_v16i32_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_add_const_v16i32_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_add_const_v16i32_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = add <16 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %2 = trunc <16 x i32> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
+; SSE-LABEL: trunc_add_const_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: paddb {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_add_const_v16i16_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_add_const_v16i16_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_add_const_v16i16_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_add_const_v16i16_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: trunc_add_const_v16i16_v16i8:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+ %1 = add <16 x i16> %a0, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+ %2 = trunc <16 x i16> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+;
+; sub
+;
+
+define <4 x i32> @trunc_sub_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_sub_v4i64_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: psubq %xmm3, %xmm1
+; SSE-NEXT: psubq %xmm2, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_sub_v4i64_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_sub_v4i64_v4i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_sub_v4i64_v4i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_sub_v4i64_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = sub <4 x i64> %a0, %a1
+ %2 = trunc <4 x i64> %1 to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i16> @trunc_sub_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_sub_v8i64_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: psubq %xmm6, %xmm2
+; SSE-NEXT: psubq %xmm7, %xmm3
+; SSE-NEXT: psubq %xmm4, %xmm0
+; SSE-NEXT: psubq %xmm5, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_sub_v8i64_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3],xmm1[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3],xmm0[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0],xmm3[1,2,3],xmm4[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_sub_v8i64_v8i16:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpsubq %ymm3, %ymm1, %ymm1
+; AVX2-SLOW-NEXT: vpsubq %ymm2, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_sub_v8i64_v8i16:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpsubq %ymm3, %ymm1, %ymm1
+; AVX2-FAST-NEXT: vpsubq %ymm2, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_sub_v8i64_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = sub <8 x i64> %a0, %a1
+ %2 = trunc <8 x i64> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <8 x i16> @trunc_sub_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
+; SSE-LABEL: trunc_sub_v8i32_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: psubd %xmm2, %xmm0
+; SSE-NEXT: psubd %xmm3, %xmm1
+; SSE-NEXT: pslld $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: pslld $16, %xmm0
+; SSE-NEXT: psrad $16, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_sub_v8i32_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_sub_v8i32_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_sub_v8i32_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = sub <8 x i32> %a0, %a1
+ %2 = trunc <8 x i32> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @trunc_sub_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_sub_v16i64_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: psubq {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: psubq {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: psubq {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: psubq {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: psubq {{[0-9]+}}(%rsp), %xmm4
+; SSE-NEXT: psubq {{[0-9]+}}(%rsp), %xmm5
+; SSE-NEXT: psubq {{[0-9]+}}(%rsp), %xmm6
+; SSE-NEXT: psubq {{[0-9]+}}(%rsp), %xmm7
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE-NEXT: pand %xmm8, %xmm7
+; SSE-NEXT: pand %xmm8, %xmm6
+; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: pand %xmm8, %xmm4
+; SSE-NEXT: packuswb %xmm5, %xmm4
+; SSE-NEXT: packuswb %xmm6, %xmm4
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm4, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_sub_v16i64_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpsubq %xmm4, %xmm0, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm5, %xmm1, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm6, %xmm2, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm7, %xmm3, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm7
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vpsubq %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vmovddup {{.*#+}} xmm7 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm7 = mem[0,0]
+; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpackusdw %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5
+; AVX1-NEXT: vpackusdw %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm7, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm3
+; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpand %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm3
+; AVX1-NEXT: vpackusdw %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_sub_v16i64_v16i8:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpsubq %ymm5, %ymm1, %ymm1
+; AVX2-SLOW-NEXT: vpsubq %ymm4, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpsubq %ymm7, %ymm3, %ymm3
+; AVX2-SLOW-NEXT: vpsubq %ymm6, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_sub_v16i64_v16i8:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpsubq %ymm5, %ymm1, %ymm1
+; AVX2-FAST-NEXT: vpsubq %ymm4, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpsubq %ymm7, %ymm3, %ymm3
+; AVX2-FAST-NEXT: vpsubq %ymm6, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm4, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_sub_v16i64_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsubq %zmm3, %zmm1, %zmm1
+; AVX512-NEXT: vpsubq %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = sub <16 x i64> %a0, %a1
+ %2 = trunc <16 x i64> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_sub_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
+; SSE-LABEL: trunc_sub_v16i32_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: psubd %xmm4, %xmm0
+; SSE-NEXT: psubd %xmm5, %xmm1
+; SSE-NEXT: psubd %xmm6, %xmm2
+; SSE-NEXT: psubd %xmm7, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_sub_v16i32_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpsubd %xmm2, %xmm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm2
+; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_sub_v16i32_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsubd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_sub_v16i32_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsubd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = sub <16 x i32> %a0, %a1
+ %2 = trunc <16 x i32> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
+; SSE-LABEL: trunc_sub_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: psubw %xmm2, %xmm0
+; SSE-NEXT: psubw %xmm3, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_sub_v16i16_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_sub_v16i16_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_sub_v16i16_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_sub_v16i16_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: trunc_sub_v16i16_v16i8:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+ %1 = sub <16 x i16> %a0, %a1
+ %2 = trunc <16 x i16> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_ext_sub_v16i16_v16i8(<16 x i8> %x, <16 x i8> %y) {
+; SSE-LABEL: trunc_ext_sub_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: psubb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: trunc_ext_sub_v16i16_v16i8:
+; AVX: # %bb.0:
+; AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %a = zext <16 x i8> %x to <16 x i16>
+ %b = zext <16 x i8> %y to <16 x i16>
+ %c = sub <16 x i16> %a, %b
+ %d = trunc <16 x i16> %c to <16 x i8>
+ ret <16 x i8> %d
+}
+
+;
+; sub to constant
+;
+
+define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_sub_const_v4i64_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movl $1, %eax
+; SSE-NEXT: movq %rax, %xmm2
+; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
+; SSE-NEXT: psubq %xmm2, %xmm0
+; SSE-NEXT: psubq {{.*}}(%rip), %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_sub_const_v4i64_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: movl $1, %eax
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
+; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[0,2]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_sub_const_v4i64_v4i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_sub_const_v4i64_v4i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_sub_const_v4i64_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = sub <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
+ %2 = trunc <4 x i64> %1 to <4 x i32>
+ ret <4 x i32> %2
+}
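+
+; The <0,1> half of the constant is materialized with movl $1 / movq /
+; pslldq rather than loaded from the constant pool; the <2,3> half is still
+; subtracted directly from memory.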
+
+define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_sub_const_v8i64_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movl $1, %eax
+; SSE-NEXT: movq %rax, %xmm4
+; SSE-NEXT: pslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
+; SSE-NEXT: psubq %xmm4, %xmm0
+; SSE-NEXT: psubq {{.*}}(%rip), %xmm1
+; SSE-NEXT: psubq {{.*}}(%rip), %xmm2
+; SSE-NEXT: psubq {{.*}}(%rip), %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
+; SSE-NEXT: movapd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_sub_const_v8i64_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: movl $1, %eax
+; AVX1-NEXT: vmovq %rax, %xmm2
+; AVX1-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
+; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_sub_const_v8i64_v8i16:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_sub_const_v8i64_v8i16:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_sub_const_v8i64_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsubq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = sub <8 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
+ %2 = trunc <8 x i64> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
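+
+; With 512-bit registers the whole v8i64 subtract is one vpsubq and vpmovqw
+; truncates all eight lanes at once; SSE instead rebuilds the result vector
+; from pshufd/pshuflw'd quarters via punpckldq and movsd.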
+
+define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
+; SSE-LABEL: trunc_sub_const_v8i32_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: psubd {{.*}}(%rip), %xmm0
+; SSE-NEXT: psubd {{.*}}(%rip), %xmm1
+; SSE-NEXT: pslld $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: pslld $16, %xmm0
+; SSE-NEXT: psrad $16, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_sub_const_v8i32_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_sub_const_v8i32_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_sub_const_v8i32_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = sub <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %2 = trunc <8 x i32> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
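+
+; The pslld $16 / psrad $16 pairs sign-extend the low word of each dword so
+; the following packssdw cannot saturate and produces the exact truncated
+; halfwords.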
+
+define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_sub_const_v16i64_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movl $1, %eax
+; SSE-NEXT: movq %rax, %xmm8
+; SSE-NEXT: pslldq {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1,2,3,4,5,6,7]
+; SSE-NEXT: psubq %xmm8, %xmm0
+; SSE-NEXT: psubq {{.*}}(%rip), %xmm1
+; SSE-NEXT: psubq {{.*}}(%rip), %xmm2
+; SSE-NEXT: psubq {{.*}}(%rip), %xmm3
+; SSE-NEXT: psubq {{.*}}(%rip), %xmm4
+; SSE-NEXT: psubq {{.*}}(%rip), %xmm5
+; SSE-NEXT: psubq {{.*}}(%rip), %xmm6
+; SSE-NEXT: psubq {{.*}}(%rip), %xmm7
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE-NEXT: pand %xmm8, %xmm7
+; SSE-NEXT: pand %xmm8, %xmm6
+; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: pand %xmm8, %xmm4
+; SSE-NEXT: packuswb %xmm5, %xmm4
+; SSE-NEXT: packuswb %xmm6, %xmm4
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm4, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_sub_const_v16i64_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: movl $1, %eax
+; AVX1-NEXT: vmovq %rax, %xmm4
+; AVX1-NEXT: vpslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
+; AVX1-NEXT: vpsubq %xmm4, %xmm0, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm2, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm3, %xmm7
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vmovddup {{.*#+}} xmm4 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm4 = mem[0,0]
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm4, %xmm7, %xmm7
+; AVX1-NEXT: vpackusdw %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm4, %xmm6, %xmm6
+; AVX1-NEXT: vpackusdw %xmm2, %xmm6, %xmm2
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm3
+; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm8, %xmm3
+; AVX1-NEXT: vpackusdw %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_sub_const_v16i64_v16i8:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-SLOW-NEXT: vpsubq {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_sub_const_v16i64_v16i8:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-FAST-NEXT: vpsubq {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm4, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_sub_const_v16i64_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsubq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512-NEXT: vpsubq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = sub <16 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
+ %2 = trunc <16 x i64> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
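+
+; The vmovddup constant 1.2598673968951787E-321 is not really a double: it is
+; the per-qword byte mask 0x00000000000000FF (255 * 2^-1074) printed in llc's
+; FP notation. The pand/vpackusdw/vpackuswb cascade then packs the masked low
+; bytes of all sixteen qwords into one xmm register.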
+
+define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
+; SSE-LABEL: trunc_sub_const_v16i32_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: psubd {{.*}}(%rip), %xmm0
+; SSE-NEXT: psubd {{.*}}(%rip), %xmm1
+; SSE-NEXT: psubd {{.*}}(%rip), %xmm2
+; SSE-NEXT: psubd {{.*}}(%rip), %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_sub_const_v16i32_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm1, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_sub_const_v16i32_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_sub_const_v16i32_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsubd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = sub <16 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %2 = trunc <16 x i32> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
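+
+; Same trick at dword granularity: the broadcast float 3.57331108E-43 is the
+; bit pattern 0x000000FF, masking each dword to its low byte ahead of the
+; packusdw/packuswb chain.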
+
+define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
+; SSE-LABEL: trunc_sub_const_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: psubw {{.*}}(%rip), %xmm0
+; SSE-NEXT: psubw {{.*}}(%rip), %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_sub_const_v16i16_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_sub_const_v16i16_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_sub_const_v16i16_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_sub_const_v16i16_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: trunc_sub_const_v16i16_v16i8:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+ %1 = sub <16 x i16> %a0, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+ %2 = trunc <16 x i16> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_ext_sub_const_rhs_v16i16_v16i8(<16 x i8> %x) {
+; SSE-LABEL: trunc_ext_sub_const_rhs_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: psubb {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: trunc_ext_sub_const_rhs_v16i16_v16i8:
+; AVX: # %bb.0:
+; AVX-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %a = zext <16 x i8> %x to <16 x i16>
+ %b = sub <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+ %c = trunc <16 x i16> %b to <16 x i8>
+ ret <16 x i8> %c
+}
+
+define <16 x i8> @trunc_ext_sub_const_lhs_v16i16_v16i8(<16 x i8> %x) {
+; SSE-LABEL: trunc_ext_sub_const_lhs_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; SSE-NEXT: psubb %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: trunc_ext_sub_const_lhs_v16i16_v16i8:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX-NEXT: vpsubb %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+ %a = zext <16 x i8> %x to <16 x i16>
+ %b = sub <16 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>, %a
+ %c = trunc <16 x i16> %b to <16 x i8>
+ ret <16 x i8> %c
+}
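+
+; Both constant-operand variants also narrow to a single byte subtract; the
+; lhs form needs an extra movdqa on SSE only because psubb is destructive and
+; the constant must occupy the destination register.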
+
+;
+; mul
+;
+
+define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_mul_v4i64_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: psrlq $32, %xmm4
+; SSE-NEXT: pmuludq %xmm3, %xmm4
+; SSE-NEXT: movdqa %xmm3, %xmm5
+; SSE-NEXT: psrlq $32, %xmm5
+; SSE-NEXT: pmuludq %xmm1, %xmm5
+; SSE-NEXT: paddq %xmm4, %xmm5
+; SSE-NEXT: psllq $32, %xmm5
+; SSE-NEXT: pmuludq %xmm3, %xmm1
+; SSE-NEXT: paddq %xmm5, %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: psrlq $32, %xmm3
+; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: psrlq $32, %xmm4
+; SSE-NEXT: pmuludq %xmm0, %xmm4
+; SSE-NEXT: paddq %xmm3, %xmm4
+; SSE-NEXT: psllq $32, %xmm4
+; SSE-NEXT: pmuludq %xmm2, %xmm0
+; SSE-NEXT: paddq %xmm4, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_mul_v4i64_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_mul_v4i64_v4i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_mul_v4i64_v4i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512F-LABEL: trunc_mul_v4i64_v4i32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_mul_v4i64_v4i32:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: trunc_mul_v4i64_v4i32:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+ %1 = mul <4 x i64> %a0, %a1
+ %2 = trunc <4 x i64> %1 to <4 x i32>
+ ret <4 x i32> %2
+}
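+
+; No general 64-bit vector multiply exists before AVX512DQ, so when only the
+; low 32 bits survive the trunc the lowering prefers to truncate first and
+; use a single pmulld; SSE2 lacks pmulld as well and must synthesize the full
+; 64-bit product from pmuludq halves, while AVX512DQ uses vpmullq directly
+; and truncates afterwards.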
+
+define <8 x i16> @trunc_mul_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_mul_v8i64_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm7[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,0,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,0,2,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm4[0],xmm6[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; SSE-NEXT: pmullw %xmm6, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_mul_v8i64_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2,3],xmm4[4],xmm5[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm5[1,2,3],xmm3[4],xmm5[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2,3],xmm4[4],xmm5[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm5[1,2,3],xmm2[4],xmm5[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm5[1,2,3],xmm3[4],xmm5[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1,2,3],xmm1[4],xmm5[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm5[1,2,3],xmm3[4],xmm5[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm5[1,2,3],xmm0[4],xmm5[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_mul_v8i64_v8i16:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpmullw %xmm2, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_mul_v8i64_v8i16:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm4, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpmullw %xmm2, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512F-LABEL: trunc_mul_v8i64_v8i16:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpmovqw %zmm1, %xmm1
+; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512F-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_mul_v8i64_v8i16:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmovqw %zmm1, %xmm1
+; AVX512BW-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: trunc_mul_v8i64_v8i16:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+ %1 = mul <8 x i64> %a0, %a1
+ %2 = trunc <8 x i64> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
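+
+; For v8i64 every non-DQ target truncates both operands all the way to words
+; first and finishes with one pmullw rather than performing eight 64-bit
+; multiplies.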
+
+define <8 x i16> @trunc_mul_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
+; SSE-LABEL: trunc_mul_v8i32_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm2, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm4, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm3, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE-NEXT: pslld $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: pslld $16, %xmm0
+; SSE-NEXT: psrad $16, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_mul_v8i32_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_mul_v8i32_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_mul_v8i32_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = mul <8 x i32> %a0, %a1
+ %2 = trunc <8 x i32> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
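+
+; SSE2 has no 32-bit vector multiply either, so the v8i32 product is
+; assembled from pmuludq on the even lanes plus pmuludq on the odd lanes
+; shuffled down, before the usual pslld/psrad/packssdw truncation.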
+
+define <16 x i8> @trunc_mul_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_mul_v16i64_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT: movdqa %xmm0, %xmm9
+; SSE-NEXT: psrlq $32, %xmm9
+; SSE-NEXT: pmuludq %xmm8, %xmm9
+; SSE-NEXT: movdqa %xmm8, %xmm10
+; SSE-NEXT: psrlq $32, %xmm10
+; SSE-NEXT: pmuludq %xmm0, %xmm10
+; SSE-NEXT: paddq %xmm9, %xmm10
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: psllq $32, %xmm10
+; SSE-NEXT: pmuludq %xmm8, %xmm0
+; SSE-NEXT: paddq %xmm10, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm8
+; SSE-NEXT: psrlq $32, %xmm8
+; SSE-NEXT: pmuludq %xmm9, %xmm8
+; SSE-NEXT: movdqa %xmm9, %xmm10
+; SSE-NEXT: psrlq $32, %xmm10
+; SSE-NEXT: pmuludq %xmm1, %xmm10
+; SSE-NEXT: paddq %xmm8, %xmm10
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT: psllq $32, %xmm10
+; SSE-NEXT: pmuludq %xmm9, %xmm1
+; SSE-NEXT: paddq %xmm10, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: psrlq $32, %xmm9
+; SSE-NEXT: pmuludq %xmm8, %xmm9
+; SSE-NEXT: movdqa %xmm8, %xmm10
+; SSE-NEXT: psrlq $32, %xmm10
+; SSE-NEXT: pmuludq %xmm2, %xmm10
+; SSE-NEXT: paddq %xmm9, %xmm10
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: psllq $32, %xmm10
+; SSE-NEXT: pmuludq %xmm8, %xmm2
+; SSE-NEXT: paddq %xmm10, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm8
+; SSE-NEXT: psrlq $32, %xmm8
+; SSE-NEXT: pmuludq %xmm9, %xmm8
+; SSE-NEXT: movdqa %xmm9, %xmm10
+; SSE-NEXT: psrlq $32, %xmm10
+; SSE-NEXT: pmuludq %xmm3, %xmm10
+; SSE-NEXT: paddq %xmm8, %xmm10
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT: psllq $32, %xmm10
+; SSE-NEXT: pmuludq %xmm9, %xmm3
+; SSE-NEXT: paddq %xmm10, %xmm3
+; SSE-NEXT: movdqa %xmm4, %xmm9
+; SSE-NEXT: psrlq $32, %xmm9
+; SSE-NEXT: pmuludq %xmm8, %xmm9
+; SSE-NEXT: movdqa %xmm8, %xmm10
+; SSE-NEXT: psrlq $32, %xmm10
+; SSE-NEXT: pmuludq %xmm4, %xmm10
+; SSE-NEXT: paddq %xmm9, %xmm10
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: psllq $32, %xmm10
+; SSE-NEXT: pmuludq %xmm8, %xmm4
+; SSE-NEXT: paddq %xmm10, %xmm4
+; SSE-NEXT: movdqa %xmm5, %xmm8
+; SSE-NEXT: psrlq $32, %xmm8
+; SSE-NEXT: pmuludq %xmm9, %xmm8
+; SSE-NEXT: movdqa %xmm9, %xmm10
+; SSE-NEXT: psrlq $32, %xmm10
+; SSE-NEXT: pmuludq %xmm5, %xmm10
+; SSE-NEXT: paddq %xmm8, %xmm10
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
+; SSE-NEXT: psllq $32, %xmm10
+; SSE-NEXT: pmuludq %xmm9, %xmm5
+; SSE-NEXT: paddq %xmm10, %xmm5
+; SSE-NEXT: movdqa %xmm6, %xmm9
+; SSE-NEXT: psrlq $32, %xmm9
+; SSE-NEXT: pmuludq %xmm8, %xmm9
+; SSE-NEXT: movdqa %xmm8, %xmm10
+; SSE-NEXT: psrlq $32, %xmm10
+; SSE-NEXT: pmuludq %xmm6, %xmm10
+; SSE-NEXT: paddq %xmm9, %xmm10
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: psllq $32, %xmm10
+; SSE-NEXT: pmuludq %xmm8, %xmm6
+; SSE-NEXT: paddq %xmm10, %xmm6
+; SSE-NEXT: movdqa %xmm7, %xmm8
+; SSE-NEXT: psrlq $32, %xmm8
+; SSE-NEXT: pmuludq %xmm9, %xmm8
+; SSE-NEXT: movdqa %xmm9, %xmm10
+; SSE-NEXT: psrlq $32, %xmm10
+; SSE-NEXT: pmuludq %xmm7, %xmm10
+; SSE-NEXT: paddq %xmm8, %xmm10
+; SSE-NEXT: pmuludq %xmm9, %xmm7
+; SSE-NEXT: psllq $32, %xmm10
+; SSE-NEXT: paddq %xmm10, %xmm7
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE-NEXT: pand %xmm8, %xmm7
+; SSE-NEXT: pand %xmm8, %xmm6
+; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: pand %xmm8, %xmm4
+; SSE-NEXT: packuswb %xmm5, %xmm4
+; SSE-NEXT: packuswb %xmm6, %xmm4
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm4, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_mul_v16i64_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm8
+; AVX1-NEXT: vpmuludq %xmm4, %xmm8, %xmm8
+; AVX1-NEXT: vpsrlq $32, %xmm4, %xmm9
+; AVX1-NEXT: vpmuludq %xmm9, %xmm0, %xmm9
+; AVX1-NEXT: vpaddq %xmm8, %xmm9, %xmm8
+; AVX1-NEXT: vpsllq $32, %xmm8, %xmm8
+; AVX1-NEXT: vpmuludq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT: vpaddq %xmm8, %xmm9, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm9
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm4
+; AVX1-NEXT: vpmuludq %xmm9, %xmm4, %xmm10
+; AVX1-NEXT: vpsrlq $32, %xmm9, %xmm4
+; AVX1-NEXT: vpmuludq %xmm4, %xmm0, %xmm4
+; AVX1-NEXT: vpaddq %xmm10, %xmm4, %xmm4
+; AVX1-NEXT: vpsllq $32, %xmm4, %xmm4
+; AVX1-NEXT: vpmuludq %xmm9, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
+; AVX1-NEXT: vpmuludq %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlq $32, %xmm5, %xmm0
+; AVX1-NEXT: vpmuludq %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpaddq %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX1-NEXT: vpmuludq %xmm5, %xmm1, %xmm4
+; AVX1-NEXT: vpaddq %xmm0, %xmm4, %xmm10
+; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm5
+; AVX1-NEXT: vpmuludq %xmm0, %xmm5, %xmm5
+; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm4
+; AVX1-NEXT: vpmuludq %xmm4, %xmm1, %xmm4
+; AVX1-NEXT: vpaddq %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpsllq $32, %xmm4, %xmm4
+; AVX1-NEXT: vpmuludq %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpaddq %xmm4, %xmm0, %xmm1
+; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm0
+; AVX1-NEXT: vpmuludq %xmm6, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $32, %xmm6, %xmm4
+; AVX1-NEXT: vpmuludq %xmm4, %xmm2, %xmm4
+; AVX1-NEXT: vpaddq %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX1-NEXT: vpmuludq %xmm6, %xmm2, %xmm4
+; AVX1-NEXT: vpaddq %xmm0, %xmm4, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm4
+; AVX1-NEXT: vpmuludq %xmm0, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm6
+; AVX1-NEXT: vpmuludq %xmm6, %xmm2, %xmm6
+; AVX1-NEXT: vpaddq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT: vpsllq $32, %xmm4, %xmm4
+; AVX1-NEXT: vpmuludq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpaddq %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm2
+; AVX1-NEXT: vpmuludq %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlq $32, %xmm7, %xmm4
+; AVX1-NEXT: vpmuludq %xmm4, %xmm3, %xmm4
+; AVX1-NEXT: vpaddq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
+; AVX1-NEXT: vpmuludq %xmm7, %xmm3, %xmm4
+; AVX1-NEXT: vpaddq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm6
+; AVX1-NEXT: vpmuludq %xmm4, %xmm6, %xmm6
+; AVX1-NEXT: vpsrlq $32, %xmm4, %xmm7
+; AVX1-NEXT: vpmuludq %xmm7, %xmm3, %xmm7
+; AVX1-NEXT: vpaddq %xmm6, %xmm7, %xmm6
+; AVX1-NEXT: vpsllq $32, %xmm6, %xmm6
+; AVX1-NEXT: vpmuludq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vmovddup {{.*#+}} xmm4 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm4 = mem[0,0]
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm3
+; AVX1-NEXT: vpackusdw %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm10, %xmm2
+; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm9, %xmm2
+; AVX1-NEXT: vpand %xmm4, %xmm8, %xmm3
+; AVX1-NEXT: vpackusdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_mul_v16i64_v16i8:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-SLOW-NEXT: vpmulld %xmm7, %xmm3, %xmm3
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vpmulld %xmm6, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm6 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT: vpshufb %xmm6, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vpmulld %xmm5, %xmm1, %xmm1
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpmulld %xmm4, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufb %xmm6, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_mul_v16i64_v16i8:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm7, %ymm8, %ymm7
+; AVX2-FAST-NEXT: vpermd %ymm3, %ymm8, %ymm3
+; AVX2-FAST-NEXT: vpmulld %xmm7, %xmm3, %xmm3
+; AVX2-FAST-NEXT: vpermd %ymm6, %ymm8, %ymm6
+; AVX2-FAST-NEXT: vpermd %ymm2, %ymm8, %ymm2
+; AVX2-FAST-NEXT: vpmulld %xmm6, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm6 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vpermd %ymm5, %ymm8, %ymm5
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm8, %ymm1
+; AVX2-FAST-NEXT: vpmulld %xmm5, %xmm1, %xmm1
+; AVX2-FAST-NEXT: vpermd %ymm4, %ymm8, %ymm4
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm8, %ymm0
+; AVX2-FAST-NEXT: vpmulld %xmm4, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpshufb %xmm6, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512F-LABEL: trunc_mul_v16i64_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpmovqd %zmm3, %ymm3
+; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512F-NEXT: vpmulld %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpmovqd %zmm2, %ymm2
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: vpmulld %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_mul_v16i64_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmovqd %zmm3, %ymm3
+; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512BW-NEXT: vpmulld %ymm3, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovqd %zmm2, %ymm2
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: vpmulld %ymm2, %ymm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: trunc_mul_v16i64_v16i8:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpmullq %zmm3, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpmullq %zmm2, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+ %1 = mul <16 x i64> %a0, %a1
+ %2 = trunc <16 x i64> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
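+
+; AVX512F/BW narrow each v8i64 half to dwords with vpmovqd and multiply with
+; vpmulld before a final zmm vpmovdb; AVX512DQ keeps the multiplies at qword
+; width with vpmullq.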
+
+define <16 x i8> @trunc_mul_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
+; SSE-LABEL: trunc_mul_v16i32_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm0[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm4, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm8, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm5, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm4, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm6, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm4, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm7, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm4, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_mul_v16i32_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmulld %xmm2, %xmm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpmulld %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm2
+; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_mul_v16i32_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmulld %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_mul_v16i32_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = mul <16 x i32> %a0, %a1
+ %2 = trunc <16 x i32> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
+; SSE-LABEL: trunc_mul_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pmullw %xmm2, %xmm0
+; SSE-NEXT: pmullw %xmm3, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_mul_v16i16_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_mul_v16i16_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_mul_v16i16_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpmullw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_mul_v16i16_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: trunc_mul_v16i16_v16i8:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpmullw %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+ %1 = mul <16 x i16> %a0, %a1
+ %2 = trunc <16 x i16> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @trunc_mul_v8i32_v8i16_zext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
+; SSE-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm3, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; SSE-NEXT: pslld $16, %xmm2
+; SSE-NEXT: psrad $16, %xmm2
+; SSE-NEXT: pslld $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: packssdw %xmm2, %xmm1
+; SSE-NEXT: pmullw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %2 = zext <8 x i8> %1 to <8 x i32>
+ %3 = mul <8 x i32> %2, %a1
+ %4 = trunc <8 x i32> %3 to <8 x i16>
+ ret <8 x i16> %4
+}
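+
+; Only %a1 needs truncating here: the other operand is a zero-extend from i8,
+; and trunc(zext(%1) * %a1) to i16 equals (zext %1 to i16) * trunc(%a1), so
+; the multiply shrinks to a single pmullw on every target.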
+
+;
+; mul to constant
+;
+
+define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_mul_const_v4i64_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2,3]
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: psrlq $32, %xmm1
+; SSE-NEXT: pmuludq %xmm2, %xmm1
+; SSE-NEXT: psllq $32, %xmm1
+; SSE-NEXT: paddq %xmm3, %xmm1
+; SSE-NEXT: movl $1, %eax
+; SSE-NEXT: movq %rax, %xmm2
+; SSE-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: pmuludq %xmm2, %xmm0
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: paddq %xmm3, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_mul_const_v4i64_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_mul_const_v4i64_v4i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_mul_const_v4i64_v4i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_mul_const_v4i64_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = mul <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
+ %2 = trunc <4 x i64> %1 to <4 x i32>
+ ret <4 x i32> %2
+}
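+
+; A constant multiplicand lets the multiply narrow past the trunc, since
+; trunc(x * C) == trunc(x) * trunc(C): the AVX variants truncate first and
+; issue one pmulld from the constant pool, while SSE2 still expands per-pair
+; pmuludq sequences, needing only two pmuludq each because the constants'
+; high dwords are zero.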
+
+define <8 x i16> @trunc_mul_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_mul_const_v8i64_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_mul_const_v8i64_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3],xmm1[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3],xmm0[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_mul_const_v8i64_v8i16:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_mul_const_v8i64_v8i16:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_mul_const_v8i64_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = mul <8 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
+ %2 = trunc <8 x i64> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <8 x i16> @trunc_mul_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
+; SSE-LABEL: trunc_mul_const_v8i32_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pslld $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: pslld $16, %xmm0
+; SSE-NEXT: psrad $16, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_mul_const_v8i32_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_mul_const_v8i32_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_mul_const_v8i32_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = mul <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %2 = trunc <8 x i32> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @trunc_mul_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_mul_const_v16i64_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movl $1, %eax
+; SSE-NEXT: movq %rax, %xmm8
+; SSE-NEXT: pslldq {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1,2,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm9
+; SSE-NEXT: pmuludq %xmm8, %xmm9
+; SSE-NEXT: psrlq $32, %xmm0
+; SSE-NEXT: pmuludq %xmm8, %xmm0
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: paddq %xmm9, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [2,3]
+; SSE-NEXT: movdqa %xmm1, %xmm9
+; SSE-NEXT: pmuludq %xmm8, %xmm9
+; SSE-NEXT: psrlq $32, %xmm1
+; SSE-NEXT: pmuludq %xmm8, %xmm1
+; SSE-NEXT: psllq $32, %xmm1
+; SSE-NEXT: paddq %xmm9, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [4,5]
+; SSE-NEXT: movdqa %xmm2, %xmm9
+; SSE-NEXT: pmuludq %xmm8, %xmm9
+; SSE-NEXT: psrlq $32, %xmm2
+; SSE-NEXT: pmuludq %xmm8, %xmm2
+; SSE-NEXT: psllq $32, %xmm2
+; SSE-NEXT: paddq %xmm9, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [6,7]
+; SSE-NEXT: movdqa %xmm3, %xmm9
+; SSE-NEXT: pmuludq %xmm8, %xmm9
+; SSE-NEXT: psrlq $32, %xmm3
+; SSE-NEXT: pmuludq %xmm8, %xmm3
+; SSE-NEXT: psllq $32, %xmm3
+; SSE-NEXT: paddq %xmm9, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [8,9]
+; SSE-NEXT: movdqa %xmm4, %xmm9
+; SSE-NEXT: pmuludq %xmm8, %xmm9
+; SSE-NEXT: psrlq $32, %xmm4
+; SSE-NEXT: pmuludq %xmm8, %xmm4
+; SSE-NEXT: psllq $32, %xmm4
+; SSE-NEXT: paddq %xmm9, %xmm4
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [10,11]
+; SSE-NEXT: movdqa %xmm5, %xmm9
+; SSE-NEXT: pmuludq %xmm8, %xmm9
+; SSE-NEXT: psrlq $32, %xmm5
+; SSE-NEXT: pmuludq %xmm8, %xmm5
+; SSE-NEXT: psllq $32, %xmm5
+; SSE-NEXT: paddq %xmm9, %xmm5
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [12,13]
+; SSE-NEXT: movdqa %xmm6, %xmm9
+; SSE-NEXT: pmuludq %xmm8, %xmm9
+; SSE-NEXT: psrlq $32, %xmm6
+; SSE-NEXT: pmuludq %xmm8, %xmm6
+; SSE-NEXT: psllq $32, %xmm6
+; SSE-NEXT: paddq %xmm9, %xmm6
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [14,15]
+; SSE-NEXT: movdqa %xmm7, %xmm9
+; SSE-NEXT: pmuludq %xmm8, %xmm9
+; SSE-NEXT: psrlq $32, %xmm7
+; SSE-NEXT: pmuludq %xmm8, %xmm7
+; SSE-NEXT: psllq $32, %xmm7
+; SSE-NEXT: paddq %xmm9, %xmm7
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE-NEXT: pand %xmm8, %xmm7
+; SSE-NEXT: pand %xmm8, %xmm6
+; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: pand %xmm8, %xmm4
+; SSE-NEXT: packuswb %xmm5, %xmm4
+; SSE-NEXT: packuswb %xmm6, %xmm4
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm4, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_mul_const_v16i64_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: movl $1, %eax
+; AVX1-NEXT: vmovq %rax, %xmm4
+; AVX1-NEXT: vpslldq {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
+; AVX1-NEXT: vpmuludq %xmm4, %xmm0, %xmm5
+; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm6
+; AVX1-NEXT: vpmuludq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT: vpsllq $32, %xmm4, %xmm4
+; AVX1-NEXT: vpaddq %xmm4, %xmm5, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [2,3]
+; AVX1-NEXT: vpmuludq %xmm5, %xmm0, %xmm6
+; AVX1-NEXT: vpsrlq $32, %xmm0, %xmm0
+; AVX1-NEXT: vpmuludq %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm0, %xmm6, %xmm9
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [4,5]
+; AVX1-NEXT: vpmuludq %xmm5, %xmm1, %xmm6
+; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm7
+; AVX1-NEXT: vpmuludq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT: vpsllq $32, %xmm5, %xmm5
+; AVX1-NEXT: vpaddq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [6,7]
+; AVX1-NEXT: vpmuludq %xmm6, %xmm1, %xmm7
+; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm1
+; AVX1-NEXT: vpmuludq %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm7, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [8,9]
+; AVX1-NEXT: vpmuludq %xmm6, %xmm2, %xmm7
+; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm4
+; AVX1-NEXT: vpmuludq %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpsllq $32, %xmm4, %xmm4
+; AVX1-NEXT: vpaddq %xmm4, %xmm7, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [10,11]
+; AVX1-NEXT: vpmuludq %xmm6, %xmm2, %xmm7
+; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm2
+; AVX1-NEXT: vpmuludq %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpsllq $32, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm2, %xmm7, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [12,13]
+; AVX1-NEXT: vpmuludq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm0
+; AVX1-NEXT: vpmuludq %xmm6, %xmm0, %xmm0
+; AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm0, %xmm7, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [14,15]
+; AVX1-NEXT: vpmuludq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm3
+; AVX1-NEXT: vpmuludq %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpsllq $32, %xmm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vmovddup {{.*#+}} xmm6 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm6 = mem[0,0]
+; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm6, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm3
+; AVX1-NEXT: vpackusdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm6, %xmm5, %xmm2
+; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpand %xmm6, %xmm9, %xmm2
+; AVX1-NEXT: vpand %xmm6, %xmm8, %xmm3
+; AVX1-NEXT: vpackusdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_mul_const_v16i64_v16i8:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vpmulld {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-SLOW-NEXT: vpmulld {{.*}}(%rip), %xmm3, %xmm3
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_mul_const_v16i64_v16i8:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT: vpmulld {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3
+; AVX2-FAST-NEXT: vpmulld {{.*}}(%rip), %xmm3, %xmm3
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm4, %ymm0
+; AVX2-FAST-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_mul_const_v16i64_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmulld {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vpmulld {{.*}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = mul <16 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
+ %2 = trunc <16 x i64> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
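+
+; NOTE: with no legal v8i64 multiply, SSE/AVX1 build each 64-bit product from
+; 32-bit pieces (pmuludq on the low halves plus one psrlq/pmuludq/psllq cross
+; term; the constants' high halves are zero) and truncate afterwards, whereas
+; the AVX2 and AVX512 paths truncate to i32 first and multiply with vpmulld.
+; The odd FP constants in the AVX1 masks are just the asm printer's view of
+; the integers: 1.2598673968951787E-321 is 255 (0xFF) reinterpreted as a
+; double, and 3.57331108E-43 (in the v16i32 test below) is 255 reinterpreted
+; as a float.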
+
+define <16 x i8> @trunc_mul_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
+; SSE-LABEL: trunc_mul_const_v16i32_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [0,1,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm4, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm5, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm4, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm5, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [8,9,10,11]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm4, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm5, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [12,13,14,15]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm4, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm5, %xmm4
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_mul_const_v16i32_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_mul_const_v16i32_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_mul_const_v16i32_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmulld {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = mul <16 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %2 = trunc <16 x i32> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
+; SSE-LABEL: trunc_mul_const_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pmullw {{.*}}(%rip), %xmm0
+; SSE-NEXT: pmullw {{.*}}(%rip), %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_mul_const_v16i16_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_mul_const_v16i16_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_mul_const_v16i16_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_mul_const_v16i16_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: trunc_mul_const_v16i16_v16i8:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+ %1 = mul <16 x i16> %a0, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+ %2 = trunc <16 x i16> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
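+
+; NOTE: a direct word-to-byte truncate (vpmovwb) requires AVX512BW, so the
+; AVX512F and AVX512DQ paths above zero-extend the v16i16 result to v16i32
+; with vpmovzxwd and then truncate with vpmovdb instead.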
+
+;
+; and
+;
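+; Bitwise operations commute with truncation: for any vectors %x and %y,
+;   trunc(and %x, %y) == and (trunc %x), (trunc %y)
+; so a legalizer may do the AND at either width. The lowerings below perform
+; a single wide and/pand/vpand per input pair and then share one truncate
+; sequence, rather than truncating both operands separately.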
+
+define <4 x i32> @trunc_and_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_and_v4i64_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: andps %xmm3, %xmm1
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_and_v4i64_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_and_v4i64_v4i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_and_v4i64_v4i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_and_v4i64_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = and <4 x i64> %a0, %a1
+ %2 = trunc <4 x i64> %1 to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i16> @trunc_and_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_and_v8i64_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pand %xmm6, %xmm2
+; SSE-NEXT: pand %xmm7, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: pand %xmm5, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_and_v8i64_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3],xmm1[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3],xmm0[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_and_v8i64_v8i16:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpand %ymm3, %ymm1, %ymm1
+; AVX2-SLOW-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_and_v8i64_v8i16:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpand %ymm3, %ymm1, %ymm1
+; AVX2-FAST-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_and_v8i64_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = and <8 x i64> %a0, %a1
+ %2 = trunc <8 x i64> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <8 x i16> @trunc_and_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
+; SSE-LABEL: trunc_and_v8i32_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: pand %xmm3, %xmm1
+; SSE-NEXT: pslld $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: pslld $16, %xmm0
+; SSE-NEXT: psrad $16, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_and_v8i32_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_and_v8i32_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_and_v8i32_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = and <8 x i32> %a0, %a1
+ %2 = trunc <8 x i32> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @trunc_and_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_and_v16i64_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm4
+; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm5
+; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm6
+; SSE-NEXT: pand {{[0-9]+}}(%rsp), %xmm7
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE-NEXT: pand %xmm8, %xmm7
+; SSE-NEXT: pand %xmm8, %xmm6
+; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: pand %xmm8, %xmm4
+; SSE-NEXT: packuswb %xmm5, %xmm4
+; SSE-NEXT: packuswb %xmm6, %xmm4
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm4, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_and_v16i64_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandpd %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vandpd %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vandpd %ymm6, %ymm2, %ymm2
+; AVX1-NEXT: vandpd %ymm7, %ymm3, %ymm3
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm5 = mem[0,0]
+; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_and_v16i64_v16i8:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpand %ymm5, %ymm1, %ymm1
+; AVX2-SLOW-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpand %ymm7, %ymm3, %ymm3
+; AVX2-SLOW-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_and_v16i64_v16i8:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpand %ymm5, %ymm1, %ymm1
+; AVX2-FAST-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpand %ymm7, %ymm3, %ymm3
+; AVX2-FAST-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm4, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_and_v16i64_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandq %zmm3, %zmm1, %zmm1
+; AVX512-NEXT: vpandq %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = and <16 x i64> %a0, %a1
+ %2 = trunc <16 x i64> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_and_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
+; SSE-LABEL: trunc_and_v16i32_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE-NEXT: pand %xmm8, %xmm7
+; SSE-NEXT: pand %xmm3, %xmm7
+; SSE-NEXT: pand %xmm8, %xmm6
+; SSE-NEXT: pand %xmm2, %xmm6
+; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: pand %xmm1, %xmm5
+; SSE-NEXT: pand %xmm8, %xmm4
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: packuswb %xmm5, %xmm0
+; SSE-NEXT: packuswb %xmm6, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_and_v16i32_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_and_v16i32_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_and_v16i32_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = and <16 x i32> %a0, %a1
+ %2 = trunc <16 x i32> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
+; SSE-LABEL: trunc_and_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm3, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_and_v16i16_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_and_v16i16_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_and_v16i16_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_and_v16i16_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: trunc_and_v16i16_v16i8:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+ %1 = and <16 x i16> %a0, %a1
+ %2 = trunc <16 x i16> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+;
+; and to constant
+;
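+; With a constant operand the narrowing is free, since the mask can be
+; truncated at compile time. The lowerings below truncate %a0 first and then
+; apply one narrow constant-pool mask (e.g. the SSE v4i64 case is just shufps
+; followed by a single andps from memory).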
+
+define <4 x i32> @trunc_and_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_and_const_v4i64_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: andps {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_and_const_v4i64_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_and_const_v4i64_v4i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_and_const_v4i64_v4i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_and_const_v4i64_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = and <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
+ %2 = trunc <4 x i64> %1 to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i16> @trunc_and_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_and_const_v8i64_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; SSE-NEXT: andpd {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_and_const_v8i64_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3],xmm1[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3],xmm0[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_and_const_v8i64_v8i16:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_and_const_v8i64_v8i16:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_and_const_v8i64_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = and <8 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
+ %2 = trunc <8 x i64> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <8 x i16> @trunc_and_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
+; SSE-LABEL: trunc_and_const_v8i32_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pslld $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: pslld $16, %xmm0
+; SSE-NEXT: psrad $16, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_and_const_v8i32_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_and_const_v8i32_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_and_const_v8i32_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = and <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %2 = trunc <8 x i32> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @trunc_and_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_and_const_v16i64_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE-NEXT: pand %xmm8, %xmm7
+; SSE-NEXT: pand %xmm8, %xmm6
+; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: pand %xmm8, %xmm4
+; SSE-NEXT: packuswb %xmm5, %xmm4
+; SSE-NEXT: packuswb %xmm6, %xmm4
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm4, %xmm0
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_and_const_v16i64_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm5 = mem[0,0]
+; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_and_const_v16i64_v16i8:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-SLOW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_and_const_v16i64_v16i8:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm4, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-FAST-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_and_const_v16i64_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = and <16 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
+ %2 = trunc <16 x i64> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_and_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
+; SSE-LABEL: trunc_and_const_v16i32_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_and_const_v16i32_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_and_const_v16i32_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_and_const_v16i32_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = and <16 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %2 = trunc <16 x i32> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
+; SSE-LABEL: trunc_and_const_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_and_const_v16i16_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_and_const_v16i16_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_and_const_v16i16_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_and_const_v16i16_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: trunc_and_const_v16i16_v16i8:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+ %1 = and <16 x i16> %a0, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+ %2 = trunc <16 x i16> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+;
+; xor
+;
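+; Same pattern as the 'and' tests above: xor also commutes with truncation,
+; so the expected codegen is one wide xor per input pair followed by the
+; shared truncate sequence.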
+
+define <4 x i32> @trunc_xor_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_xor_v4i64_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: xorps %xmm3, %xmm1
+; SSE-NEXT: xorps %xmm2, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_xor_v4i64_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_xor_v4i64_v4i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_xor_v4i64_v4i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_xor_v4i64_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = xor <4 x i64> %a0, %a1
+ %2 = trunc <4 x i64> %1 to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i16> @trunc_xor_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_xor_v8i64_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_xor_v8i64_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3],xmm1[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3],xmm0[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_xor_v8i64_v8i16:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-SLOW-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_xor_v8i64_v8i16:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-FAST-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_xor_v8i64_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = xor <8 x i64> %a0, %a1
+ %2 = trunc <8 x i64> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <8 x i16> @trunc_xor_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
+; SSE-LABEL: trunc_xor_v8i32_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: pslld $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: pslld $16, %xmm0
+; SSE-NEXT: psrad $16, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_xor_v8i32_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_xor_v8i32_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_xor_v8i32_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = xor <8 x i32> %a0, %a1
+ %2 = trunc <8 x i32> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @trunc_xor_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_xor_v16i64_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm4
+; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm5
+; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm6
+; SSE-NEXT: pxor {{[0-9]+}}(%rsp), %xmm7
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE-NEXT: pand %xmm8, %xmm7
+; SSE-NEXT: pand %xmm8, %xmm6
+; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: pand %xmm8, %xmm4
+; SSE-NEXT: packuswb %xmm5, %xmm4
+; SSE-NEXT: packuswb %xmm6, %xmm4
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm4, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_xor_v16i64_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vxorpd %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vxorpd %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vxorpd %ymm6, %ymm2, %ymm2
+; AVX1-NEXT: vxorpd %ymm7, %ymm3, %ymm3
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm5 = mem[0,0]
+; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_xor_v16i64_v16i8:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpxor %ymm5, %ymm1, %ymm1
+; AVX2-SLOW-NEXT: vpxor %ymm4, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpxor %ymm7, %ymm3, %ymm3
+; AVX2-SLOW-NEXT: vpxor %ymm6, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_xor_v16i64_v16i8:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpxor %ymm5, %ymm1, %ymm1
+; AVX2-FAST-NEXT: vpxor %ymm4, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpxor %ymm7, %ymm3, %ymm3
+; AVX2-FAST-NEXT: vpxor %ymm6, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm4, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_xor_v16i64_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxorq %zmm3, %zmm1, %zmm1
+; AVX512-NEXT: vpxorq %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = xor <16 x i64> %a0, %a1
+ %2 = trunc <16 x i64> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_xor_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
+; SSE-LABEL: trunc_xor_v16i32_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_xor_v16i32_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_xor_v16i32_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_xor_v16i32_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = xor <16 x i32> %a0, %a1
+ %2 = trunc <16 x i32> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
+; SSE-LABEL: trunc_xor_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_xor_v16i16_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_xor_v16i16_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_xor_v16i16_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_xor_v16i16_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: trunc_xor_v16i16_v16i8:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+ %1 = xor <16 x i16> %a0, %a1
+ %2 = trunc <16 x i16> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+;
+; xor with constant
+;
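+; Note: when the second operand is a build-vector constant, the logic op can
+; be performed after the truncation, since the constant can be truncated at
+; compile time. The checks below therefore expect a single narrow xor against
+; a RIP-relative constant rather than a wide xor ahead of the shuffle/pack
+; sequence.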
+
+define <4 x i32> @trunc_xor_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_xor_const_v4i64_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: xorps {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_xor_const_v4i64_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: vxorps {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_xor_const_v4i64_v4i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vxorps {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_xor_const_v4i64_v4i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: vxorps {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_xor_const_v4i64_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = xor <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
+ %2 = trunc <4 x i64> %1 to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i16> @trunc_xor_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_xor_const_v8i64_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; SSE-NEXT: xorpd {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_xor_const_v8i64_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3],xmm1[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3],xmm0[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_xor_const_v8i64_v8i16:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_xor_const_v8i64_v8i16:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_xor_const_v8i64_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = xor <8 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
+ %2 = trunc <8 x i64> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <8 x i16> @trunc_xor_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
+; SSE-LABEL: trunc_xor_const_v8i32_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pslld $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: pslld $16, %xmm0
+; SSE-NEXT: psrad $16, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: pxor {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_xor_const_v8i32_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_xor_const_v8i32_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_xor_const_v8i32_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = xor <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %2 = trunc <8 x i32> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @trunc_xor_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_xor_const_v16i64_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE-NEXT: pand %xmm8, %xmm7
+; SSE-NEXT: pand %xmm8, %xmm6
+; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: pand %xmm8, %xmm4
+; SSE-NEXT: packuswb %xmm5, %xmm4
+; SSE-NEXT: packuswb %xmm6, %xmm4
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm4, %xmm0
+; SSE-NEXT: pxor {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_xor_const_v16i64_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm5 = mem[0,0]
+; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_xor_const_v16i64_v16i8:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-SLOW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_xor_const_v16i64_v16i8:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm4, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-FAST-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_xor_const_v16i64_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = xor <16 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
+ %2 = trunc <16 x i64> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_xor_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
+; SSE-LABEL: trunc_xor_const_v16i32_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: pxor {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_xor_const_v16i32_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_xor_const_v16i32_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_xor_const_v16i32_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = xor <16 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %2 = trunc <16 x i32> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
+; SSE-LABEL: trunc_xor_const_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: pxor {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_xor_const_v16i16_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_xor_const_v16i16_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_xor_const_v16i16_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_xor_const_v16i16_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: trunc_xor_const_v16i16_v16i8:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+ %1 = xor <16 x i16> %a0, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+ %2 = trunc <16 x i16> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+;
+; or
+;
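+; Truncation distributes over bitwise or, so these tests expect the or to be
+; done once at the wide type (por/vpor/vporq) followed by a single
+; truncation, mirroring the xor tests above.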
+
+define <4 x i32> @trunc_or_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_or_v4i64_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: orps %xmm3, %xmm1
+; SSE-NEXT: orps %xmm2, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_or_v4i64_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_or_v4i64_v4i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_or_v4i64_v4i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_or_v4i64_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = or <4 x i64> %a0, %a1
+ %2 = trunc <4 x i64> %1 to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i16> @trunc_or_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_or_v8i64_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: por %xmm6, %xmm2
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: por %xmm4, %xmm0
+; SSE-NEXT: por %xmm5, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_or_v8i64_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3],xmm1[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3],xmm0[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_or_v8i64_v8i16:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpor %ymm3, %ymm1, %ymm1
+; AVX2-SLOW-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_or_v8i64_v8i16:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpor %ymm3, %ymm1, %ymm1
+; AVX2-FAST-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_or_v8i64_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = or <8 x i64> %a0, %a1
+ %2 = trunc <8 x i64> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <8 x i16> @trunc_or_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
+; SSE-LABEL: trunc_or_v8i32_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: por %xmm2, %xmm0
+; SSE-NEXT: por %xmm3, %xmm1
+; SSE-NEXT: pslld $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: pslld $16, %xmm0
+; SSE-NEXT: psrad $16, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_or_v8i32_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_or_v8i32_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_or_v8i32_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = or <8 x i32> %a0, %a1
+ %2 = trunc <8 x i32> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @trunc_or_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind {
+; SSE-LABEL: trunc_or_v16i64_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm4
+; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm5
+; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm6
+; SSE-NEXT: por {{[0-9]+}}(%rsp), %xmm7
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE-NEXT: pand %xmm8, %xmm7
+; SSE-NEXT: pand %xmm8, %xmm6
+; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: pand %xmm8, %xmm4
+; SSE-NEXT: packuswb %xmm5, %xmm4
+; SSE-NEXT: packuswb %xmm6, %xmm4
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm4, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_or_v16i64_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorpd %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vorpd %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vorpd %ymm6, %ymm2, %ymm2
+; AVX1-NEXT: vorpd %ymm7, %ymm3, %ymm3
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm5 = mem[0,0]
+; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_or_v16i64_v16i8:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpor %ymm5, %ymm1, %ymm1
+; AVX2-SLOW-NEXT: vpor %ymm4, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpor %ymm7, %ymm3, %ymm3
+; AVX2-SLOW-NEXT: vpor %ymm6, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_or_v16i64_v16i8:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpor %ymm5, %ymm1, %ymm1
+; AVX2-FAST-NEXT: vpor %ymm4, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpor %ymm7, %ymm3, %ymm3
+; AVX2-FAST-NEXT: vpor %ymm6, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm4, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_or_v16i64_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vporq %zmm3, %zmm1, %zmm1
+; AVX512-NEXT: vporq %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = or <16 x i64> %a0, %a1
+ %2 = trunc <16 x i64> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_or_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
+; SSE-LABEL: trunc_or_v16i32_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: por %xmm4, %xmm0
+; SSE-NEXT: por %xmm5, %xmm1
+; SSE-NEXT: por %xmm6, %xmm2
+; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_or_v16i32_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_or_v16i32_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_or_v16i32_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = or <16 x i32> %a0, %a1
+ %2 = trunc <16 x i32> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
+; SSE-LABEL: trunc_or_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: por %xmm2, %xmm0
+; SSE-NEXT: por %xmm3, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_or_v16i16_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_or_v16i16_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_or_v16i16_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_or_v16i16_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: trunc_or_v16i16_v16i8:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+ %1 = or <16 x i16> %a0, %a1
+ %2 = trunc <16 x i16> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+;
+; or with constant
+;
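+; As in the xor-with-constant tests, the or is expected after the truncation,
+; against a constant pool vector that was truncated at compile time.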
+
+define <4 x i32> @trunc_or_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_or_const_v4i64_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: orps {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_or_const_v4i64_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: vorps {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_or_const_v4i64_v4i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vorps {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_or_const_v4i64_v4i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: vorps {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_or_const_v4i64_v4i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = or <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
+ %2 = trunc <4 x i64> %1 to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i16> @trunc_or_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_or_const_v8i64_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; SSE-NEXT: orpd {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_or_const_v8i64_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3],xmm1[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3],xmm0[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_or_const_v8i64_v8i16:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_or_const_v8i64_v8i16:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_or_const_v8i64_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = or <8 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
+ %2 = trunc <8 x i64> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <8 x i16> @trunc_or_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
+; SSE-LABEL: trunc_or_const_v8i32_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: pslld $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: pslld $16, %xmm0
+; SSE-NEXT: psrad $16, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: por {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_or_const_v8i32_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_or_const_v8i32_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_or_const_v8i32_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = or <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %2 = trunc <8 x i32> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <16 x i8> @trunc_or_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
+; SSE-LABEL: trunc_or_const_v16i64_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE-NEXT: pand %xmm8, %xmm7
+; SSE-NEXT: pand %xmm8, %xmm6
+; SSE-NEXT: packuswb %xmm7, %xmm6
+; SSE-NEXT: pand %xmm8, %xmm5
+; SSE-NEXT: pand %xmm8, %xmm4
+; SSE-NEXT: packuswb %xmm5, %xmm4
+; SSE-NEXT: packuswb %xmm6, %xmm4
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm4, %xmm0
+; SSE-NEXT: por {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_or_const_v16i64_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm5 = mem[0,0]
+; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandpd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandpd %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_or_const_v16i64_v16i8:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-SLOW-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_or_const_v16i64_v16i8:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm4, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb %ymm3, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-FAST-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_or_const_v16i64_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = or <16 x i64> %a0, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
+ %2 = trunc <16 x i64> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_or_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
+; SSE-LABEL: trunc_or_const_v16i32_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: por {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_or_const_v16i32_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_or_const_v16i32_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_or_const_v16i32_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = or <16 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %2 = trunc <16 x i32> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
+; SSE-LABEL: trunc_or_const_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: por {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_or_const_v16i16_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_or_const_v16i16_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_or_const_v16i16_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_or_const_v16i16_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: trunc_or_const_v16i16_v16i8:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vzeroupper
+; AVX512DQ-NEXT: retq
+ %1 = or <16 x i16> %a0, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
+ %2 = trunc <16 x i16> %1 to <16 x i8>
+ ret <16 x i8> %2
+}
+
+;
+; complex patterns - often created by the vectorizer
+;
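+; These tests sign-extend <4 x i32> inputs to <4 x i64>, multiply and add at
+; the wide type, then truncate back to <4 x i32>. A backend that sees through
+; the sext/trunc pair can select 32-bit vpmulld/vpaddd directly (see the AVX
+; checks); SSE2 has no 32-bit vector multiply, so it expands via pmuludq.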
+
+define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
+; SSE-LABEL: mul_add_const_v4i64_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,1,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
+; SSE-NEXT: pmuludq %xmm1, %xmm3
+; SSE-NEXT: pxor %xmm0, %xmm0
+; SSE-NEXT: pmuludq %xmm0, %xmm1
+; SSE-NEXT: psllq $32, %xmm1
+; SSE-NEXT: paddq %xmm3, %xmm1
+; SSE-NEXT: pmuludq %xmm4, %xmm2
+; SSE-NEXT: pmuludq %xmm4, %xmm0
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: paddq %xmm2, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: mul_add_const_v4i64_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = sext <4 x i32> %a0 to <4 x i64>
+ %2 = sext <4 x i32> %a1 to <4 x i64>
+ %3 = mul <4 x i64> %1, %2
+ %4 = add <4 x i64> %3, <i64 -3, i64 -1, i64 1, i64 3>
+ %5 = trunc <4 x i64> %4 to <4 x i32>
+ ret <4 x i32> %5
+}
+
+define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
+; SSE-LABEL: mul_add_self_v4i64_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: psrad $31, %xmm3
+; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: psrad $31, %xmm6
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; SSE-NEXT: movdqa %xmm4, %xmm5
+; SSE-NEXT: psrad $31, %xmm5
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE-NEXT: movdqa %xmm1, %xmm7
+; SSE-NEXT: psrad $31, %xmm7
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
+; SSE-NEXT: pxor %xmm8, %xmm8
+; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1]
+; SSE-NEXT: pmuludq %xmm1, %xmm6
+; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
+; SSE-NEXT: pmuludq %xmm0, %xmm7
+; SSE-NEXT: paddq %xmm6, %xmm7
+; SSE-NEXT: psllq $32, %xmm7
+; SSE-NEXT: pmuludq %xmm0, %xmm1
+; SSE-NEXT: paddq %xmm7, %xmm1
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
+; SSE-NEXT: pmuludq %xmm4, %xmm3
+; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
+; SSE-NEXT: pmuludq %xmm2, %xmm5
+; SSE-NEXT: paddq %xmm3, %xmm5
+; SSE-NEXT: psllq $32, %xmm5
+; SSE-NEXT: pmuludq %xmm2, %xmm4
+; SSE-NEXT: paddq %xmm5, %xmm4
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
+; SSE-NEXT: paddd %xmm1, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: mul_add_self_v4i64_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = sext <4 x i32> %a0 to <4 x i64>
+ %2 = sext <4 x i32> %a1 to <4 x i64>
+ %3 = mul <4 x i64> %1, %2
+ %4 = add <4 x i64> %3, %3
+ %5 = trunc <4 x i64> %4 to <4 x i32>
+ ret <4 x i32> %5
+}
+
+define <4 x i32> @mul_add_multiuse_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
+; SSE-LABEL: mul_add_multiuse_v4i64_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,1,1,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
+; SSE-NEXT: pmuludq %xmm1, %xmm3
+; SSE-NEXT: pxor %xmm5, %xmm5
+; SSE-NEXT: pmuludq %xmm5, %xmm1
+; SSE-NEXT: psllq $32, %xmm1
+; SSE-NEXT: paddq %xmm3, %xmm1
+; SSE-NEXT: pmuludq %xmm4, %xmm2
+; SSE-NEXT: pmuludq %xmm4, %xmm5
+; SSE-NEXT: psllq $32, %xmm5
+; SSE-NEXT: paddq %xmm2, %xmm5
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm1[0,2]
+; SSE-NEXT: paddd %xmm5, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: mul_add_multiuse_v4i64_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %1 = sext <4 x i32> %a0 to <4 x i64>
+ %2 = sext <4 x i32> %a1 to <4 x i64>
+ %3 = mul <4 x i64> %1, %2
+ %4 = add <4 x i64> %1, %3
+ %5 = trunc <4 x i64> %4 to <4 x i32>
+ ret <4 x i32> %5
+}
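+
+; The multiuse case narrows just as well: %1 feeds both the mul and the add,
+; but every wide value is dead once the final trunc executes, so the AVX
+; lowering is still a plain 32-bit vpmulld + vpaddd.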
Added: llvm/trunk/test/CodeGen/X86/vector-trunc-packus-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-packus-widen.ll?rev=346745&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-packus-widen.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-packus-widen.ll Mon Nov 12 23:47:52 2018
@@ -0,0 +1,3306 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-SLOW
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL
+
+;
+; PACKUS saturation truncation to vXi32
+;
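+;
+; All of the packus tests share the same clamp-then-truncate shape, where
+; UMAX stands for the unsigned maximum of the destination element type:
+;   %lo = select (icmp slt %x, UMAX), %x, UMAX
+;   %c  = select (icmp sgt %lo, 0), %lo, 0
+;   %r  = trunc %c
+; i.e. clamp to [0, UMAX] and truncate, which is exactly the operation the
+; PACKUS family of instructions performs.
+;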
+
+define <4 x i32> @trunc_packus_v4i64_v4i32(<4 x i64> %a0) {
+; SSE2-LABEL: trunc_packus_v4i64_v4i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [4294967295,4294967295]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483647,2147483647]
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm5, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: pandn %xmm8, %xmm3
+; SSE2-NEXT: por %xmm0, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm5, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm5, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm4
+; SSE2-NEXT: por %xmm1, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_packus_v4i64_v4i32:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [4294967295,4294967295]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
+; SSSE3-NEXT: pxor %xmm2, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [2147483647,2147483647]
+; SSSE3-NEXT: movdqa %xmm5, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm5, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: pandn %xmm8, %xmm3
+; SSSE3-NEXT: por %xmm0, %xmm3
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pxor %xmm2, %xmm0
+; SSSE3-NEXT: movdqa %xmm5, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm5, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm0, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pandn %xmm8, %xmm4
+; SSSE3-NEXT: por %xmm1, %xmm4
+; SSSE3-NEXT: movdqa %xmm4, %xmm0
+; SSSE3-NEXT: pxor %xmm2, %xmm0
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm2, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm5, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: por %xmm0, %xmm1
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: movdqa %xmm3, %xmm0
+; SSSE3-NEXT: pxor %xmm2, %xmm0
+; SSSE3-NEXT: movdqa %xmm0, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm2, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm5, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm2, %xmm0
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_packus_v4i64_v4i32:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: movapd {{.*#+}} xmm4 = [4294967295,4294967295]
+; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
+; SSE41-NEXT: pxor %xmm8, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [2147483647,2147483647]
+; SSE41-NEXT: movdqa %xmm6, %xmm5
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm7, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT: por %xmm3, %xmm0
+; SSE41-NEXT: movapd %xmm4, %xmm5
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm8, %xmm0
+; SSE41-NEXT: movdqa %xmm6, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm6, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm3, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm4
+; SSE41-NEXT: xorpd %xmm1, %xmm1
+; SSE41-NEXT: movapd %xmm4, %xmm0
+; SSE41-NEXT: xorpd %xmm8, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm3, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
+; SSE41-NEXT: movapd %xmm5, %xmm0
+; SSE41-NEXT: xorpd %xmm8, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm1
+; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE41-NEXT: movaps %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_packus_v4i64_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [4294967295,4294967295]
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_packus_v4i64_v4i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
+; AVX2-SLOW-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm1
+; AVX2-SLOW-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_packus_v4i64_v4i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
+; AVX2-FAST-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
+; AVX2-FAST-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-FAST-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm1
+; AVX2-FAST-NEXT: vpand %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512F-LABEL: trunc_packus_v4i64_v4i32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
+; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_packus_v4i64_v4i32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpmaxsq %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovusqd %ymm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_packus_v4i64_v4i32:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
+; AVX512BW-NEXT: vpminsq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_packus_v4i64_v4i32:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpmaxsq %ymm1, %ymm0, %ymm0
+; AVX512BWVL-NEXT: vpmovusqd %ymm0, %xmm0
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp slt <4 x i64> %a0, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+ %2 = select <4 x i1> %1, <4 x i64> %a0, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+ %3 = icmp sgt <4 x i64> %2, zeroinitializer
+ %4 = select <4 x i1> %3, <4 x i64> %2, <4 x i64> zeroinitializer
+ %5 = trunc <4 x i64> %4 to <4 x i32>
+ ret <4 x i32> %5
+}
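+
+; Note on the SSE sequences above: pcmpgtq only arrived in SSE4.2, so each
+; v2i64 signed compare is synthesized from 32-bit ops - roughly, the pxor
+; flips sign bits so that signed dword compares can stand in for the unsigned
+; ones the algorithm needs, pcmpgtd/pcmpeqd compare the two dword halves, and
+; the pand/por/pshufd sequence recombines the half results into a full 64-bit
+; mask.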
+
+define <8 x i32> @trunc_packus_v8i64_v8i32(<8 x i64> %a0) {
+; SSE2-LABEL: trunc_packus_v8i64_v8i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [4294967295,4294967295]
+; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: pxor %xmm10, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [2147483647,2147483647]
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: pandn %xmm8, %xmm5
+; SSE2-NEXT: por %xmm0, %xmm5
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm10, %xmm0
+; SSE2-NEXT: movdqa %xmm9, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm0
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm10, %xmm1
+; SSE2-NEXT: movdqa %xmm9, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm6
+; SSE2-NEXT: pand %xmm6, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm6
+; SSE2-NEXT: por %xmm2, %xmm6
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: pxor %xmm10, %xmm1
+; SSE2-NEXT: movdqa %xmm9, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm10, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm3
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm6, %xmm1
+; SSE2-NEXT: pxor %xmm10, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm1
+; SSE2-NEXT: pand %xmm6, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm10, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm0, %xmm3
+; SSE2-NEXT: movdqa %xmm5, %xmm0
+; SSE2-NEXT: pxor %xmm10, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm0
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_packus_v8i64_v8i32:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [4294967295,4294967295]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm0, %xmm5
+; SSSE3-NEXT: pxor %xmm10, %xmm5
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [2147483647,2147483647]
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm5, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm0
+; SSSE3-NEXT: pandn %xmm8, %xmm5
+; SSSE3-NEXT: por %xmm0, %xmm5
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pxor %xmm10, %xmm0
+; SSSE3-NEXT: movdqa %xmm9, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm0
+; SSSE3-NEXT: pand %xmm0, %xmm1
+; SSSE3-NEXT: pandn %xmm8, %xmm0
+; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSSE3-NEXT: pxor %xmm10, %xmm1
+; SSSE3-NEXT: movdqa %xmm9, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm6
+; SSSE3-NEXT: pand %xmm6, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm6
+; SSSE3-NEXT: por %xmm2, %xmm6
+; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: pxor %xmm10, %xmm1
+; SSSE3-NEXT: movdqa %xmm9, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm2
+; SSSE3-NEXT: por %xmm3, %xmm2
+; SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSSE3-NEXT: pxor %xmm10, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm3
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: movdqa %xmm6, %xmm1
+; SSSE3-NEXT: pxor %xmm10, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm1
+; SSSE3-NEXT: pand %xmm6, %xmm1
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
+; SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSSE3-NEXT: pxor %xmm10, %xmm2
+; SSSE3-NEXT: movdqa %xmm2, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: pand %xmm0, %xmm3
+; SSSE3-NEXT: movdqa %xmm5, %xmm0
+; SSSE3-NEXT: pxor %xmm10, %xmm0
+; SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm0
+; SSSE3-NEXT: pand %xmm5, %xmm0
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_packus_v8i64_v8i32:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: movapd {{.*#+}} xmm7 = [4294967295,4294967295]
+; SSE41-NEXT: movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm11 = [2147483647,2147483647]
+; SSE41-NEXT: movdqa %xmm11, %xmm6
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm8, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm8
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm8
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm9
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm9
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm4
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm2, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm7
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: movapd %xmm7, %xmm0
+; SSE41-NEXT: xorpd %xmm10, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm3, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm3
+; SSE41-NEXT: movapd %xmm4, %xmm0
+; SSE41-NEXT: xorpd %xmm10, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm1
+; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
+; SSE41-NEXT: movapd %xmm9, %xmm0
+; SSE41-NEXT: xorpd %xmm10, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm9, %xmm3
+; SSE41-NEXT: movapd %xmm8, %xmm0
+; SSE41-NEXT: xorpd %xmm10, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm2
+; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
+; SSE41-NEXT: movaps %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_packus_v8i64_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [4294967295,4294967295]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm1, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm7, %xmm2
+; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm1, %xmm6, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm2
+; AVX1-NEXT: vpand %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_packus_v8i64_v8i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
+; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm3
+; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm3
+; AVX2-SLOW-NEXT: vpand %ymm1, %ymm3, %ymm1
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm2
+; AVX2-SLOW-NEXT: vpand %ymm0, %ymm2, %ymm0
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_packus_v8i64_v8i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
+; AVX2-FAST-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
+; AVX2-FAST-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm3
+; AVX2-FAST-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm3
+; AVX2-FAST-NEXT: vpand %ymm1, %ymm3, %ymm1
+; AVX2-FAST-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT: vpand %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_packus_v8i64_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovusqd %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %1 = icmp slt <8 x i64> %a0, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+ %2 = select <8 x i1> %1, <8 x i64> %a0, <8 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+ %3 = icmp sgt <8 x i64> %2, zeroinitializer
+ %4 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> zeroinitializer
+ %5 = trunc <8 x i64> %4 to <8 x i32>
+ ret <8 x i32> %5
+}
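+
+; With AVX512 the whole clamp folds into two instructions: vpmovusqd is an
+; unsigned-saturating truncate, so it handles the UMAX side of the clamp by
+; itself and only the max-with-zero (vpmaxsq) needs a separate instruction.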
+
+;
+; PACKUS saturation truncation to vXi16
+;
+
+define <8 x i16> @trunc_packus_v8i64_v8i16(<8 x i64> %a0) {
+; SSE2-LABEL: trunc_packus_v8i64_v8i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535]
+; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: pxor %xmm10, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [2147549183,2147549183]
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm5
+; SSE2-NEXT: por %xmm1, %xmm5
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm10, %xmm1
+; SSE2-NEXT: movdqa %xmm9, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pandn %xmm8, %xmm1
+; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm10, %xmm0
+; SSE2-NEXT: movdqa %xmm9, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm6
+; SSE2-NEXT: pand %xmm6, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm6
+; SSE2-NEXT: por %xmm3, %xmm6
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm10, %xmm0
+; SSE2-NEXT: movdqa %xmm9, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm3
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm10, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: movdqa %xmm6, %xmm2
+; SSE2-NEXT: pxor %xmm10, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm2
+; SSE2-NEXT: pand %xmm6, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pxor %xmm10, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm3, %xmm4
+; SSE2-NEXT: pand %xmm1, %xmm4
+; SSE2-NEXT: movdqa %xmm5, %xmm1
+; SSE2-NEXT: pxor %xmm10, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm3
+; SSE2-NEXT: pand %xmm5, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_packus_v8i64_v8i16:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm1, %xmm5
+; SSSE3-NEXT: pxor %xmm10, %xmm5
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [2147549183,2147549183]
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm5, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm1
+; SSSE3-NEXT: pandn %xmm8, %xmm5
+; SSSE3-NEXT: por %xmm1, %xmm5
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm10, %xmm1
+; SSSE3-NEXT: movdqa %xmm9, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pandn %xmm8, %xmm1
+; SSSE3-NEXT: por %xmm0, %xmm1
+; SSSE3-NEXT: movdqa %xmm3, %xmm0
+; SSSE3-NEXT: pxor %xmm10, %xmm0
+; SSSE3-NEXT: movdqa %xmm9, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm0, %xmm6
+; SSSE3-NEXT: pand %xmm6, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm6
+; SSSE3-NEXT: por %xmm3, %xmm6
+; SSSE3-NEXT: movdqa %xmm2, %xmm0
+; SSSE3-NEXT: pxor %xmm10, %xmm0
+; SSSE3-NEXT: movdqa %xmm9, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm0, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm3
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: movdqa %xmm3, %xmm0
+; SSSE3-NEXT: pxor %xmm10, %xmm0
+; SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm0
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: movdqa %xmm6, %xmm2
+; SSSE3-NEXT: pxor %xmm10, %xmm2
+; SSSE3-NEXT: movdqa %xmm2, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm2
+; SSSE3-NEXT: pand %xmm6, %xmm2
+; SSSE3-NEXT: movdqa %xmm1, %xmm3
+; SSSE3-NEXT: pxor %xmm10, %xmm3
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm3, %xmm4
+; SSSE3-NEXT: pand %xmm1, %xmm4
+; SSSE3-NEXT: movdqa %xmm5, %xmm1
+; SSSE3-NEXT: pxor %xmm10, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm3
+; SSSE3-NEXT: pand %xmm5, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,2,2,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_packus_v8i64_v8i16:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm9
+; SSE41-NEXT: movapd {{.*#+}} xmm7 = [65535,65535]
+; SSE41-NEXT: movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm11 = [2147549183,2147549183]
+; SSE41-NEXT: movdqa %xmm11, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm8
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm8
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm9, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm9, %xmm4
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm7
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: movapd %xmm7, %xmm0
+; SSE41-NEXT: xorpd %xmm10, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm5
+; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm5
+; SSE41-NEXT: movapd %xmm4, %xmm0
+; SSE41-NEXT: xorpd %xmm10, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm1
+; SSE41-NEXT: packusdw %xmm5, %xmm1
+; SSE41-NEXT: movapd %xmm2, %xmm0
+; SSE41-NEXT: xorpd %xmm10, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm4
+; SSE41-NEXT: movapd %xmm8, %xmm0
+; SSE41-NEXT: xorpd %xmm10, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm3
+; SSE41-NEXT: packusdw %xmm4, %xmm3
+; SSE41-NEXT: packusdw %xmm3, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_packus_v8i64_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [65535,65535,65535,65535]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [65535,65535]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm1, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm7, %xmm2
+; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm1, %xmm6, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm2
+; AVX1-NEXT: vpand %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_packus_v8i64_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [65535,65535,65535,65535]
+; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm3
+; AVX2-NEXT: vpand %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm2
+; AVX2-NEXT: vpand %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_packus_v8i64_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovusqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <8 x i64> %a0, <i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535>
+ %2 = select <8 x i1> %1, <8 x i64> %a0, <8 x i64> <i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535>
+ %3 = icmp sgt <8 x i64> %2, zeroinitializer
+ %4 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> zeroinitializer
+ %5 = trunc <8 x i64> %4 to <8 x i16>
+ ret <8 x i16> %5
+}
+
+define <8 x i16> @trunc_packus_v8i32_v8i16(<8 x i32> %a0) {
+; SSE2-LABEL: trunc_packus_v8i32_v8i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pandn %xmm2, %xmm3
+; SSE2-NEXT: por %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pandn %xmm2, %xmm1
+; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pslld $16, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: pslld $16, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_packus_v8i32_v8i16:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
+; SSSE3-NEXT: movdqa %xmm2, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm1
+; SSSE3-NEXT: pandn %xmm2, %xmm3
+; SSSE3-NEXT: por %xmm1, %xmm3
+; SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pandn %xmm2, %xmm1
+; SSSE3-NEXT: por %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm0
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm1
+; SSSE3-NEXT: pand %xmm3, %xmm1
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: pshufb %xmm2, %xmm1
+; SSSE3-NEXT: pshufb %xmm2, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_packus_v8i32_v8i16:
+; SSE41: # %bb.0:
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_packus_v8i32_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_packus_v8i32_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_packus_v8i32_v8i16:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-NEXT: vpminsd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_packus_v8i32_v8i16:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovusdw %ymm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_packus_v8i32_v8i16:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512BW-NEXT: vpminsd %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_packus_v8i32_v8i16:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512BWVL-NEXT: vpmovusdw %ymm0, %xmm0
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp slt <8 x i32> %a0, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+ %2 = select <8 x i1> %1, <8 x i32> %a0, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+ %3 = icmp sgt <8 x i32> %2, zeroinitializer
+ %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
+ %5 = trunc <8 x i32> %4 to <8 x i16>
+ ret <8 x i16> %5
+}
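+
+; When the source and destination widths line up with a pack instruction, as
+; in this i32->i16 case, the entire clamp+trunc collapses to a single
+; packusdw on SSE4.1 and later. SSE2/SSSE3 lack packusdw, so they clamp
+; manually and then narrow the already-clamped values with the shift/packssdw
+; (or pshufb) idiom seen above.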
+
+define <16 x i16> @trunc_packus_v16i32_v16i16(<16 x i32> %a0) {
+; SSE2-LABEL: trunc_packus_v16i32_v16i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535]
+; SSE2-NEXT: movdqa %xmm6, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pandn %xmm6, %xmm4
+; SSE2-NEXT: por %xmm1, %xmm4
+; SSE2-NEXT: movdqa %xmm6, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: pandn %xmm6, %xmm5
+; SSE2-NEXT: por %xmm0, %xmm5
+; SSE2-NEXT: movdqa %xmm6, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm3
+; SSE2-NEXT: pandn %xmm6, %xmm0
+; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: movdqa %xmm6, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pandn %xmm6, %xmm3
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm0, %xmm3
+; SSE2-NEXT: movdqa %xmm5, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm5
+; SSE2-NEXT: pand %xmm4, %xmm5
+; SSE2-NEXT: pslld $16, %xmm5
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: pslld $16, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm5, %xmm0
+; SSE2-NEXT: pslld $16, %xmm3
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: pslld $16, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm3, %xmm1
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_packus_v16i32_v16i16:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535]
+; SSSE3-NEXT: movdqa %xmm6, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pandn %xmm6, %xmm4
+; SSSE3-NEXT: por %xmm1, %xmm4
+; SSSE3-NEXT: movdqa %xmm6, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm0
+; SSSE3-NEXT: pandn %xmm6, %xmm5
+; SSSE3-NEXT: por %xmm0, %xmm5
+; SSSE3-NEXT: movdqa %xmm6, %xmm0
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm0
+; SSSE3-NEXT: pand %xmm0, %xmm3
+; SSSE3-NEXT: pandn %xmm6, %xmm0
+; SSSE3-NEXT: por %xmm3, %xmm0
+; SSSE3-NEXT: movdqa %xmm6, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm2
+; SSSE3-NEXT: pandn %xmm6, %xmm3
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm1
+; SSSE3-NEXT: pand %xmm3, %xmm1
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm3
+; SSSE3-NEXT: pand %xmm0, %xmm3
+; SSSE3-NEXT: movdqa %xmm5, %xmm0
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm0
+; SSSE3-NEXT: pand %xmm5, %xmm0
+; SSSE3-NEXT: movdqa %xmm4, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm5
+; SSSE3-NEXT: pand %xmm4, %xmm5
+; SSSE3-NEXT: pslld $16, %xmm5
+; SSSE3-NEXT: psrad $16, %xmm5
+; SSSE3-NEXT: pslld $16, %xmm0
+; SSSE3-NEXT: psrad $16, %xmm0
+; SSSE3-NEXT: packssdw %xmm5, %xmm0
+; SSSE3-NEXT: pslld $16, %xmm3
+; SSSE3-NEXT: psrad $16, %xmm3
+; SSSE3-NEXT: pslld $16, %xmm1
+; SSSE3-NEXT: psrad $16, %xmm1
+; SSSE3-NEXT: packssdw %xmm3, %xmm1
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_packus_v16i32_v16i16:
+; SSE41: # %bb.0:
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: packusdw %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm1
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_packus_v16i32_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_packus_v16i32_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_packus_v16i32_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovusdw %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %1 = icmp slt <16 x i32> %a0, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+ %2 = select <16 x i1> %1, <16 x i32> %a0, <16 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+ %3 = icmp sgt <16 x i32> %2, zeroinitializer
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> zeroinitializer
+ %5 = trunc <16 x i32> %4 to <16 x i16>
+ ret <16 x i16> %5
+}
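+
+; With SSE4.1 the clamp above folds into two packusdw instructions, and
+; AVX512 needs only a vpmaxsd with zero before the saturating vpmovusdw;
+; SSE2/SSSE3 must instead synthesize the saturation from
+; pcmpgtd/pand/pandn masks plus pslld/psrad/packssdw truncation.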
+
+;
+; PACKUS saturation truncation to vXi8
+;
+
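+; Each test below builds the packus pattern in generic IR: a signed clamp
+; to [0, 255] followed by a trunc. Per element (a minimal sketch, with %x
+; standing for one input lane):
+;
+;   %lt  = icmp slt i64 %x, 255
+;   %min = select i1 %lt, i64 %x, i64 255
+;   %gt  = icmp sgt i64 %min, 0
+;   %max = select i1 %gt, i64 %min, i64 0
+;   %t   = trunc i64 %max to i8
+;
+; which the backend is expected to match to unsigned-saturating packs.
+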
+define <8 x i8> @trunc_packus_v8i64_v8i8(<8 x i64> %a0) {
+; SSE2-LABEL: trunc_packus_v8i64_v8i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255]
+; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pxor %xmm10, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [2147483903,2147483903]
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm11
+; SSE2-NEXT: pand %xmm11, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm11
+; SSE2-NEXT: por %xmm3, %xmm11
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm10, %xmm3
+; SSE2-NEXT: movdqa %xmm9, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm5, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm3
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm10, %xmm2
+; SSE2-NEXT: movdqa %xmm9, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm2
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm10, %xmm1
+; SSE2-NEXT: movdqa %xmm9, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pandn %xmm8, %xmm1
+; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm10, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm10, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm5[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm7
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pxor %xmm10, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm5[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm9, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm4
+; SSE2-NEXT: movdqa %xmm11, %xmm5
+; SSE2-NEXT: pxor %xmm10, %xmm5
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-NEXT: pand %xmm9, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm5, %xmm6
+; SSE2-NEXT: pand %xmm8, %xmm6
+; SSE2-NEXT: pand %xmm11, %xmm6
+; SSE2-NEXT: pand %xmm8, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: packuswb %xmm6, %xmm4
+; SSE2-NEXT: pand %xmm8, %xmm7
+; SSE2-NEXT: pand %xmm2, %xmm7
+; SSE2-NEXT: pand %xmm8, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm7, %xmm0
+; SSE2-NEXT: packuswb %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm0, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_packus_v8i64_v8i8:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [255,255]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pxor %xmm10, %xmm4
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [2147483903,2147483903]
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm11 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm11
+; SSSE3-NEXT: pand %xmm11, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm11
+; SSSE3-NEXT: por %xmm3, %xmm11
+; SSSE3-NEXT: movdqa %xmm2, %xmm3
+; SSSE3-NEXT: pxor %xmm10, %xmm3
+; SSSE3-NEXT: movdqa %xmm9, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm5, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm3
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: pxor %xmm10, %xmm2
+; SSSE3-NEXT: movdqa %xmm9, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm5, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm1
+; SSSE3-NEXT: pandn %xmm8, %xmm2
+; SSSE3-NEXT: por %xmm1, %xmm2
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm10, %xmm1
+; SSSE3-NEXT: movdqa %xmm9, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm5, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pandn %xmm8, %xmm1
+; SSSE3-NEXT: por %xmm0, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pxor %xmm10, %xmm0
+; SSSE3-NEXT: movdqa %xmm0, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm5, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm0
+; SSSE3-NEXT: movdqa %xmm2, %xmm4
+; SSSE3-NEXT: pxor %xmm10, %xmm4
+; SSSE3-NEXT: movdqa %xmm4, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm5[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm7
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pxor %xmm10, %xmm4
+; SSSE3-NEXT: movdqa %xmm4, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm9 = xmm5[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
+; SSSE3-NEXT: pand %xmm9, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm4
+; SSSE3-NEXT: movdqa %xmm11, %xmm5
+; SSSE3-NEXT: pxor %xmm10, %xmm5
+; SSSE3-NEXT: movdqa %xmm5, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSSE3-NEXT: pand %xmm9, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm5, %xmm6
+; SSSE3-NEXT: pand %xmm8, %xmm6
+; SSSE3-NEXT: pand %xmm11, %xmm6
+; SSSE3-NEXT: pand %xmm8, %xmm4
+; SSSE3-NEXT: pand %xmm3, %xmm4
+; SSSE3-NEXT: packuswb %xmm6, %xmm4
+; SSSE3-NEXT: pand %xmm8, %xmm7
+; SSSE3-NEXT: pand %xmm2, %xmm7
+; SSSE3-NEXT: pand %xmm8, %xmm0
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: packuswb %xmm7, %xmm0
+; SSSE3-NEXT: packuswb %xmm4, %xmm0
+; SSSE3-NEXT: packuswb %xmm0, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_packus_v8i64_v8i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm9
+; SSE41-NEXT: movapd {{.*#+}} xmm8 = [255,255]
+; SSE41-NEXT: movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483903,2147483903]
+; SSE41-NEXT: movdqa %xmm5, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm7, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm8, %xmm11
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm11
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm5, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm8, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm3
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm5, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm8, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm4
+; SSE41-NEXT: movdqa %xmm9, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm5, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm2, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm8, %xmm5
+; SSE41-NEXT: blendvpd %xmm0, %xmm9, %xmm5
+; SSE41-NEXT: movapd %xmm5, %xmm0
+; SSE41-NEXT: xorpd %xmm10, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm2, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm1
+; SSE41-NEXT: movapd %xmm4, %xmm0
+; SSE41-NEXT: xorpd %xmm10, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm5
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm5
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm5
+; SSE41-NEXT: movapd %xmm3, %xmm0
+; SSE41-NEXT: xorpd %xmm10, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm4
+; SSE41-NEXT: movapd %xmm11, %xmm0
+; SSE41-NEXT: xorpd %xmm10, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm11, %xmm2
+; SSE41-NEXT: andpd %xmm8, %xmm2
+; SSE41-NEXT: andpd %xmm8, %xmm4
+; SSE41-NEXT: packusdw %xmm2, %xmm4
+; SSE41-NEXT: andpd %xmm8, %xmm5
+; SSE41-NEXT: andpd %xmm8, %xmm1
+; SSE41-NEXT: packusdw %xmm5, %xmm1
+; SSE41-NEXT: packusdw %xmm4, %xmm1
+; SSE41-NEXT: packuswb %xmm1, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_packus_v8i64_v8i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [255,255,255,255]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm1, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm7, %xmm2
+; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm3 = mem[0,0]
+; AVX1-NEXT: vpand %xmm3, %xmm7, %xmm7
+; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm1, %xmm6, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm2
+; AVX1-NEXT: vpand %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm0, %xmm8, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_packus_v8i64_v8i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [255,255,255,255]
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm3
+; AVX2-NEXT: vpand %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm2
+; AVX2-NEXT: vpand %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_packus_v8i64_v8i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpminsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512F-NEXT: vmovq %xmm0, %rcx
+; AVX512F-NEXT: vmovd %ecx, %xmm1
+; AVX512F-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512F-NEXT: vmovq %xmm2, %rax
+; AVX512F-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512F-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vextracti32x4 $2, %zmm0, %xmm2
+; AVX512F-NEXT: vmovq %xmm2, %rax
+; AVX512F-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512F-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vextracti32x4 $3, %zmm0, %xmm0
+; AVX512F-NEXT: vmovq %xmm0, %rax
+; AVX512F-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512F-NEXT: vpinsrb $7, %eax, %xmm1, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_packus_v8i64_v8i8:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpminsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512VL-NEXT: vmovq %xmm0, %rcx
+; AVX512VL-NEXT: vmovd %ecx, %xmm1
+; AVX512VL-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512VL-NEXT: vmovq %xmm2, %rax
+; AVX512VL-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512VL-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vextracti32x4 $2, %zmm0, %xmm2
+; AVX512VL-NEXT: vmovq %xmm2, %rax
+; AVX512VL-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512VL-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vextracti32x4 $3, %zmm0, %xmm0
+; AVX512VL-NEXT: vmovq %xmm0, %rax
+; AVX512VL-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512VL-NEXT: vpinsrb $7, %eax, %xmm1, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_packus_v8i64_v8i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpminsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512BW-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX512BW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX512BW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_packus_v8i64_v8i8:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpminsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512BWVL-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BWVL-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512BWVL-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BWVL-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512BWVL-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX512BWVL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX512BWVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp slt <8 x i64> %a0, <i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255>
+ %2 = select <8 x i1> %1, <8 x i64> %a0, <8 x i64> <i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255>
+ %3 = icmp sgt <8 x i64> %2, zeroinitializer
+ %4 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> zeroinitializer
+ %5 = trunc <8 x i64> %4 to <8 x i8>
+ ret <8 x i8> %5
+}
+
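+; Same clamp as above, but storing the result instead of returning it; on
+; AVX512 the unsigned-saturating vpmovusqb then writes straight to memory,
+; and the smin-with-255 half of the clamp folds into the saturation.
+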
+define void @trunc_packus_v8i64_v8i8_store(<8 x i64> %a0, <8 x i8> *%p1) {
+; SSE2-LABEL: trunc_packus_v8i64_v8i8_store:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255]
+; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pxor %xmm10, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [2147483903,2147483903]
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm11
+; SSE2-NEXT: pand %xmm11, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm11
+; SSE2-NEXT: por %xmm3, %xmm11
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm10, %xmm3
+; SSE2-NEXT: movdqa %xmm9, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm5, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm3
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm10, %xmm2
+; SSE2-NEXT: movdqa %xmm9, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm2
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm10, %xmm1
+; SSE2-NEXT: movdqa %xmm9, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pandn %xmm8, %xmm1
+; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm10, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm10, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm5[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm7
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pxor %xmm10, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm5[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm9, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm4
+; SSE2-NEXT: movdqa %xmm11, %xmm5
+; SSE2-NEXT: pxor %xmm10, %xmm5
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm10, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-NEXT: pand %xmm9, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm5, %xmm6
+; SSE2-NEXT: pand %xmm8, %xmm6
+; SSE2-NEXT: pand %xmm11, %xmm6
+; SSE2-NEXT: pand %xmm8, %xmm4
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: packuswb %xmm6, %xmm4
+; SSE2-NEXT: pand %xmm8, %xmm7
+; SSE2-NEXT: pand %xmm2, %xmm7
+; SSE2-NEXT: pand %xmm8, %xmm0
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm7, %xmm0
+; SSE2-NEXT: packuswb %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm0, %xmm0
+; SSE2-NEXT: movq %xmm0, (%rdi)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_packus_v8i64_v8i8_store:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [255,255]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pxor %xmm10, %xmm4
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [2147483903,2147483903]
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm11 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm11
+; SSSE3-NEXT: pand %xmm11, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm11
+; SSSE3-NEXT: por %xmm3, %xmm11
+; SSSE3-NEXT: movdqa %xmm2, %xmm3
+; SSSE3-NEXT: pxor %xmm10, %xmm3
+; SSSE3-NEXT: movdqa %xmm9, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm5, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm3
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: pxor %xmm10, %xmm2
+; SSSE3-NEXT: movdqa %xmm9, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm5, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm1
+; SSSE3-NEXT: pandn %xmm8, %xmm2
+; SSSE3-NEXT: por %xmm1, %xmm2
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm10, %xmm1
+; SSSE3-NEXT: movdqa %xmm9, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm5, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pandn %xmm8, %xmm1
+; SSSE3-NEXT: por %xmm0, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pxor %xmm10, %xmm0
+; SSSE3-NEXT: movdqa %xmm0, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm5, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm0
+; SSSE3-NEXT: movdqa %xmm2, %xmm4
+; SSSE3-NEXT: pxor %xmm10, %xmm4
+; SSSE3-NEXT: movdqa %xmm4, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm5[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm7
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pxor %xmm10, %xmm4
+; SSSE3-NEXT: movdqa %xmm4, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm9 = xmm5[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
+; SSSE3-NEXT: pand %xmm9, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm4
+; SSSE3-NEXT: movdqa %xmm11, %xmm5
+; SSSE3-NEXT: pxor %xmm10, %xmm5
+; SSSE3-NEXT: movdqa %xmm5, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm10, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSSE3-NEXT: pand %xmm9, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm5, %xmm6
+; SSSE3-NEXT: pand %xmm8, %xmm6
+; SSSE3-NEXT: pand %xmm11, %xmm6
+; SSSE3-NEXT: pand %xmm8, %xmm4
+; SSSE3-NEXT: pand %xmm3, %xmm4
+; SSSE3-NEXT: packuswb %xmm6, %xmm4
+; SSSE3-NEXT: pand %xmm8, %xmm7
+; SSSE3-NEXT: pand %xmm2, %xmm7
+; SSSE3-NEXT: pand %xmm8, %xmm0
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: packuswb %xmm7, %xmm0
+; SSSE3-NEXT: packuswb %xmm4, %xmm0
+; SSSE3-NEXT: packuswb %xmm0, %xmm0
+; SSSE3-NEXT: movq %xmm0, (%rdi)
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_packus_v8i64_v8i8_store:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm9
+; SSE41-NEXT: movapd {{.*#+}} xmm8 = [255,255]
+; SSE41-NEXT: movdqa {{.*#+}} xmm10 = [2147483648,2147483648]
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483903,2147483903]
+; SSE41-NEXT: movdqa %xmm5, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm7, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm8, %xmm11
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm11
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm5, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm8, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm3
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm5, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm8, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm4
+; SSE41-NEXT: movdqa %xmm9, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm5, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm5, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm2, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm8, %xmm5
+; SSE41-NEXT: blendvpd %xmm0, %xmm9, %xmm5
+; SSE41-NEXT: movapd %xmm5, %xmm0
+; SSE41-NEXT: xorpd %xmm10, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm2, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm1
+; SSE41-NEXT: movapd %xmm4, %xmm0
+; SSE41-NEXT: xorpd %xmm10, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm5
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm5
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm5
+; SSE41-NEXT: movapd %xmm3, %xmm0
+; SSE41-NEXT: xorpd %xmm10, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm4
+; SSE41-NEXT: movapd %xmm11, %xmm0
+; SSE41-NEXT: xorpd %xmm10, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm11, %xmm2
+; SSE41-NEXT: andpd %xmm8, %xmm2
+; SSE41-NEXT: andpd %xmm8, %xmm4
+; SSE41-NEXT: packusdw %xmm2, %xmm4
+; SSE41-NEXT: andpd %xmm8, %xmm5
+; SSE41-NEXT: andpd %xmm8, %xmm1
+; SSE41-NEXT: packusdw %xmm5, %xmm1
+; SSE41-NEXT: packusdw %xmm4, %xmm1
+; SSE41-NEXT: packuswb %xmm1, %xmm1
+; SSE41-NEXT: movq %xmm1, (%rdi)
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_packus_v8i64_v8i8_store:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [255,255,255,255]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm1, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm7, %xmm2
+; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm3 = mem[0,0]
+; AVX1-NEXT: vpand %xmm3, %xmm7, %xmm7
+; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm1, %xmm6, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm2
+; AVX1-NEXT: vpand %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm0, %xmm8, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_packus_v8i64_v8i8_store:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [255,255,255,255]
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm3
+; AVX2-NEXT: vpand %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm2
+; AVX2-NEXT: vpand %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX2-NEXT: vmovq %xmm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_packus_v8i64_v8i8_store:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovusqb %zmm0, (%rdi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <8 x i64> %a0, <i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255>
+ %2 = select <8 x i1> %1, <8 x i64> %a0, <8 x i64> <i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255>
+ %3 = icmp sgt <8 x i64> %2, zeroinitializer
+ %4 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> zeroinitializer
+ %5 = trunc <8 x i64> %4 to <8 x i8>
+ store <8 x i8> %5, <8 x i8> *%p1
+ ret void
+}
+
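+; The v16i64 case runs the same clamp over eight xmm inputs on SSE and
+; funnels the results through a cascade of packuswb/packusdw pack
+; instructions down to a single vector of sixteen bytes.
+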
+define <16 x i8> @trunc_packus_v16i64_v16i8(<16 x i64> %a0) {
+; SSE2-LABEL: trunc_packus_v16i64_v16i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [255,255]
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm6, %xmm9
+; SSE2-NEXT: pxor %xmm8, %xmm9
+; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [2147483903,2147483903]
+; SSE2-NEXT: movdqa %xmm11, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm12
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm9
+; SSE2-NEXT: pshufd {{.*#+}} xmm14 = xmm9[1,1,3,3]
+; SSE2-NEXT: pand %xmm13, %xmm14
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm12[1,1,3,3]
+; SSE2-NEXT: por %xmm14, %xmm9
+; SSE2-NEXT: pand %xmm9, %xmm6
+; SSE2-NEXT: pandn %xmm10, %xmm9
+; SSE2-NEXT: por %xmm6, %xmm9
+; SSE2-NEXT: movdqa %xmm7, %xmm6
+; SSE2-NEXT: pxor %xmm8, %xmm6
+; SSE2-NEXT: movdqa %xmm11, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm12
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT: pand %xmm13, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm12[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm12
+; SSE2-NEXT: pand %xmm12, %xmm7
+; SSE2-NEXT: pandn %xmm10, %xmm12
+; SSE2-NEXT: por %xmm7, %xmm12
+; SSE2-NEXT: movdqa %xmm4, %xmm6
+; SSE2-NEXT: pxor %xmm8, %xmm6
+; SSE2-NEXT: movdqa %xmm11, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm7[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT: pand %xmm13, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm7[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm13
+; SSE2-NEXT: pand %xmm13, %xmm4
+; SSE2-NEXT: pandn %xmm10, %xmm13
+; SSE2-NEXT: por %xmm4, %xmm13
+; SSE2-NEXT: movdqa %xmm5, %xmm4
+; SSE2-NEXT: pxor %xmm8, %xmm4
+; SSE2-NEXT: movdqa %xmm11, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm14 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm14
+; SSE2-NEXT: pand %xmm14, %xmm5
+; SSE2-NEXT: pandn %xmm10, %xmm14
+; SSE2-NEXT: por %xmm5, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm8, %xmm4
+; SSE2-NEXT: movdqa %xmm11, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pandn %xmm10, %xmm5
+; SSE2-NEXT: por %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: pxor %xmm8, %xmm2
+; SSE2-NEXT: movdqa %xmm11, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm10, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pxor %xmm8, %xmm3
+; SSE2-NEXT: movdqa %xmm11, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: pandn %xmm10, %xmm3
+; SSE2-NEXT: por %xmm0, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm8, %xmm0
+; SSE2-NEXT: movdqa %xmm11, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pandn %xmm10, %xmm4
+; SSE2-NEXT: por %xmm1, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: pxor %xmm8, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm8, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm8, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm3
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm5, %xmm1
+; SSE2-NEXT: pxor %xmm8, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm14, %xmm1
+; SSE2-NEXT: pxor %xmm8, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm14, %xmm2
+; SSE2-NEXT: movdqa %xmm13, %xmm1
+; SSE2-NEXT: pxor %xmm8, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: pand %xmm13, %xmm1
+; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm12, %xmm2
+; SSE2-NEXT: pxor %xmm8, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm12, %xmm3
+; SSE2-NEXT: movdqa %xmm9, %xmm2
+; SSE2-NEXT: pxor %xmm8, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm8, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm8, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm9, %xmm4
+; SSE2-NEXT: packuswb %xmm3, %xmm4
+; SSE2-NEXT: packuswb %xmm4, %xmm1
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_packus_v16i64_v16i8:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm10 = [255,255]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm6, %xmm9
+; SSSE3-NEXT: pxor %xmm8, %xmm9
+; SSSE3-NEXT: movdqa {{.*#+}} xmm11 = [2147483903,2147483903]
+; SSSE3-NEXT: movdqa %xmm11, %xmm12
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm12
+; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm9
+; SSSE3-NEXT: pshufd {{.*#+}} xmm14 = xmm9[1,1,3,3]
+; SSSE3-NEXT: pand %xmm13, %xmm14
+; SSSE3-NEXT: pshufd {{.*#+}} xmm9 = xmm12[1,1,3,3]
+; SSSE3-NEXT: por %xmm14, %xmm9
+; SSSE3-NEXT: pand %xmm9, %xmm6
+; SSSE3-NEXT: pandn %xmm10, %xmm9
+; SSSE3-NEXT: por %xmm6, %xmm9
+; SSSE3-NEXT: movdqa %xmm7, %xmm6
+; SSSE3-NEXT: pxor %xmm8, %xmm6
+; SSSE3-NEXT: movdqa %xmm11, %xmm12
+; SSSE3-NEXT: pcmpgtd %xmm6, %xmm12
+; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSSE3-NEXT: pand %xmm13, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm12 = xmm12[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm12
+; SSSE3-NEXT: pand %xmm12, %xmm7
+; SSSE3-NEXT: pandn %xmm10, %xmm12
+; SSSE3-NEXT: por %xmm7, %xmm12
+; SSSE3-NEXT: movdqa %xmm4, %xmm6
+; SSSE3-NEXT: pxor %xmm8, %xmm6
+; SSSE3-NEXT: movdqa %xmm11, %xmm7
+; SSSE3-NEXT: pcmpgtd %xmm6, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm7[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSSE3-NEXT: pand %xmm13, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm7[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm13
+; SSSE3-NEXT: pand %xmm13, %xmm4
+; SSSE3-NEXT: pandn %xmm10, %xmm13
+; SSSE3-NEXT: por %xmm4, %xmm13
+; SSSE3-NEXT: movdqa %xmm5, %xmm4
+; SSSE3-NEXT: pxor %xmm8, %xmm4
+; SSSE3-NEXT: movdqa %xmm11, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm14 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm14
+; SSSE3-NEXT: pand %xmm14, %xmm5
+; SSSE3-NEXT: pandn %xmm10, %xmm14
+; SSSE3-NEXT: por %xmm5, %xmm14
+; SSSE3-NEXT: movdqa %xmm2, %xmm4
+; SSSE3-NEXT: pxor %xmm8, %xmm4
+; SSSE3-NEXT: movdqa %xmm11, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm2
+; SSSE3-NEXT: pandn %xmm10, %xmm5
+; SSSE3-NEXT: por %xmm2, %xmm5
+; SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSSE3-NEXT: pxor %xmm8, %xmm2
+; SSSE3-NEXT: movdqa %xmm11, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: pandn %xmm10, %xmm2
+; SSSE3-NEXT: por %xmm3, %xmm2
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
+; SSSE3-NEXT: pxor %xmm8, %xmm3
+; SSSE3-NEXT: movdqa %xmm11, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: pandn %xmm10, %xmm3
+; SSSE3-NEXT: por %xmm0, %xmm3
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pxor %xmm8, %xmm0
+; SSSE3-NEXT: movdqa %xmm11, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm0, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pandn %xmm10, %xmm4
+; SSSE3-NEXT: por %xmm1, %xmm4
+; SSSE3-NEXT: movdqa %xmm4, %xmm0
+; SSSE3-NEXT: pxor %xmm8, %xmm0
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm8, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm8, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: por %xmm0, %xmm1
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: movdqa %xmm3, %xmm0
+; SSSE3-NEXT: pxor %xmm8, %xmm0
+; SSSE3-NEXT: movdqa %xmm0, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm8, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm8, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm0
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: packuswb %xmm1, %xmm0
+; SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSSE3-NEXT: pxor %xmm8, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm8, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm8, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm3
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: movdqa %xmm5, %xmm1
+; SSSE3-NEXT: pxor %xmm8, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm8, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm8, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm2
+; SSSE3-NEXT: pand %xmm5, %xmm2
+; SSSE3-NEXT: packuswb %xmm3, %xmm2
+; SSSE3-NEXT: packuswb %xmm2, %xmm0
+; SSSE3-NEXT: movdqa %xmm14, %xmm1
+; SSSE3-NEXT: pxor %xmm8, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm8, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm8, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm3, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm2
+; SSSE3-NEXT: pand %xmm14, %xmm2
+; SSSE3-NEXT: movdqa %xmm13, %xmm1
+; SSSE3-NEXT: pxor %xmm8, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm8, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm8, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm5, %xmm1
+; SSSE3-NEXT: pand %xmm13, %xmm1
+; SSSE3-NEXT: packuswb %xmm2, %xmm1
+; SSSE3-NEXT: movdqa %xmm12, %xmm2
+; SSSE3-NEXT: pxor %xmm8, %xmm2
+; SSSE3-NEXT: movdqa %xmm2, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm8, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm8, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: pand %xmm12, %xmm3
+; SSSE3-NEXT: movdqa %xmm9, %xmm2
+; SSSE3-NEXT: pxor %xmm8, %xmm2
+; SSSE3-NEXT: movdqa %xmm2, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm8, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm8, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm5, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm2, %xmm4
+; SSSE3-NEXT: pand %xmm9, %xmm4
+; SSSE3-NEXT: packuswb %xmm3, %xmm4
+; SSSE3-NEXT: packuswb %xmm4, %xmm1
+; SSSE3-NEXT: packuswb %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_packus_v16i64_v16i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm8
+; SSE41-NEXT: movapd {{.*#+}} xmm11 = [255,255]
+; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,2147483648]
+; SSE41-NEXT: movdqa %xmm6, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm12 = [2147483903,2147483903]
+; SSE41-NEXT: movdqa %xmm12, %xmm10
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm10
+; SSE41-NEXT: pshufd {{.*#+}} xmm13 = xmm10[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm12, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm13, %xmm14
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
+; SSE41-NEXT: por %xmm14, %xmm0
+; SSE41-NEXT: movapd %xmm11, %xmm10
+; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm10
+; SSE41-NEXT: movdqa %xmm7, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm12, %xmm6
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm13 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm12, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm13, %xmm14
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT: por %xmm14, %xmm0
+; SSE41-NEXT: movapd %xmm11, %xmm13
+; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm13
+; SSE41-NEXT: movdqa %xmm4, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm12, %xmm6
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm14 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm12, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm14, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm11, %xmm14
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm14
+; SSE41-NEXT: movdqa %xmm5, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm12, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm12, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm11, %xmm15
+; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm15
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm12, %xmm5
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm12, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm11, %xmm5
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm12, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm12, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm11, %xmm6
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm6
+; SSE41-NEXT: movdqa %xmm8, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm12, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm12, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm3, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm11, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm3
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm12, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm12, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm7, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm11
+; SSE41-NEXT: xorpd %xmm8, %xmm8
+; SSE41-NEXT: movapd %xmm11, %xmm0
+; SSE41-NEXT: xorpd %xmm9, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm9, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm11, %xmm4
+; SSE41-NEXT: movapd %xmm3, %xmm0
+; SSE41-NEXT: xorpd %xmm9, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm9, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm7, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm1
+; SSE41-NEXT: packusdw %xmm4, %xmm1
+; SSE41-NEXT: movapd %xmm6, %xmm0
+; SSE41-NEXT: xorpd %xmm9, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm9, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm3, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm2
+; SSE41-NEXT: movapd %xmm5, %xmm0
+; SSE41-NEXT: xorpd %xmm9, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm9, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm3
+; SSE41-NEXT: packusdw %xmm2, %xmm3
+; SSE41-NEXT: packusdw %xmm3, %xmm1
+; SSE41-NEXT: movapd %xmm15, %xmm0
+; SSE41-NEXT: xorpd %xmm9, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm9, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm3, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm15, %xmm2
+; SSE41-NEXT: movapd %xmm14, %xmm0
+; SSE41-NEXT: xorpd %xmm9, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm9, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: pxor %xmm3, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm14, %xmm3
+; SSE41-NEXT: packusdw %xmm2, %xmm3
+; SSE41-NEXT: movapd %xmm13, %xmm0
+; SSE41-NEXT: xorpd %xmm9, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm9, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm13, %xmm2
+; SSE41-NEXT: movapd %xmm10, %xmm0
+; SSE41-NEXT: xorpd %xmm9, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm9, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm10, %xmm8
+; SSE41-NEXT: packusdw %xmm2, %xmm8
+; SSE41-NEXT: packusdw %xmm8, %xmm3
+; SSE41-NEXT: packuswb %xmm3, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_packus_v16i64_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm4 = [255,255,255,255]
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm5
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [255,255]
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm6, %xmm7
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5
+; AVX1-NEXT: vblendvpd %ymm5, %ymm3, %ymm4, %ymm15
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm6, %xmm7
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5
+; AVX1-NEXT: vblendvpd %ymm5, %ymm2, %ymm4, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm6, %xmm7
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5
+; AVX1-NEXT: vblendvpd %ymm5, %ymm1, %ymm4, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm6, %xmm6
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5
+; AVX1-NEXT: vblendvpd %ymm5, %ymm0, %ymm4, %ymm0
+; AVX1-NEXT: vxorpd %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm0, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm14
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm14, %xmm9
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm1, %xmm10
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm7, %xmm11
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm2, %xmm12
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm13
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm15, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm15, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm4
+; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpand %xmm15, %xmm6, %xmm4
+; AVX1-NEXT: vpackusdw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm13, %xmm4
+; AVX1-NEXT: vpand %xmm2, %xmm12, %xmm2
+; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm7, %xmm11, %xmm3
+; AVX1-NEXT: vpand %xmm1, %xmm10, %xmm1
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm14, %xmm9, %xmm3
+; AVX1-NEXT: vpand %xmm0, %xmm8, %xmm0
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_packus_v16i64_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [255,255,255,255]
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm4, %ymm5
+; AVX2-NEXT: vblendvpd %ymm5, %ymm2, %ymm4, %ymm2
+; AVX2-NEXT: vpcmpgtq %ymm3, %ymm4, %ymm5
+; AVX2-NEXT: vblendvpd %ymm5, %ymm3, %ymm4, %ymm3
+; AVX2-NEXT: vpcmpgtq %ymm0, %ymm4, %ymm5
+; AVX2-NEXT: vblendvpd %ymm5, %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm4, %ymm5
+; AVX2-NEXT: vblendvpd %ymm5, %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT: vpcmpgtq %ymm4, %ymm1, %ymm5
+; AVX2-NEXT: vpand %ymm1, %ymm5, %ymm1
+; AVX2-NEXT: vpcmpgtq %ymm4, %ymm0, %ymm5
+; AVX2-NEXT: vpand %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpgtq %ymm4, %ymm3, %ymm1
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpcmpgtq %ymm4, %ymm2, %ymm3
+; AVX2-NEXT: vpand %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpackusdw %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_packus_v16i64_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255]
+; AVX512-NEXT: vpminsq %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpminsq %zmm2, %zmm1, %zmm1
+; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vpmaxsq %zmm2, %zmm1, %zmm1
+; AVX512-NEXT: vpmaxsq %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <16 x i64> %a0, <i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255>
+ %2 = select <16 x i1> %1, <16 x i64> %a0, <16 x i64> <i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255>
+ %3 = icmp sgt <16 x i64> %2, zeroinitializer
+ %4 = select <16 x i1> %3, <16 x i64> %2, <16 x i64> zeroinitializer
+ %5 = trunc <16 x i64> %4 to <16 x i8>
+ ret <16 x i8> %5
+}
+
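+; For i32 sources the packus clamp lowers to packssdw (signed-saturate to i16)
+; followed by packuswb (unsigned-saturate to [0,255]), so no explicit compares
+; are needed on any SSE/AVX level.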
+define <8 x i8> @trunc_packus_v8i32_v8i8(<8 x i32> %a0) {
+; SSE-LABEL: trunc_packus_v8i32_v8i8:
+; SSE: # %bb.0:
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm0, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_packus_v8i32_v8i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_packus_v8i32_v8i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_packus_v8i32_v8i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
+; AVX512F-NEXT: vpminsd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_packus_v8i32_v8i8:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpminsd {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_packus_v8i32_v8i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
+; AVX512BW-NEXT: vpminsd %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_packus_v8i32_v8i8:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpminsd {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512BWVL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp slt <8 x i32> %a0, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %2 = select <8 x i1> %1, <8 x i32> %a0, <8 x i32> <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %3 = icmp sgt <8 x i32> %2, zeroinitializer
+ %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
+ %5 = trunc <8 x i32> %4 to <8 x i8>
+ ret <8 x i8> %5
+}
+
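+; Store form: with AVX512VL the unsigned-saturating truncate-and-store
+; (vpmovusdb) subsumes the min-with-255, leaving only the signed max against
+; zero before the store.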
+define void @trunc_packus_v8i32_v8i8_store(<8 x i32> %a0, <8 x i8> *%p1) {
+; SSE-LABEL: trunc_packus_v8i32_v8i8_store:
+; SSE: # %bb.0:
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm0, %xmm0
+; SSE-NEXT: movq %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_packus_v8i32_v8i8_store:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_packus_v8i32_v8i8_store:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_packus_v8i32_v8i8_store:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
+; AVX512F-NEXT: vpminsd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vmovq %xmm0, (%rdi)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_packus_v8i32_v8i8_store:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovusdb %ymm0, (%rdi)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_packus_v8i32_v8i8_store:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
+; AVX512BW-NEXT: vpminsd %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vmovq %xmm0, (%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_packus_v8i32_v8i8_store:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512BWVL-NEXT: vpmovusdb %ymm0, (%rdi)
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp slt <8 x i32> %a0, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %2 = select <8 x i1> %1, <8 x i32> %a0, <8 x i32> <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %3 = icmp sgt <8 x i32> %2, zeroinitializer
+ %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
+ %5 = trunc <8 x i32> %4 to <8 x i8>
+ store <8 x i8> %5, <8 x i8> *%p1
+ ret void
+}
+
+define <16 x i8> @trunc_packus_v16i32_v16i8(<16 x i32> %a0) {
+; SSE-LABEL: trunc_packus_v16i32_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: packssdw %xmm3, %xmm2
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_packus_v16i32_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_packus_v16i32_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_packus_v16i32_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpmaxsd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovusdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <16 x i32> %a0, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %2 = select <16 x i1> %1, <16 x i32> %a0, <16 x i32> <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %3 = icmp sgt <16 x i32> %2, zeroinitializer
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> zeroinitializer
+ %5 = trunc <16 x i32> %4 to <16 x i8>
+ ret <16 x i8> %5
+}
+
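+; For i16 sources a single packuswb performs the entire clamp-and-truncate,
+; since it saturates signed words to [0,255].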
+define <16 x i8> @trunc_packus_v16i16_v16i8(<16 x i16> %a0) {
+; SSE-LABEL: trunc_packus_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_packus_v16i16_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_packus_v16i16_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_packus_v16i16_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpminsw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_packus_v16i16_v16i8:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpminsw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_packus_v16i16_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpminsw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_packus_v16i16_v16i8:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0
+; AVX512BWVL-NEXT: vpmovuswb %ymm0, %xmm0
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp slt <16 x i16> %a0, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %2 = select <16 x i1> %1, <16 x i16> %a0, <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %3 = icmp sgt <16 x i16> %2, zeroinitializer
+ %4 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> zeroinitializer
+ %5 = trunc <16 x i16> %4 to <16 x i8>
+ ret <16 x i8> %5
+}
+
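+; With AVX512BW the whole v32i16 clamp is a zmm vpmaxsw against zero followed
+; by an unsigned-saturating vpmovuswb.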
+define <32 x i8> @trunc_packus_v32i16_v32i8(<32 x i16> %a0) {
+; SSE-LABEL: trunc_packus_v32i16_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_packus_v32i16_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_packus_v32i16_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_packus_v32i16_v32i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT: vpminsw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpminsw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT: vpmaxsw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vpmaxsw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_packus_v32i16_v32i8:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512VL-NEXT: vpminsw %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpminsw %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpmaxsw %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vpmaxsw %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512VL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_packus_v32i16_v32i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT: vpmaxsw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovuswb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_packus_v32i16_v32i8:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpmaxsw %zmm1, %zmm0, %zmm0
+; AVX512BWVL-NEXT: vpmovuswb %zmm0, %ymm0
+; AVX512BWVL-NEXT: retq
+ %1 = icmp slt <32 x i16> %a0, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %2 = select <32 x i1> %1, <32 x i16> %a0, <32 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %3 = icmp sgt <32 x i16> %2, zeroinitializer
+ %4 = select <32 x i1> %3, <32 x i16> %2, <32 x i16> zeroinitializer
+ %5 = trunc <32 x i16> %4 to <32 x i8>
+ ret <32 x i8> %5
+}
Added: llvm/trunk/test/CodeGen/X86/vector-trunc-ssat-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-ssat-widen.ll?rev=346745&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-ssat-widen.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-ssat-widen.ll Mon Nov 12 23:47:52 2018
@@ -0,0 +1,3243 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-SLOW
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL
+
+;
+; Signed saturation truncation to vXi32
+;
+
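+; Targets without pcmpgtq (pre-SSE4.2) have no signed i64 compare, so each
+; clamp biases the operands with pxor and reassembles a 64-bit signed compare
+; from 32-bit pcmpgtd/pcmpeqd results.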
+define <4 x i32> @trunc_ssat_v4i64_v4i32(<4 x i64> %a0) {
+; SSE2-LABEL: trunc_ssat_v4i64_v4i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483647,2147483647]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [4294967295,4294967295]
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm5, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: pandn %xmm8, %xmm3
+; SSE2-NEXT: por %xmm0, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm5, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm5, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm4
+; SSE2-NEXT: por %xmm1, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [18446744071562067968,18446744071562067968]
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [18446744069414584320,18446744069414584320]
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm5, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm6
+; SSE2-NEXT: pand %xmm6, %xmm4
+; SSE2-NEXT: pandn %xmm1, %xmm6
+; SSE2-NEXT: por %xmm4, %xmm6
+; SSE2-NEXT: pxor %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm5, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm3
+; SSE2-NEXT: pandn %xmm1, %xmm0
+; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm6[0,2]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_ssat_v4i64_v4i32:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [2147483647,2147483647]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
+; SSSE3-NEXT: pxor %xmm2, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [4294967295,4294967295]
+; SSSE3-NEXT: movdqa %xmm5, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm5, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: pandn %xmm8, %xmm3
+; SSSE3-NEXT: por %xmm0, %xmm3
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pxor %xmm2, %xmm0
+; SSSE3-NEXT: movdqa %xmm5, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm5, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm0, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pandn %xmm8, %xmm4
+; SSSE3-NEXT: por %xmm1, %xmm4
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [18446744071562067968,18446744071562067968]
+; SSSE3-NEXT: movdqa %xmm4, %xmm0
+; SSSE3-NEXT: pxor %xmm2, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [18446744069414584320,18446744069414584320]
+; SSSE3-NEXT: movdqa %xmm0, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm5, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm5, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm0, %xmm6
+; SSSE3-NEXT: pand %xmm6, %xmm4
+; SSSE3-NEXT: pandn %xmm1, %xmm6
+; SSSE3-NEXT: por %xmm4, %xmm6
+; SSSE3-NEXT: pxor %xmm3, %xmm2
+; SSSE3-NEXT: movdqa %xmm2, %xmm0
+; SSSE3-NEXT: pcmpgtd %xmm5, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm5, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSSE3-NEXT: por %xmm2, %xmm0
+; SSSE3-NEXT: pand %xmm0, %xmm3
+; SSSE3-NEXT: pandn %xmm1, %xmm0
+; SSSE3-NEXT: por %xmm3, %xmm0
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm6[0,2]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_ssat_v4i64_v4i32:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: movapd {{.*#+}} xmm4 = [2147483647,2147483647]
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648]
+; SSE41-NEXT: pxor %xmm3, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [4294967295,4294967295]
+; SSE41-NEXT: movdqa %xmm8, %xmm5
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm5[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm7, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm4, %xmm5
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm3, %xmm0
+; SSE41-NEXT: movdqa %xmm8, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm4
+; SSE41-NEXT: movapd {{.*#+}} xmm1 = [18446744071562067968,18446744071562067968]
+; SSE41-NEXT: movapd %xmm4, %xmm0
+; SSE41-NEXT: xorpd %xmm3, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [18446744069414584320,18446744069414584320]
+; SSE41-NEXT: movapd %xmm0, %xmm6
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm7, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT: por %xmm2, %xmm0
+; SSE41-NEXT: movapd %xmm1, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
+; SSE41-NEXT: xorpd %xmm5, %xmm3
+; SSE41-NEXT: movapd %xmm3, %xmm0
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT: por %xmm3, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm1
+; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE41-NEXT: movaps %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_ssat_v4i64_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm1 = [2147483647,2147483647,2147483647,2147483647]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [2147483647,2147483647]
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vmovapd {{.*#+}} ymm1 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [18446744071562067968,18446744071562067968]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_ssat_v4i64_v4i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [2147483647,2147483647,2147483647,2147483647]
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
+; AVX2-SLOW-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
+; AVX2-SLOW-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_ssat_v4i64_v4i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} ymm1 = [2147483647,2147483647,2147483647,2147483647]
+; AVX2-FAST-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm2
+; AVX2-FAST-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} ymm1 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
+; AVX2-FAST-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
+; AVX2-FAST-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: vmovapd {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512F-LABEL: trunc_ssat_v4i64_v4i32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm1 = [2147483647,2147483647,2147483647,2147483647]
+; AVX512F-NEXT: vpminsq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm1 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
+; AVX512F-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_ssat_v4i64_v4i32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpmovsqd %ymm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_ssat_v4i64_v4i32:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [2147483647,2147483647,2147483647,2147483647]
+; AVX512BW-NEXT: vpminsq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
+; AVX512BW-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_ssat_v4i64_v4i32:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpmovsqd %ymm0, %xmm0
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp slt <4 x i64> %a0, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+ %2 = select <4 x i1> %1, <4 x i64> %a0, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+ %3 = icmp sgt <4 x i64> %2, <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
+ %4 = select <4 x i1> %3, <4 x i64> %2, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
+ %5 = trunc <4 x i64> %4 to <4 x i32>
+ ret <4 x i32> %5
+}
+
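+; With AVX512 the entire v8i64 clamp-and-truncate folds into a single vpmovsqd.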
+define <8 x i32> @trunc_ssat_v8i64_v8i32(<8 x i64> %a0) {
+; SSE2-LABEL: trunc_ssat_v8i64_v8i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483647,2147483647]
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: pxor %xmm4, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [4294967295,4294967295]
+; SSE2-NEXT: movdqa %xmm9, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: pandn %xmm8, %xmm5
+; SSE2-NEXT: por %xmm0, %xmm5
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm0
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm7
+; SSE2-NEXT: pand %xmm7, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm7
+; SSE2-NEXT: por %xmm2, %xmm7
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm9, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm6
+; SSE2-NEXT: pand %xmm6, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm6
+; SSE2-NEXT: por %xmm3, %xmm6
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [18446744071562067968,18446744071562067968]
+; SSE2-NEXT: movdqa %xmm6, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [18446744069414584320,18446744069414584320]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm6
+; SSE2-NEXT: pandn %xmm8, %xmm2
+; SSE2-NEXT: por %xmm6, %xmm2
+; SSE2-NEXT: movdqa %xmm7, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm7
+; SSE2-NEXT: pandn %xmm8, %xmm1
+; SSE2-NEXT: por %xmm7, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: pandn %xmm8, %xmm3
+; SSE2-NEXT: por %xmm0, %xmm3
+; SSE2-NEXT: pxor %xmm5, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm5
+; SSE2-NEXT: pandn %xmm8, %xmm0
+; SSE2-NEXT: por %xmm5, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_ssat_v8i64_v8i32:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [2147483647,2147483647]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm0, %xmm5
+; SSSE3-NEXT: pxor %xmm4, %xmm5
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [4294967295,4294967295]
+; SSSE3-NEXT: movdqa %xmm9, %xmm7
+; SSSE3-NEXT: pcmpgtd %xmm5, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm0
+; SSSE3-NEXT: pandn %xmm8, %xmm5
+; SSSE3-NEXT: por %xmm0, %xmm5
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pxor %xmm4, %xmm0
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm0
+; SSSE3-NEXT: pand %xmm0, %xmm1
+; SSSE3-NEXT: pandn %xmm8, %xmm0
+; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm7
+; SSSE3-NEXT: pand %xmm7, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm7
+; SSSE3-NEXT: por %xmm2, %xmm7
+; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: movdqa %xmm9, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm6
+; SSSE3-NEXT: pand %xmm6, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm6
+; SSSE3-NEXT: por %xmm3, %xmm6
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [18446744071562067968,18446744071562067968]
+; SSSE3-NEXT: movdqa %xmm6, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [18446744069414584320,18446744069414584320]
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm3, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm6
+; SSSE3-NEXT: pandn %xmm8, %xmm2
+; SSSE3-NEXT: por %xmm6, %xmm2
+; SSSE3-NEXT: movdqa %xmm7, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm7
+; SSSE3-NEXT: pandn %xmm8, %xmm1
+; SSSE3-NEXT: por %xmm7, %xmm1
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSSE3-NEXT: pxor %xmm4, %xmm2
+; SSSE3-NEXT: movdqa %xmm2, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: pandn %xmm8, %xmm3
+; SSSE3-NEXT: por %xmm0, %xmm3
+; SSSE3-NEXT: pxor %xmm5, %xmm4
+; SSSE3-NEXT: movdqa %xmm4, %xmm0
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: pand %xmm2, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm0
+; SSSE3-NEXT: pand %xmm0, %xmm5
+; SSSE3-NEXT: pandn %xmm8, %xmm0
+; SSSE3-NEXT: por %xmm5, %xmm0
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_ssat_v8i64_v8i32:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm9
+; SSE41-NEXT: movapd {{.*#+}} xmm7 = [2147483647,2147483647]
+; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
+; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm10 = [4294967295,4294967295]
+; SSE41-NEXT: movdqa %xmm10, %xmm6
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm8, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm8
+; SSE41-NEXT: blendvpd %xmm0, %xmm9, %xmm8
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm10, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm9, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm9
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm9
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm10, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm4
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm10, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm2, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm7
+; SSE41-NEXT: movapd {{.*#+}} xmm2 = [18446744071562067968,18446744071562067968]
+; SSE41-NEXT: movapd %xmm7, %xmm0
+; SSE41-NEXT: xorpd %xmm5, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm10 = [18446744069414584320,18446744069414584320]
+; SSE41-NEXT: movapd %xmm0, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm3, %xmm0
+; SSE41-NEXT: movapd %xmm2, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm3
+; SSE41-NEXT: movapd %xmm4, %xmm0
+; SSE41-NEXT: xorpd %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm2, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm1
+; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
+; SSE41-NEXT: movapd %xmm9, %xmm0
+; SSE41-NEXT: xorpd %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm2, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm9, %xmm3
+; SSE41-NEXT: xorpd %xmm8, %xmm5
+; SSE41-NEXT: movapd %xmm5, %xmm0
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm2
+; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
+; SSE41-NEXT: movaps %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_ssat_v8i64_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [2147483647,2147483647,2147483647,2147483647]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [2147483647,2147483647]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [18446744071562067968,18446744071562067968]
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm0, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm1, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_ssat_v8i64_v8i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm2 = [2147483647,2147483647,2147483647,2147483647]
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
+; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm3
+; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm2 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm3
+; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm3
+; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_ssat_v8i64_v8i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} ymm2 = [2147483647,2147483647,2147483647,2147483647]
+; AVX2-FAST-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
+; AVX2-FAST-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm3
+; AVX2-FAST-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} ymm2 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
+; AVX2-FAST-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm3
+; AVX2-FAST-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm3
+; AVX2-FAST-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vmovapd {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermps %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_ssat_v8i64_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsqd %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %1 = icmp slt <8 x i64> %a0, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+ %2 = select <8 x i1> %1, <8 x i64> %a0, <8 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
+ %3 = icmp sgt <8 x i64> %2, <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
+ %4 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
+ %5 = trunc <8 x i64> %4 to <8 x i32>
+ ret <8 x i32> %5
+}
+
+;
+; Signed saturation truncation to vXi16
+;
+
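+; After the i64 clamp, the results are narrowed with packssdw stages; the
+; saturation in the pack is a no-op since every element already fits in i16.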
+define <8 x i16> @trunc_ssat_v8i64_v8i16(<8 x i64> %a0) {
+; SSE2-LABEL: trunc_ssat_v8i64_v8i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [32767,32767]
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: pxor %xmm4, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [2147516415,2147516415]
+; SSE2-NEXT: movdqa %xmm9, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm5
+; SSE2-NEXT: por %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: pxor %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm3
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: pandn %xmm8, %xmm3
+; SSE2-NEXT: por %xmm0, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm7
+; SSE2-NEXT: pand %xmm7, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm7
+; SSE2-NEXT: por %xmm1, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [18446744073709518848,18446744073709518848]
+; SSE2-NEXT: movdqa %xmm7, %xmm0
+; SSE2-NEXT: pxor %xmm4, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [18446744071562035200,18446744071562035200]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm7
+; SSE2-NEXT: pandn %xmm8, %xmm1
+; SSE2-NEXT: por %xmm7, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm0
+; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: packssdw %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm3
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm5, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm5
+; SSE2-NEXT: pandn %xmm8, %xmm1
+; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: packssdw %xmm3, %xmm1
+; SSE2-NEXT: packssdw %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_ssat_v8i64_v8i16:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [32767,32767]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm2, %xmm5
+; SSSE3-NEXT: pxor %xmm4, %xmm5
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [2147516415,2147516415]
+; SSSE3-NEXT: movdqa %xmm9, %xmm7
+; SSSE3-NEXT: pcmpgtd %xmm5, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm5
+; SSSE3-NEXT: por %xmm2, %xmm5
+; SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSSE3-NEXT: pxor %xmm4, %xmm2
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm2
+; SSSE3-NEXT: por %xmm3, %xmm2
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
+; SSSE3-NEXT: pxor %xmm4, %xmm3
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: pandn %xmm8, %xmm3
+; SSSE3-NEXT: por %xmm0, %xmm3
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pxor %xmm4, %xmm0
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm0, %xmm7
+; SSSE3-NEXT: pand %xmm7, %xmm1
+; SSSE3-NEXT: pandn %xmm8, %xmm7
+; SSSE3-NEXT: por %xmm1, %xmm7
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [18446744073709518848,18446744073709518848]
+; SSSE3-NEXT: movdqa %xmm7, %xmm0
+; SSSE3-NEXT: pxor %xmm4, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [18446744071562035200,18446744071562035200]
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: por %xmm0, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm7
+; SSSE3-NEXT: pandn %xmm8, %xmm1
+; SSSE3-NEXT: por %xmm7, %xmm1
+; SSSE3-NEXT: movdqa %xmm3, %xmm0
+; SSSE3-NEXT: pxor %xmm4, %xmm0
+; SSSE3-NEXT: movdqa %xmm0, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm0
+; SSSE3-NEXT: pand %xmm0, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm0
+; SSSE3-NEXT: por %xmm3, %xmm0
+; SSSE3-NEXT: packssdw %xmm1, %xmm0
+; SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm3
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: pxor %xmm5, %xmm4
+; SSSE3-NEXT: movdqa %xmm4, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: pand %xmm2, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm5
+; SSSE3-NEXT: pandn %xmm8, %xmm1
+; SSSE3-NEXT: por %xmm5, %xmm1
+; SSSE3-NEXT: packssdw %xmm3, %xmm1
+; SSSE3-NEXT: packssdw %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_ssat_v8i64_v8i16:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm10
+; SSE41-NEXT: movapd {{.*#+}} xmm7 = [32767,32767]
+; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm11 = [2147516415,2147516415]
+; SSE41-NEXT: movdqa %xmm11, %xmm6
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm8, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm8
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm8
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm9
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm9
+; SSE41-NEXT: movdqa %xmm10, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm3, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm10, %xmm2
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm7
+; SSE41-NEXT: movapd {{.*#+}} xmm3 = [18446744073709518848,18446744073709518848]
+; SSE41-NEXT: movapd %xmm7, %xmm0
+; SSE41-NEXT: xorpd %xmm5, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm10 = [18446744071562035200,18446744071562035200]
+; SSE41-NEXT: movapd %xmm0, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: movapd %xmm3, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm4
+; SSE41-NEXT: movapd %xmm2, %xmm0
+; SSE41-NEXT: xorpd %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm3, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; SSE41-NEXT: packssdw %xmm4, %xmm1
+; SSE41-NEXT: movapd %xmm9, %xmm0
+; SSE41-NEXT: xorpd %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm3, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm9, %xmm2
+; SSE41-NEXT: xorpd %xmm8, %xmm5
+; SSE41-NEXT: movapd %xmm5, %xmm0
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm3
+; SSE41-NEXT: packssdw %xmm2, %xmm3
+; SSE41-NEXT: packssdw %xmm3, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_ssat_v8i64_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [32767,32767,32767,32767]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [32767,32767]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [18446744073709518848,18446744073709518848,18446744073709518848,18446744073709518848]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [18446744073709518848,18446744073709518848]
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm0, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm1, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_ssat_v8i64_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [32767,32767,32767,32767]
+; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [18446744073709518848,18446744073709518848,18446744073709518848,18446744073709518848]
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_ssat_v8i64_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <8 x i64> %a0, <i64 32767, i64 32767, i64 32767, i64 32767, i64 32767, i64 32767, i64 32767, i64 32767>
+ %2 = select <8 x i1> %1, <8 x i64> %a0, <8 x i64> <i64 32767, i64 32767, i64 32767, i64 32767, i64 32767, i64 32767, i64 32767, i64 32767>
+ %3 = icmp sgt <8 x i64> %2, <i64 -32768, i64 -32768, i64 -32768, i64 -32768, i64 -32768, i64 -32768, i64 -32768, i64 -32768>
+ %4 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> <i64 -32768, i64 -32768, i64 -32768, i64 -32768, i64 -32768, i64 -32768, i64 -32768, i64 -32768>
+ %5 = trunc <8 x i64> %4 to <8 x i16>
+ ret <8 x i16> %5
+}
+
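+; NOTE: packssdw already performs signed saturation from i32 to i16, so once
+; the values fit in i32 the clamp comes for free; that is why the
+; v8i32 -> v8i16 case below needs no explicit compare/select on the SSE and
+; AVX1/AVX2 paths.
+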
+define <8 x i16> @trunc_ssat_v8i32_v8i16(<8 x i32> %a0) {
+; SSE-LABEL: trunc_ssat_v8i32_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_ssat_v8i32_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_ssat_v8i32_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_ssat_v8i32_v8i16:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
+; AVX512F-NEXT: vpminsd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528]
+; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_ssat_v8i32_v8i16:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpmovsdw %ymm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_ssat_v8i32_v8i16:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [32767,32767,32767,32767,32767,32767,32767,32767]
+; AVX512BW-NEXT: vpminsd %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528,4294934528]
+; AVX512BW-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_ssat_v8i32_v8i16:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpmovsdw %ymm0, %xmm0
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp slt <8 x i32> %a0, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
+ %2 = select <8 x i1> %1, <8 x i32> %a0, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
+ %3 = icmp sgt <8 x i32> %2, <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+ %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+ %5 = trunc <8 x i32> %4 to <8 x i16>
+ ret <8 x i16> %5
+}
+
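+; NOTE: vpackssdw on AVX2 packs within each 128-bit lane, so the v16i32 case
+; below needs the trailing vpermq with mask [0,2,1,3] to put the two
+; half-results back into element order.
+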
+define <16 x i16> @trunc_ssat_v16i32_v16i16(<16 x i32> %a0) {
+; SSE-LABEL: trunc_ssat_v16i32_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: packssdw %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_ssat_v16i32_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_ssat_v16i32_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_ssat_v16i32_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsdw %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %1 = icmp slt <16 x i32> %a0, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
+ %2 = select <16 x i1> %1, <16 x i32> %a0, <16 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
+ %3 = icmp sgt <16 x i32> %2, <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+ %5 = trunc <16 x i32> %4 to <16 x i16>
+ ret <16 x i16> %5
+}
+
+;
+; Signed saturation truncation to vXi8
+;
+
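+; NOTE: there is no instruction that packs qwords straight to bytes before
+; AVX512, so after clamping to [-128, 127] the SSE blocks mask each qword to
+; its low byte (the pand/andpd with 255) and then narrow with the unsigned
+; pack instructions; that is safe because the clamped values already fit in
+; i8, so no further saturation can trigger.
+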
+define <8 x i8> @trunc_ssat_v8i64_v8i8(<8 x i64> %a0) {
+; SSE2-LABEL: trunc_ssat_v8i64_v8i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [127,127]
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm3, %xmm5
+; SSE2-NEXT: pxor %xmm4, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [2147483775,2147483775]
+; SSE2-NEXT: movdqa %xmm9, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm5
+; SSE2-NEXT: por %xmm3, %xmm5
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm3
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm3
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm2
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm7
+; SSE2-NEXT: pand %xmm7, %xmm0
+; SSE2-NEXT: pandn %xmm8, %xmm7
+; SSE2-NEXT: por %xmm0, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [18446744073709551488,18446744073709551488]
+; SSE2-NEXT: movdqa %xmm7, %xmm0
+; SSE2-NEXT: pxor %xmm4, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [18446744071562067840,18446744071562067840]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm1[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm7
+; SSE2-NEXT: pandn %xmm8, %xmm0
+; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm7
+; SSE2-NEXT: pand %xmm7, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm7
+; SSE2-NEXT: por %xmm2, %xmm7
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: pxor %xmm5, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm5
+; SSE2-NEXT: pandn %xmm8, %xmm1
+; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: packuswb %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm3, %xmm7
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: packuswb %xmm7, %xmm0
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm0, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_ssat_v8i64_v8i8:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [127,127]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm3, %xmm5
+; SSSE3-NEXT: pxor %xmm4, %xmm5
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [2147483775,2147483775]
+; SSSE3-NEXT: movdqa %xmm9, %xmm7
+; SSSE3-NEXT: pcmpgtd %xmm5, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm5
+; SSSE3-NEXT: por %xmm3, %xmm5
+; SSSE3-NEXT: movdqa %xmm2, %xmm3
+; SSSE3-NEXT: pxor %xmm4, %xmm3
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm3
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: pxor %xmm4, %xmm2
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm1
+; SSSE3-NEXT: pandn %xmm8, %xmm2
+; SSSE3-NEXT: por %xmm1, %xmm2
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm7
+; SSSE3-NEXT: pand %xmm7, %xmm0
+; SSSE3-NEXT: pandn %xmm8, %xmm7
+; SSSE3-NEXT: por %xmm0, %xmm7
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [18446744073709551488,18446744073709551488]
+; SSSE3-NEXT: movdqa %xmm7, %xmm0
+; SSSE3-NEXT: pxor %xmm4, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [18446744071562067840,18446744071562067840]
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm1[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm0
+; SSSE3-NEXT: pand %xmm0, %xmm7
+; SSSE3-NEXT: pandn %xmm8, %xmm0
+; SSSE3-NEXT: por %xmm7, %xmm0
+; SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm7
+; SSSE3-NEXT: pand %xmm7, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm7
+; SSSE3-NEXT: por %xmm2, %xmm7
+; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm2
+; SSSE3-NEXT: por %xmm3, %xmm2
+; SSSE3-NEXT: pxor %xmm5, %xmm4
+; SSSE3-NEXT: movdqa %xmm4, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: pand %xmm3, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm5
+; SSSE3-NEXT: pandn %xmm8, %xmm1
+; SSSE3-NEXT: por %xmm5, %xmm1
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSSE3-NEXT: pand %xmm3, %xmm1
+; SSSE3-NEXT: pand %xmm3, %xmm2
+; SSSE3-NEXT: packuswb %xmm1, %xmm2
+; SSSE3-NEXT: pand %xmm3, %xmm7
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: packuswb %xmm7, %xmm0
+; SSSE3-NEXT: packuswb %xmm2, %xmm0
+; SSSE3-NEXT: packuswb %xmm0, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_ssat_v8i64_v8i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm8
+; SSE41-NEXT: movapd {{.*#+}} xmm7 = [127,127]
+; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm10 = [2147483775,2147483775]
+; SSE41-NEXT: movdqa %xmm10, %xmm6
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm9, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm9
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm9
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm10, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm11
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm11
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm10, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm6
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm6
+; SSE41-NEXT: movdqa %xmm8, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm10, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm2, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm7
+; SSE41-NEXT: movapd {{.*#+}} xmm2 = [18446744073709551488,18446744073709551488]
+; SSE41-NEXT: movapd %xmm7, %xmm0
+; SSE41-NEXT: xorpd %xmm5, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [18446744071562067840,18446744071562067840]
+; SSE41-NEXT: movapd %xmm0, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm3, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: movapd %xmm2, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm1
+; SSE41-NEXT: movapd %xmm6, %xmm0
+; SSE41-NEXT: xorpd %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm2, %xmm7
+; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm7
+; SSE41-NEXT: movapd %xmm11, %xmm0
+; SSE41-NEXT: xorpd %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm2, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm11, %xmm3
+; SSE41-NEXT: xorpd %xmm9, %xmm5
+; SSE41-NEXT: movapd %xmm5, %xmm0
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm9, %xmm2
+; SSE41-NEXT: movapd {{.*#+}} xmm0 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE41-NEXT: andpd %xmm0, %xmm2
+; SSE41-NEXT: andpd %xmm0, %xmm3
+; SSE41-NEXT: packusdw %xmm2, %xmm3
+; SSE41-NEXT: andpd %xmm0, %xmm7
+; SSE41-NEXT: andpd %xmm0, %xmm1
+; SSE41-NEXT: packusdw %xmm7, %xmm1
+; SSE41-NEXT: packusdw %xmm3, %xmm1
+; SSE41-NEXT: packuswb %xmm1, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_ssat_v8i64_v8i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [127,127,127,127]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [127,127]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [18446744073709551488,18446744073709551488]
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm0, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm1, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm3 = mem[0,0]
+; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_ssat_v8i64_v8i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [127,127,127,127]
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_ssat_v8i64_v8i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpminsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512F-NEXT: vpmaxsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512F-NEXT: vmovq %xmm0, %rcx
+; AVX512F-NEXT: vmovd %ecx, %xmm1
+; AVX512F-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512F-NEXT: vmovq %xmm2, %rax
+; AVX512F-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512F-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vextracti32x4 $2, %zmm0, %xmm2
+; AVX512F-NEXT: vmovq %xmm2, %rax
+; AVX512F-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512F-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vextracti32x4 $3, %zmm0, %xmm0
+; AVX512F-NEXT: vmovq %xmm0, %rax
+; AVX512F-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512F-NEXT: vpinsrb $7, %eax, %xmm1, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_ssat_v8i64_v8i8:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpminsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512VL-NEXT: vpmaxsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512VL-NEXT: vmovq %xmm0, %rcx
+; AVX512VL-NEXT: vmovd %ecx, %xmm1
+; AVX512VL-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512VL-NEXT: vmovq %xmm2, %rax
+; AVX512VL-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512VL-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vextracti32x4 $2, %zmm0, %xmm2
+; AVX512VL-NEXT: vmovq %xmm2, %rax
+; AVX512VL-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512VL-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vextracti32x4 $3, %zmm0, %xmm0
+; AVX512VL-NEXT: vmovq %xmm0, %rax
+; AVX512VL-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512VL-NEXT: vpinsrb $7, %eax, %xmm1, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_ssat_v8i64_v8i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpminsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmaxsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512BW-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX512BW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX512BW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_ssat_v8i64_v8i8:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpminsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512BWVL-NEXT: vpmaxsq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512BWVL-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BWVL-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512BWVL-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BWVL-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512BWVL-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX512BWVL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX512BWVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp slt <8 x i64> %a0, <i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127>
+ %2 = select <8 x i1> %1, <8 x i64> %a0, <8 x i64> <i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127>
+ %3 = icmp sgt <8 x i64> %2, <i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128>
+ %4 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> <i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128>
+ %5 = trunc <8 x i64> %4 to <8 x i8>
+ ret <8 x i8> %5
+}
+
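+; NOTE: the store variant below lets AVX512 use the memory form of vpmovsqb
+; and write the eight saturated bytes straight to (%rdi), avoiding the
+; vpextrq/vpinsrb round trip through general-purpose registers seen above.
+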
+define void @trunc_ssat_v8i64_v8i8_store(<8 x i64> %a0, <8 x i8> *%p1) {
+; SSE2-LABEL: trunc_ssat_v8i64_v8i8_store:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [127,127]
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm3, %xmm5
+; SSE2-NEXT: pxor %xmm4, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [2147483775,2147483775]
+; SSE2-NEXT: movdqa %xmm9, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm5
+; SSE2-NEXT: por %xmm3, %xmm5
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm3
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm3
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm2
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm7
+; SSE2-NEXT: pand %xmm7, %xmm0
+; SSE2-NEXT: pandn %xmm8, %xmm7
+; SSE2-NEXT: por %xmm0, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [18446744073709551488,18446744073709551488]
+; SSE2-NEXT: movdqa %xmm7, %xmm0
+; SSE2-NEXT: pxor %xmm4, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [18446744071562067840,18446744071562067840]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm1[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm7
+; SSE2-NEXT: pandn %xmm8, %xmm0
+; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm7
+; SSE2-NEXT: pand %xmm7, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm7
+; SSE2-NEXT: por %xmm2, %xmm7
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: pxor %xmm5, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm3, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm5
+; SSE2-NEXT: pandn %xmm8, %xmm1
+; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: packuswb %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm3, %xmm7
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: packuswb %xmm7, %xmm0
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm0, %xmm0
+; SSE2-NEXT: movq %xmm0, (%rdi)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_ssat_v8i64_v8i8_store:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [127,127]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm3, %xmm5
+; SSSE3-NEXT: pxor %xmm4, %xmm5
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [2147483775,2147483775]
+; SSSE3-NEXT: movdqa %xmm9, %xmm7
+; SSSE3-NEXT: pcmpgtd %xmm5, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm5
+; SSSE3-NEXT: por %xmm3, %xmm5
+; SSSE3-NEXT: movdqa %xmm2, %xmm3
+; SSSE3-NEXT: pxor %xmm4, %xmm3
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm3
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: pxor %xmm4, %xmm2
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm1
+; SSSE3-NEXT: pandn %xmm8, %xmm2
+; SSSE3-NEXT: por %xmm1, %xmm2
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm7
+; SSSE3-NEXT: pand %xmm7, %xmm0
+; SSSE3-NEXT: pandn %xmm8, %xmm7
+; SSSE3-NEXT: por %xmm0, %xmm7
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [18446744073709551488,18446744073709551488]
+; SSSE3-NEXT: movdqa %xmm7, %xmm0
+; SSSE3-NEXT: pxor %xmm4, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [18446744071562067840,18446744071562067840]
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm1[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm0
+; SSSE3-NEXT: pand %xmm0, %xmm7
+; SSSE3-NEXT: pandn %xmm8, %xmm0
+; SSSE3-NEXT: por %xmm7, %xmm0
+; SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm7
+; SSSE3-NEXT: pand %xmm7, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm7
+; SSSE3-NEXT: por %xmm2, %xmm7
+; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm2
+; SSSE3-NEXT: por %xmm3, %xmm2
+; SSSE3-NEXT: pxor %xmm5, %xmm4
+; SSSE3-NEXT: movdqa %xmm4, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: pand %xmm3, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm5
+; SSSE3-NEXT: pandn %xmm8, %xmm1
+; SSSE3-NEXT: por %xmm5, %xmm1
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSSE3-NEXT: pand %xmm3, %xmm1
+; SSSE3-NEXT: pand %xmm3, %xmm2
+; SSSE3-NEXT: packuswb %xmm1, %xmm2
+; SSSE3-NEXT: pand %xmm3, %xmm7
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: packuswb %xmm7, %xmm0
+; SSSE3-NEXT: packuswb %xmm2, %xmm0
+; SSSE3-NEXT: packuswb %xmm0, %xmm0
+; SSSE3-NEXT: movq %xmm0, (%rdi)
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_ssat_v8i64_v8i8_store:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm8
+; SSE41-NEXT: movapd {{.*#+}} xmm7 = [127,127]
+; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm10 = [2147483775,2147483775]
+; SSE41-NEXT: movdqa %xmm10, %xmm6
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm9, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm9
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm9
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm10, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm11
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm11
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm10, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm7, %xmm6
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm6
+; SSE41-NEXT: movdqa %xmm8, %xmm0
+; SSE41-NEXT: pxor %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm10, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm2, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm7
+; SSE41-NEXT: movapd {{.*#+}} xmm1 = [18446744073709551488,18446744073709551488]
+; SSE41-NEXT: movapd %xmm7, %xmm0
+; SSE41-NEXT: xorpd %xmm5, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [18446744071562067840,18446744071562067840]
+; SSE41-NEXT: movapd %xmm0, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm3, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: movapd %xmm1, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm2
+; SSE41-NEXT: movapd %xmm6, %xmm0
+; SSE41-NEXT: xorpd %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm1, %xmm7
+; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm7
+; SSE41-NEXT: movapd %xmm11, %xmm0
+; SSE41-NEXT: xorpd %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm1, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm11, %xmm3
+; SSE41-NEXT: xorpd %xmm9, %xmm5
+; SSE41-NEXT: movapd %xmm5, %xmm0
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm9, %xmm1
+; SSE41-NEXT: movapd {{.*#+}} xmm0 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE41-NEXT: andpd %xmm0, %xmm1
+; SSE41-NEXT: andpd %xmm0, %xmm3
+; SSE41-NEXT: packusdw %xmm1, %xmm3
+; SSE41-NEXT: andpd %xmm0, %xmm7
+; SSE41-NEXT: andpd %xmm0, %xmm2
+; SSE41-NEXT: packusdw %xmm7, %xmm2
+; SSE41-NEXT: packusdw %xmm3, %xmm2
+; SSE41-NEXT: packuswb %xmm2, %xmm2
+; SSE41-NEXT: movq %xmm2, (%rdi)
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_ssat_v8i64_v8i8_store:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [127,127,127,127]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [127,127]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [18446744073709551488,18446744073709551488]
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm0, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm1, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm3 = mem[0,0]
+; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_ssat_v8i64_v8i8_store:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [127,127,127,127]
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX2-NEXT: vmovq %xmm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_ssat_v8i64_v8i8_store:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsqb %zmm0, (%rdi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <8 x i64> %a0, <i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127>
+ %2 = select <8 x i1> %1, <8 x i64> %a0, <8 x i64> <i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127>
+ %3 = icmp sgt <8 x i64> %2, <i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128>
+ %4 = select <8 x i1> %3, <8 x i64> %2, <8 x i64> <i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128>
+ %5 = trunc <8 x i64> %4 to <8 x i8>
+ store <8 x i8> %5, <8 x i8> *%p1
+ ret void
+}
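+
+; Note: on AVX512 the clamp, truncate and store above all fold into the
+; single saturating truncating-store form of vpmovsqb, which is why that
+; run shrinks to one instruction plus vzeroupper.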
+
+define <16 x i8> @trunc_ssat_v16i64_v16i8(<16 x i64> %a0) {
+; SSE2-LABEL: trunc_ssat_v16i64_v16i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [127,127]
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm6, %xmm9
+; SSE2-NEXT: pxor %xmm8, %xmm9
+; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [2147483775,2147483775]
+; SSE2-NEXT: movdqa %xmm11, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm12
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm9
+; SSE2-NEXT: pshufd {{.*#+}} xmm14 = xmm9[1,1,3,3]
+; SSE2-NEXT: pand %xmm13, %xmm14
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm12[1,1,3,3]
+; SSE2-NEXT: por %xmm14, %xmm9
+; SSE2-NEXT: pand %xmm9, %xmm6
+; SSE2-NEXT: pandn %xmm10, %xmm9
+; SSE2-NEXT: por %xmm6, %xmm9
+; SSE2-NEXT: movdqa %xmm7, %xmm6
+; SSE2-NEXT: pxor %xmm8, %xmm6
+; SSE2-NEXT: movdqa %xmm11, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm12
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT: pand %xmm13, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm12[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm12
+; SSE2-NEXT: pand %xmm12, %xmm7
+; SSE2-NEXT: pandn %xmm10, %xmm12
+; SSE2-NEXT: por %xmm7, %xmm12
+; SSE2-NEXT: movdqa %xmm4, %xmm6
+; SSE2-NEXT: pxor %xmm8, %xmm6
+; SSE2-NEXT: movdqa %xmm11, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm7[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT: pand %xmm13, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm7[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm13
+; SSE2-NEXT: pand %xmm13, %xmm4
+; SSE2-NEXT: pandn %xmm10, %xmm13
+; SSE2-NEXT: por %xmm4, %xmm13
+; SSE2-NEXT: movdqa %xmm5, %xmm4
+; SSE2-NEXT: pxor %xmm8, %xmm4
+; SSE2-NEXT: movdqa %xmm11, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm14 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm14
+; SSE2-NEXT: pand %xmm14, %xmm5
+; SSE2-NEXT: pandn %xmm10, %xmm14
+; SSE2-NEXT: por %xmm5, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm8, %xmm4
+; SSE2-NEXT: movdqa %xmm11, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pandn %xmm10, %xmm5
+; SSE2-NEXT: por %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: pxor %xmm8, %xmm2
+; SSE2-NEXT: movdqa %xmm11, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm2, %xmm6
+; SSE2-NEXT: pand %xmm6, %xmm3
+; SSE2-NEXT: pandn %xmm10, %xmm6
+; SSE2-NEXT: por %xmm3, %xmm6
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm8, %xmm2
+; SSE2-NEXT: movdqa %xmm11, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: pandn %xmm10, %xmm3
+; SSE2-NEXT: por %xmm0, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm8, %xmm0
+; SSE2-NEXT: movdqa %xmm11, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pandn %xmm10, %xmm4
+; SSE2-NEXT: por %xmm1, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [18446744073709551488,18446744073709551488]
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: pxor %xmm8, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [18446744071562067840,18446744071562067840]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm11, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm4
+; SSE2-NEXT: pandn %xmm10, %xmm1
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm8, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm11, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm3
+; SSE2-NEXT: pandn %xmm10, %xmm0
+; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: packssdw %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm6, %xmm1
+; SSE2-NEXT: pxor %xmm8, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm11, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm6
+; SSE2-NEXT: pandn %xmm10, %xmm2
+; SSE2-NEXT: por %xmm6, %xmm2
+; SSE2-NEXT: movdqa %xmm5, %xmm1
+; SSE2-NEXT: pxor %xmm8, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm11, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm5
+; SSE2-NEXT: pandn %xmm10, %xmm3
+; SSE2-NEXT: por %xmm5, %xmm3
+; SSE2-NEXT: packssdw %xmm2, %xmm3
+; SSE2-NEXT: packssdw %xmm3, %xmm0
+; SSE2-NEXT: movdqa %xmm14, %xmm1
+; SSE2-NEXT: pxor %xmm8, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm11, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm14
+; SSE2-NEXT: pandn %xmm10, %xmm2
+; SSE2-NEXT: por %xmm14, %xmm2
+; SSE2-NEXT: movdqa %xmm13, %xmm1
+; SSE2-NEXT: pxor %xmm8, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm11, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm13
+; SSE2-NEXT: pandn %xmm10, %xmm3
+; SSE2-NEXT: por %xmm13, %xmm3
+; SSE2-NEXT: packssdw %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm12, %xmm1
+; SSE2-NEXT: pxor %xmm8, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm11, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm12
+; SSE2-NEXT: pandn %xmm10, %xmm2
+; SSE2-NEXT: por %xmm12, %xmm2
+; SSE2-NEXT: pxor %xmm9, %xmm8
+; SSE2-NEXT: movdqa %xmm8, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm11, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm11, %xmm8
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm8[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm9
+; SSE2-NEXT: pandn %xmm10, %xmm1
+; SSE2-NEXT: por %xmm9, %xmm1
+; SSE2-NEXT: packssdw %xmm2, %xmm1
+; SSE2-NEXT: packssdw %xmm1, %xmm3
+; SSE2-NEXT: packsswb %xmm3, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_ssat_v16i64_v16i8:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm10 = [127,127]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm6, %xmm9
+; SSSE3-NEXT: pxor %xmm8, %xmm9
+; SSSE3-NEXT: movdqa {{.*#+}} xmm11 = [2147483775,2147483775]
+; SSSE3-NEXT: movdqa %xmm11, %xmm12
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm12
+; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm9
+; SSSE3-NEXT: pshufd {{.*#+}} xmm14 = xmm9[1,1,3,3]
+; SSSE3-NEXT: pand %xmm13, %xmm14
+; SSSE3-NEXT: pshufd {{.*#+}} xmm9 = xmm12[1,1,3,3]
+; SSSE3-NEXT: por %xmm14, %xmm9
+; SSSE3-NEXT: pand %xmm9, %xmm6
+; SSSE3-NEXT: pandn %xmm10, %xmm9
+; SSSE3-NEXT: por %xmm6, %xmm9
+; SSSE3-NEXT: movdqa %xmm7, %xmm6
+; SSSE3-NEXT: pxor %xmm8, %xmm6
+; SSSE3-NEXT: movdqa %xmm11, %xmm12
+; SSSE3-NEXT: pcmpgtd %xmm6, %xmm12
+; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSSE3-NEXT: pand %xmm13, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm12 = xmm12[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm12
+; SSSE3-NEXT: pand %xmm12, %xmm7
+; SSSE3-NEXT: pandn %xmm10, %xmm12
+; SSSE3-NEXT: por %xmm7, %xmm12
+; SSSE3-NEXT: movdqa %xmm4, %xmm6
+; SSSE3-NEXT: pxor %xmm8, %xmm6
+; SSSE3-NEXT: movdqa %xmm11, %xmm7
+; SSSE3-NEXT: pcmpgtd %xmm6, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm7[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSSE3-NEXT: pand %xmm13, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm7[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm13
+; SSSE3-NEXT: pand %xmm13, %xmm4
+; SSSE3-NEXT: pandn %xmm10, %xmm13
+; SSSE3-NEXT: por %xmm4, %xmm13
+; SSSE3-NEXT: movdqa %xmm5, %xmm4
+; SSSE3-NEXT: pxor %xmm8, %xmm4
+; SSSE3-NEXT: movdqa %xmm11, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm14 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm14
+; SSSE3-NEXT: pand %xmm14, %xmm5
+; SSSE3-NEXT: pandn %xmm10, %xmm14
+; SSSE3-NEXT: por %xmm5, %xmm14
+; SSSE3-NEXT: movdqa %xmm2, %xmm4
+; SSSE3-NEXT: pxor %xmm8, %xmm4
+; SSSE3-NEXT: movdqa %xmm11, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm2
+; SSSE3-NEXT: pandn %xmm10, %xmm5
+; SSSE3-NEXT: por %xmm2, %xmm5
+; SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSSE3-NEXT: pxor %xmm8, %xmm2
+; SSSE3-NEXT: movdqa %xmm11, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm2, %xmm6
+; SSSE3-NEXT: pand %xmm6, %xmm3
+; SSSE3-NEXT: pandn %xmm10, %xmm6
+; SSSE3-NEXT: por %xmm3, %xmm6
+; SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSSE3-NEXT: pxor %xmm8, %xmm2
+; SSSE3-NEXT: movdqa %xmm11, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm0
+; SSSE3-NEXT: pandn %xmm10, %xmm3
+; SSSE3-NEXT: por %xmm0, %xmm3
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pxor %xmm8, %xmm0
+; SSSE3-NEXT: movdqa %xmm11, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm0, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pandn %xmm10, %xmm4
+; SSSE3-NEXT: por %xmm1, %xmm4
+; SSSE3-NEXT: movdqa {{.*#+}} xmm10 = [18446744073709551488,18446744073709551488]
+; SSSE3-NEXT: movdqa %xmm4, %xmm0
+; SSSE3-NEXT: pxor %xmm8, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm11 = [18446744071562067840,18446744071562067840]
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm11, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm2, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: por %xmm0, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm4
+; SSSE3-NEXT: pandn %xmm10, %xmm1
+; SSSE3-NEXT: por %xmm4, %xmm1
+; SSSE3-NEXT: movdqa %xmm3, %xmm0
+; SSSE3-NEXT: pxor %xmm8, %xmm0
+; SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm11, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm0
+; SSSE3-NEXT: pand %xmm0, %xmm3
+; SSSE3-NEXT: pandn %xmm10, %xmm0
+; SSSE3-NEXT: por %xmm3, %xmm0
+; SSSE3-NEXT: packssdw %xmm1, %xmm0
+; SSSE3-NEXT: movdqa %xmm6, %xmm1
+; SSSE3-NEXT: pxor %xmm8, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm11, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm3, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm6
+; SSSE3-NEXT: pandn %xmm10, %xmm2
+; SSSE3-NEXT: por %xmm6, %xmm2
+; SSSE3-NEXT: movdqa %xmm5, %xmm1
+; SSSE3-NEXT: pxor %xmm8, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm11, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm5
+; SSSE3-NEXT: pandn %xmm10, %xmm3
+; SSSE3-NEXT: por %xmm5, %xmm3
+; SSSE3-NEXT: packssdw %xmm2, %xmm3
+; SSSE3-NEXT: packssdw %xmm3, %xmm0
+; SSSE3-NEXT: movdqa %xmm14, %xmm1
+; SSSE3-NEXT: pxor %xmm8, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm11, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm3, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm14
+; SSSE3-NEXT: pandn %xmm10, %xmm2
+; SSSE3-NEXT: por %xmm14, %xmm2
+; SSSE3-NEXT: movdqa %xmm13, %xmm1
+; SSSE3-NEXT: pxor %xmm8, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm11, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm13
+; SSSE3-NEXT: pandn %xmm10, %xmm3
+; SSSE3-NEXT: por %xmm13, %xmm3
+; SSSE3-NEXT: packssdw %xmm2, %xmm3
+; SSSE3-NEXT: movdqa %xmm12, %xmm1
+; SSSE3-NEXT: pxor %xmm8, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm11, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm12
+; SSSE3-NEXT: pandn %xmm10, %xmm2
+; SSSE3-NEXT: por %xmm12, %xmm2
+; SSSE3-NEXT: pxor %xmm9, %xmm8
+; SSSE3-NEXT: movdqa %xmm8, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm11, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm11, %xmm8
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm8[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: por %xmm5, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm9
+; SSSE3-NEXT: pandn %xmm10, %xmm1
+; SSSE3-NEXT: por %xmm9, %xmm1
+; SSSE3-NEXT: packssdw %xmm2, %xmm1
+; SSSE3-NEXT: packssdw %xmm1, %xmm3
+; SSSE3-NEXT: packsswb %xmm3, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_ssat_v16i64_v16i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm8
+; SSE41-NEXT: movapd {{.*#+}} xmm11 = [127,127]
+; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [2147483648,2147483648]
+; SSE41-NEXT: movdqa %xmm6, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm12 = [2147483775,2147483775]
+; SSE41-NEXT: movdqa %xmm12, %xmm10
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm10
+; SSE41-NEXT: pshufd {{.*#+}} xmm13 = xmm10[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm12, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm13, %xmm14
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,3,3]
+; SSE41-NEXT: por %xmm14, %xmm0
+; SSE41-NEXT: movapd %xmm11, %xmm10
+; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm10
+; SSE41-NEXT: movdqa %xmm7, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm12, %xmm6
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm13 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm12, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm13, %xmm14
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT: por %xmm14, %xmm0
+; SSE41-NEXT: movapd %xmm11, %xmm13
+; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm13
+; SSE41-NEXT: movdqa %xmm4, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm12, %xmm6
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm14 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm12, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm14, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm11, %xmm14
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm14
+; SSE41-NEXT: movdqa %xmm5, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm12, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm12, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm11, %xmm15
+; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm15
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm12, %xmm5
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm12, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm11, %xmm5
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm12, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm12, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm11, %xmm6
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm6
+; SSE41-NEXT: movdqa %xmm8, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm12, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm12, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm3, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm11, %xmm7
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm7
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm9, %xmm0
+; SSE41-NEXT: movdqa %xmm12, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm12, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm3, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm11
+; SSE41-NEXT: movapd {{.*#+}} xmm2 = [18446744073709551488,18446744073709551488]
+; SSE41-NEXT: movapd %xmm11, %xmm0
+; SSE41-NEXT: xorpd %xmm9, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm8 = [18446744071562067840,18446744071562067840]
+; SSE41-NEXT: movapd %xmm0, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm3, %xmm0
+; SSE41-NEXT: movapd %xmm2, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm11, %xmm3
+; SSE41-NEXT: movapd %xmm7, %xmm0
+; SSE41-NEXT: xorpd %xmm9, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm11, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: movapd %xmm2, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm1
+; SSE41-NEXT: packssdw %xmm3, %xmm1
+; SSE41-NEXT: movapd %xmm6, %xmm0
+; SSE41-NEXT: xorpd %xmm9, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm2, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm3
+; SSE41-NEXT: movapd %xmm5, %xmm0
+; SSE41-NEXT: xorpd %xmm9, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm2, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm4
+; SSE41-NEXT: packssdw %xmm3, %xmm4
+; SSE41-NEXT: packssdw %xmm4, %xmm1
+; SSE41-NEXT: movapd %xmm15, %xmm0
+; SSE41-NEXT: xorpd %xmm9, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm4, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm2, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm15, %xmm3
+; SSE41-NEXT: movapd %xmm14, %xmm0
+; SSE41-NEXT: xorpd %xmm9, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm2, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm14, %xmm4
+; SSE41-NEXT: packssdw %xmm3, %xmm4
+; SSE41-NEXT: movapd %xmm13, %xmm0
+; SSE41-NEXT: xorpd %xmm9, %xmm0
+; SSE41-NEXT: movapd %xmm0, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm2, %xmm3
+; SSE41-NEXT: blendvpd %xmm0, %xmm13, %xmm3
+; SSE41-NEXT: xorpd %xmm10, %xmm9
+; SSE41-NEXT: movapd %xmm9, %xmm0
+; SSE41-NEXT: pcmpgtd %xmm8, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm8, %xmm9
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm9[1,1,3,3]
+; SSE41-NEXT: pand %xmm5, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm10, %xmm2
+; SSE41-NEXT: packssdw %xmm3, %xmm2
+; SSE41-NEXT: packssdw %xmm2, %xmm4
+; SSE41-NEXT: packsswb %xmm4, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_ssat_v16i64_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm4 = [127,127,127,127]
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm5
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [127,127]
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm6, %xmm7
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5
+; AVX1-NEXT: vblendvpd %ymm5, %ymm3, %ymm4, %ymm3
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm6, %xmm7
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5
+; AVX1-NEXT: vblendvpd %ymm5, %ymm2, %ymm4, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm6, %xmm7
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5
+; AVX1-NEXT: vblendvpd %ymm5, %ymm1, %ymm4, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm6, %xmm6
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5
+; AVX1-NEXT: vblendvpd %ymm5, %ymm0, %ymm4, %ymm0
+; AVX1-NEXT: vmovapd {{.*#+}} ymm4 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [18446744073709551488,18446744073709551488]
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm0, %xmm7
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5
+; AVX1-NEXT: vblendvpd %ymm5, %ymm0, %ymm4, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm1, %xmm7
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5
+; AVX1-NEXT: vblendvpd %ymm5, %ymm1, %ymm4, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm2, %xmm7
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5
+; AVX1-NEXT: vblendvpd %ymm5, %ymm2, %ymm4, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm3, %xmm6
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5
+; AVX1-NEXT: vblendvpd %ymm5, %ymm3, %ymm4, %ymm3
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vpackssdw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vpackssdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackssdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_ssat_v16i64_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [127,127,127,127]
+; AVX2-NEXT: vpcmpgtq %ymm2, %ymm4, %ymm5
+; AVX2-NEXT: vblendvpd %ymm5, %ymm2, %ymm4, %ymm2
+; AVX2-NEXT: vpcmpgtq %ymm3, %ymm4, %ymm5
+; AVX2-NEXT: vblendvpd %ymm5, %ymm3, %ymm4, %ymm3
+; AVX2-NEXT: vpcmpgtq %ymm0, %ymm4, %ymm5
+; AVX2-NEXT: vblendvpd %ymm5, %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm4, %ymm5
+; AVX2-NEXT: vblendvpd %ymm5, %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
+; AVX2-NEXT: vpcmpgtq %ymm4, %ymm1, %ymm5
+; AVX2-NEXT: vblendvpd %ymm5, %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpcmpgtq %ymm4, %ymm0, %ymm5
+; AVX2-NEXT: vblendvpd %ymm5, %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpgtq %ymm4, %ymm3, %ymm1
+; AVX2-NEXT: vblendvpd %ymm1, %ymm3, %ymm4, %ymm1
+; AVX2-NEXT: vpcmpgtq %ymm4, %ymm2, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm2, %ymm4, %ymm2
+; AVX2-NEXT: vpackssdw %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_ssat_v16i64_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm2 = [127,127,127,127,127,127,127,127]
+; AVX512-NEXT: vpminsq %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpminsq %zmm2, %zmm1, %zmm1
+; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm2 = [18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488,18446744073709551488]
+; AVX512-NEXT: vpmaxsq %zmm2, %zmm1, %zmm1
+; AVX512-NEXT: vpmaxsq %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <16 x i64> %a0, <i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127>
+ %2 = select <16 x i1> %1, <16 x i64> %a0, <16 x i64> <i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127, i64 127>
+ %3 = icmp sgt <16 x i64> %2, <i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128>
+ %4 = select <16 x i1> %3, <16 x i64> %2, <16 x i64> <i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128, i64 -128>
+ %5 = trunc <16 x i64> %4 to <16 x i8>
+ ret <16 x i8> %5
+}
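+
+; Note: the SSE lowerings above perform the clamp while the elements are
+; still 64 bits wide, so every value entering the packssdw/packsswb cascade
+; is already a sign-extended i8; the saturating packs can then narrow the
+; 64-bit lanes all the way to 8 bits without changing any value.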
+
+define <8 x i8> @trunc_ssat_v8i32_v8i8(<8 x i32> %a0) {
+; SSE-LABEL: trunc_ssat_v8i32_v8i8:
+; SSE: # %bb.0:
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: packsswb %xmm0, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_ssat_v8i32_v8i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_ssat_v8i32_v8i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_ssat_v8i32_v8i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [127,127,127,127,127,127,127,127]
+; AVX512F-NEXT: vpminsd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168]
+; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_ssat_v8i32_v8i8:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpminsd {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmaxsd {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_ssat_v8i32_v8i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [127,127,127,127,127,127,127,127]
+; AVX512BW-NEXT: vpminsd %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168]
+; AVX512BW-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_ssat_v8i32_v8i8:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpminsd {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512BWVL-NEXT: vpmaxsd {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512BWVL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp slt <8 x i32> %a0, <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
+ %2 = select <8 x i1> %1, <8 x i32> %a0, <8 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
+ %3 = icmp sgt <8 x i32> %2, <i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128>
+ %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> <i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128>
+ %5 = trunc <8 x i32> %4 to <8 x i8>
+ ret <8 x i8> %5
+}
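+
+; Note: for i32 (and, further down, i16) sources the explicit clamp folds
+; away on SSE/AVX: packssdw and packsswb already saturate signed inputs to
+; the destination range, so the icmp/select chain lowers to bare packs.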
+
+define void @trunc_ssat_v8i32_v8i8_store(<8 x i32> %a0, <8 x i8> *%p1) {
+; SSE-LABEL: trunc_ssat_v8i32_v8i8_store:
+; SSE: # %bb.0:
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: packsswb %xmm0, %xmm0
+; SSE-NEXT: movq %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_ssat_v8i32_v8i8_store:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_ssat_v8i32_v8i8_store:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vmovq %xmm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_ssat_v8i32_v8i8_store:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [127,127,127,127,127,127,127,127]
+; AVX512F-NEXT: vpminsd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168]
+; AVX512F-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vmovq %xmm0, (%rdi)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_ssat_v8i32_v8i8_store:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpmovsdb %ymm0, (%rdi)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_ssat_v8i32_v8i8_store:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [127,127,127,127,127,127,127,127]
+; AVX512BW-NEXT: vpminsd %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168,4294967168]
+; AVX512BW-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vmovq %xmm0, (%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_ssat_v8i32_v8i8_store:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpmovsdb %ymm0, (%rdi)
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp slt <8 x i32> %a0, <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
+ %2 = select <8 x i1> %1, <8 x i32> %a0, <8 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
+ %3 = icmp sgt <8 x i32> %2, <i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128>
+ %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> <i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128>
+ %5 = trunc <8 x i32> %4 to <8 x i8>
+ store <8 x i8> %5, <8 x i8> *%p1
+ ret void
+}
+
+define <16 x i8> @trunc_ssat_v16i32_v16i8(<16 x i32> %a0) {
+; SSE-LABEL: trunc_ssat_v16i32_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: packssdw %xmm3, %xmm2
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: packsswb %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_ssat_v16i32_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_ssat_v16i32_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_ssat_v16i32_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovsdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp slt <16 x i32> %a0, <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
+ %2 = select <16 x i1> %1, <16 x i32> %a0, <16 x i32> <i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127, i32 127>
+ %3 = icmp sgt <16 x i32> %2, <i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128>
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> <i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128, i32 -128>
+ %5 = trunc <16 x i32> %4 to <16 x i8>
+ ret <16 x i8> %5
+}
+
+define <16 x i8> @trunc_ssat_v16i16_v16i8(<16 x i16> %a0) {
+; SSE-LABEL: trunc_ssat_v16i16_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_ssat_v16i16_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_ssat_v16i16_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_ssat_v16i16_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpminsw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT: vpmaxsw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_ssat_v16i16_v16i8:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpminsw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: vpmaxsw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_ssat_v16i16_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpminsw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: vpmaxsw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_ssat_v16i16_v16i8:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpmovswb %ymm0, %xmm0
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp slt <16 x i16> %a0, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+ %2 = select <16 x i1> %1, <16 x i16> %a0, <16 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+ %3 = icmp sgt <16 x i16> %2, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+ %4 = select <16 x i1> %3, <16 x i16> %2, <16 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+ %5 = trunc <16 x i16> %4 to <16 x i8>
+ ret <16 x i8> %5
+}
+
+define <32 x i8> @trunc_ssat_v32i16_v32i8(<32 x i16> %a0) {
+; SSE-LABEL: trunc_ssat_v32i16_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: packsswb %xmm3, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc_ssat_v32i16_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_ssat_v32i16_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_ssat_v32i16_v32i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT: vpminsw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpminsw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408]
+; AVX512F-NEXT: vpmaxsw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vpmaxsw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_ssat_v32i16_v32i8:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-NEXT: vpminsw %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpminsw %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408,65408]
+; AVX512VL-NEXT: vpmaxsw %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vpmaxsw %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512VL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_ssat_v32i16_v32i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmovswb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_ssat_v32i16_v32i8:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpmovswb %zmm0, %ymm0
+; AVX512BWVL-NEXT: retq
+ %1 = icmp slt <32 x i16> %a0, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+ %2 = select <32 x i1> %1, <32 x i16> %a0, <32 x i16> <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
+ %3 = icmp sgt <32 x i16> %2, <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+ %4 = select <32 x i1> %3, <32 x i16> %2, <32 x i16> <i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128, i16 -128>
+ %5 = trunc <32 x i16> %4 to <32 x i8>
+ ret <32 x i8> %5
+}
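+
+; For reference, every ssat test in this file exercises the same
+; clamp-then-truncate idiom; a minimal scalar sketch of the pattern being
+; legalized (hypothetical helper, not part of the checked RUN lines):
+;
+;   define i8 @ssat_i64_to_i8(i64 %x) {
+;     %lt = icmp slt i64 %x, 127          ; clamp against the i8 maximum
+;     %lo = select i1 %lt, i64 %x, i64 127
+;     %gt = icmp sgt i64 %lo, -128        ; clamp against the i8 minimum
+;     %hi = select i1 %gt, i64 %lo, i64 -128
+;     %r = trunc i64 %hi to i8            ; truncation is now lossless
+;     ret i8 %r
+;   }
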
Added: llvm/trunk/test/CodeGen/X86/vector-trunc-usat-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-usat-widen.ll?rev=346745&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-usat-widen.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-usat-widen.ll Mon Nov 12 23:47:52 2018
@@ -0,0 +1,2587 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-SLOW
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL
+
+;
+; Unsigned saturation truncation to vXi32
+;
+
+define <4 x i32> @trunc_usat_v4i64_v4i32(<4 x i64> %a0) {
+; SSE2-LABEL: trunc_usat_v4i64_v4i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259455,9223372039002259455]
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-NEXT: por %xmm3, %xmm5
+; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm4, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pandn {{.*}}(%rip), %xmm3
+; SSE2-NEXT: por %xmm1, %xmm3
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: pandn {{.*}}(%rip), %xmm5
+; SSE2-NEXT: por %xmm5, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_usat_v4i64_v4i32:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
+; SSSE3-NEXT: pxor %xmm2, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259455,9223372039002259455]
+; SSSE3-NEXT: movdqa %xmm4, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm4, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSSE3-NEXT: por %xmm3, %xmm5
+; SSSE3-NEXT: pxor %xmm1, %xmm2
+; SSSE3-NEXT: movdqa %xmm4, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm4, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm1
+; SSSE3-NEXT: pandn {{.*}}(%rip), %xmm3
+; SSSE3-NEXT: por %xmm1, %xmm3
+; SSSE3-NEXT: pand %xmm5, %xmm0
+; SSSE3-NEXT: pandn {{.*}}(%rip), %xmm5
+; SSSE3-NEXT: por %xmm5, %xmm0
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_usat_v4i64_v4i32:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: pxor %xmm0, %xmm3
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [9223372039002259455,9223372039002259455]
+; SSE41-NEXT: movdqa %xmm4, %xmm5
+; SSE41-NEXT: pcmpgtd %xmm3, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm3
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm4, %xmm5
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm4, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm6, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: movapd {{.*#+}} xmm4 = [4294967295,4294967295]
+; SSE41-NEXT: movapd {{.*#+}} xmm5 = [4294967295,429496729]
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm5
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm4
+; SSE41-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[0,2]
+; SSE41-NEXT: movaps %xmm4, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_usat_v4i64_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372041149743103,9223372041149743103]
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,429496729]
+; AVX1-NEXT: vblendvpd %ymm1, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_usat_v4i64_v4i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-SLOW-NEXT: vpxor %ymm1, %ymm0, %ymm1
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372041149743103,9223372041149743103,9223372041149743103,9223372041149743103]
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm1
+; AVX2-SLOW-NEXT: vmovapd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,429496729]
+; AVX2-SLOW-NEXT: vblendvpd %ymm1, %ymm0, %ymm2, %ymm0
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_usat_v4i64_v4i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-FAST-NEXT: vpxor %ymm1, %ymm0, %ymm1
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372041149743103,9223372041149743103,9223372041149743103,9223372041149743103]
+; AVX2-FAST-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vmovapd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,429496729]
+; AVX2-FAST-NEXT: vblendvpd %ymm1, %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vmovapd {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512F-LABEL: trunc_usat_v4i64_v4i32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
+; AVX512F-NEXT: vpcmpltuq %zmm1, %zmm0, %k1
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,429496729]
+; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512F-NEXT: vpmovqd %zmm1, %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_usat_v4i64_v4i32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpcmpltuq {{.*}}(%rip){1to4}, %ymm0, %k1
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,429496729]
+; AVX512VL-NEXT: vmovdqa64 %ymm0, %ymm1 {%k1}
+; AVX512VL-NEXT: vpmovqd %ymm1, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_usat_v4i64_v4i32:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
+; AVX512BW-NEXT: vpcmpltuq %zmm1, %zmm0, %k1
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,429496729]
+; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT: vpmovqd %zmm1, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_usat_v4i64_v4i32:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpcmpltuq {{.*}}(%rip){1to4}, %ymm0, %k1
+; AVX512BWVL-NEXT: vmovdqa {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,429496729]
+; AVX512BWVL-NEXT: vmovdqa64 %ymm0, %ymm1 {%k1}
+; AVX512BWVL-NEXT: vpmovqd %ymm1, %xmm0
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
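+; FIXME: The last lane of the select constant below is 429496729 rather than
+; the icmp bound 4294967295 (a dropped trailing digit), so this is not a
+; uniform saturation pattern and the blend constants in the checks above
+; differ per lane.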
+ %1 = icmp ult <4 x i64> %a0, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+ %2 = select <4 x i1> %1, <4 x i64> %a0, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 429496729>
+ %3 = trunc <4 x i64> %2 to <4 x i32>
+ ret <4 x i32> %3
+}
+
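+; Pre-SSE42 targets have no unsigned i64 compare, so the lowerings below XOR
+; the inputs with 0x8000000080000000 and test against the pre-flipped bound
+; 9223372039002259455 (0x800000007FFFFFFF): pcmpgtd decides on the high
+; dwords and pcmpeqd+pand catches the equal-high-dword case.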
+define <8 x i32> @trunc_usat_v8i64_v8i32(<8 x i64> %a0) {
+; SSE2-LABEL: trunc_usat_v8i64_v8i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [4294967295,4294967295]
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: pxor %xmm5, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259455,9223372039002259455]
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm4
+; SSE2-NEXT: por %xmm3, %xmm4
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm5, %xmm3
+; SSE2-NEXT: movdqa %xmm9, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm3
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm5, %xmm2
+; SSE2-NEXT: movdqa %xmm9, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm6, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm2, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm4
+; SSE2-NEXT: por %xmm1, %xmm4
+; SSE2-NEXT: pxor %xmm0, %xmm5
+; SSE2-NEXT: movdqa %xmm9, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-NEXT: pand %xmm2, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pandn %xmm8, %xmm1
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSE2-NEXT: movaps %xmm3, %xmm1
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_usat_v8i64_v8i32:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [4294967295,4294967295]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [9223372039002259456,9223372039002259456]
+; SSSE3-NEXT: movdqa %xmm3, %xmm7
+; SSSE3-NEXT: pxor %xmm5, %xmm7
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259455,9223372039002259455]
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm7, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm4
+; SSSE3-NEXT: por %xmm3, %xmm4
+; SSSE3-NEXT: movdqa %xmm2, %xmm3
+; SSSE3-NEXT: pxor %xmm5, %xmm3
+; SSSE3-NEXT: movdqa %xmm9, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm3
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: pxor %xmm5, %xmm2
+; SSSE3-NEXT: movdqa %xmm9, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm6, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm2, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pandn %xmm8, %xmm4
+; SSSE3-NEXT: por %xmm1, %xmm4
+; SSSE3-NEXT: pxor %xmm0, %xmm5
+; SSSE3-NEXT: movdqa %xmm9, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm5, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSSE3-NEXT: pand %xmm2, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: por %xmm5, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pandn %xmm8, %xmm1
+; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
+; SSSE3-NEXT: movaps %xmm3, %xmm1
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_usat_v8i64_v8i32:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm8
+; SSE41-NEXT: movapd {{.*#+}} xmm6 = [4294967295,4294967295]
+; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm7, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259455,9223372039002259455]
+; SSE41-NEXT: movdqa %xmm9, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm10, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm6, %xmm5
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm5
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm7, %xmm0
+; SSE41-NEXT: movdqa %xmm9, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm10, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: movapd %xmm6, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm4
+; SSE41-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[0,2]
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm7, %xmm0
+; SSE41-NEXT: movdqa %xmm9, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm3, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm6, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
+; SSE41-NEXT: pxor %xmm8, %xmm7
+; SSE41-NEXT: movdqa %xmm9, %xmm0
+; SSE41-NEXT: pcmpgtd %xmm7, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm7[1,1,3,3]
+; SSE41-NEXT: pand %xmm1, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT: por %xmm3, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm6
+; SSE41-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm2[0,2]
+; SSE41-NEXT: movaps %xmm6, %xmm0
+; SSE41-NEXT: movaps %xmm4, %xmm1
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_usat_v8i64_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372041149743103,9223372041149743103]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm6
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm6, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc_usat_v8i64_v8i32:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-SLOW-NEXT: vpxor %ymm3, %ymm1, %ymm4
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm5 = [9223372041149743103,9223372041149743103,9223372041149743103,9223372041149743103]
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm4, %ymm5, %ymm4
+; AVX2-SLOW-NEXT: vblendvpd %ymm4, %ymm1, %ymm2, %ymm1
+; AVX2-SLOW-NEXT: vpxor %ymm3, %ymm0, %ymm3
+; AVX2-SLOW-NEXT: vpcmpgtq %ymm3, %ymm5, %ymm3
+; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_usat_v8i64_v8i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vbroadcastsd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-FAST-NEXT: vpxor %ymm3, %ymm1, %ymm4
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} ymm5 = [9223372041149743103,9223372041149743103,9223372041149743103,9223372041149743103]
+; AVX2-FAST-NEXT: vpcmpgtq %ymm4, %ymm5, %ymm4
+; AVX2-FAST-NEXT: vblendvpd %ymm4, %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vpxor %ymm3, %ymm0, %ymm3
+; AVX2-FAST-NEXT: vpcmpgtq %ymm3, %ymm5, %ymm3
+; AVX2-FAST-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vmovapd {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermps %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc_usat_v8i64_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovusqd %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %1 = icmp ult <8 x i64> %a0, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+ %2 = select <8 x i1> %1, <8 x i64> %a0, <8 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+ %3 = trunc <8 x i64> %2 to <8 x i32>
+ ret <8 x i32> %3
+}
+
+;
+; Unsigned saturation truncation to vXi16
+;
+
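+; Each test clamps with icmp ult + select against 65535 before truncating.
+; AVX512 can fold the whole pattern into a single vpmovusqw/vpmovusdw (the
+; 256-bit source forms need VLX), AVX2 narrows with vpackusdw plus a vpermq
+; to undo its per-128-bit-lane operation, and SSE2/SSSE3 fall back to
+; compare-and-blend with 65535-based constants.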
+define <8 x i16> @trunc_usat_v8i64_v8i16(<8 x i64> %a0) {
+; SSE2-LABEL: trunc_usat_v8i64_v8i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535]
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: pxor %xmm6, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002324991,9223372039002324991]
+; SSE2-NEXT: movdqa %xmm9, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm5
+; SSE2-NEXT: por %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: pxor %xmm6, %xmm2
+; SSE2-NEXT: movdqa %xmm9, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pxor %xmm6, %xmm3
+; SSE2-NEXT: movdqa %xmm9, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm7, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm3, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: pandn %xmm8, %xmm4
+; SSE2-NEXT: por %xmm0, %xmm4
+; SSE2-NEXT: pxor %xmm1, %xmm6
+; SSE2-NEXT: movdqa %xmm9, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT: pand %xmm3, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm0
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[0,1,0,2,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_usat_v8i64_v8i16:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002259456,9223372039002259456]
+; SSSE3-NEXT: movdqa %xmm2, %xmm5
+; SSSE3-NEXT: pxor %xmm6, %xmm5
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002324991,9223372039002324991]
+; SSSE3-NEXT: movdqa %xmm9, %xmm7
+; SSSE3-NEXT: pcmpgtd %xmm5, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm5
+; SSSE3-NEXT: por %xmm2, %xmm5
+; SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSSE3-NEXT: pxor %xmm6, %xmm2
+; SSSE3-NEXT: movdqa %xmm9, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm2
+; SSSE3-NEXT: por %xmm3, %xmm2
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
+; SSSE3-NEXT: pxor %xmm6, %xmm3
+; SSSE3-NEXT: movdqa %xmm9, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSSE3-NEXT: pand %xmm7, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm3, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm0
+; SSSE3-NEXT: pandn %xmm8, %xmm4
+; SSSE3-NEXT: por %xmm0, %xmm4
+; SSSE3-NEXT: pxor %xmm1, %xmm6
+; SSSE3-NEXT: movdqa %xmm9, %xmm0
+; SSSE3-NEXT: pcmpgtd %xmm6, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSSE3-NEXT: pand %xmm3, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm0
+; SSSE3-NEXT: pand %xmm0, %xmm1
+; SSSE3-NEXT: pandn %xmm8, %xmm0
+; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,2,2,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm0[0,1,0,2,4,5,6,7]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,2,2,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_usat_v8i64_v8i16:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm8
+; SSE41-NEXT: movapd {{.*#+}} xmm5 = [65535,65535]
+; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm6, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002324991,9223372039002324991]
+; SSE41-NEXT: movdqa %xmm9, %xmm7
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm10, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: movapd %xmm5, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm4
+; SSE41-NEXT: movdqa %xmm8, %xmm0
+; SSE41-NEXT: pxor %xmm6, %xmm0
+; SSE41-NEXT: movdqa %xmm9, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm10, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm5, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm1
+; SSE41-NEXT: packusdw %xmm4, %xmm1
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm6, %xmm0
+; SSE41-NEXT: movdqa %xmm9, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm8, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm7, %xmm0
+; SSE41-NEXT: movapd %xmm5, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm4
+; SSE41-NEXT: pxor %xmm2, %xmm6
+; SSE41-NEXT: movdqa %xmm9, %xmm0
+; SSE41-NEXT: pcmpgtd %xmm6, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE41-NEXT: pand %xmm3, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm5
+; SSE41-NEXT: packusdw %xmm4, %xmm5
+; SSE41-NEXT: packusdw %xmm5, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_usat_v8i64_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [65535,65535,65535,65535]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854841343,9223372036854841343]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm6
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm6, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_usat_v8i64_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [65535,65535,65535,65535]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm5 = [9223372036854841343,9223372036854841343,9223372036854841343,9223372036854841343]
+; AVX2-NEXT: vpcmpgtq %ymm4, %ymm5, %ymm4
+; AVX2-NEXT: vblendvpd %ymm4, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpxor %ymm3, %ymm0, %ymm3
+; AVX2-NEXT: vpcmpgtq %ymm3, %ymm5, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_usat_v8i64_v8i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovusqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp ult <8 x i64> %a0, <i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535>
+ %2 = select <8 x i1> %1, <8 x i64> %a0, <8 x i64> <i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535, i64 65535>
+ %3 = trunc <8 x i64> %2 to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <8 x i16> @trunc_usat_v8i32_v8i16(<8 x i32> %a0) {
+; SSE2-LABEL: trunc_usat_v8i32_v8i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147549183,2147549183,2147549183,2147549183]
+; SSE2-NEXT: movdqa %xmm4, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: pxor %xmm3, %xmm5
+; SSE2-NEXT: por %xmm5, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm1, %xmm4
+; SSE2-NEXT: por %xmm3, %xmm4
+; SSE2-NEXT: pslld $16, %xmm4
+; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: pslld $16, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm4, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_usat_v8i32_v8i16:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm0, %xmm4
+; SSSE3-NEXT: pxor %xmm3, %xmm4
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [2147549183,2147549183,2147549183,2147549183]
+; SSSE3-NEXT: movdqa %xmm5, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6
+; SSSE3-NEXT: pand %xmm6, %xmm0
+; SSSE3-NEXT: pandn %xmm2, %xmm6
+; SSSE3-NEXT: por %xmm6, %xmm0
+; SSSE3-NEXT: pxor %xmm1, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm1
+; SSSE3-NEXT: pandn %xmm2, %xmm5
+; SSSE3-NEXT: por %xmm1, %xmm5
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: pshufb %xmm1, %xmm5
+; SSSE3-NEXT: pshufb %xmm1, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_usat_v8i32_v8i16:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
+; SSE41-NEXT: pminud %xmm2, %xmm1
+; SSE41-NEXT: pminud %xmm2, %xmm0
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_usat_v8i32_v8i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [65535,65535,65535,65535]
+; AVX1-NEXT: vpminud %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpminud %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_usat_v8i32_v8i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_usat_v8i32_v8i16:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-NEXT: vpminud %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_usat_v8i32_v8i16:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpmovusdw %ymm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_usat_v8i32_v8i16:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512BW-NEXT: vpminud %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_usat_v8i32_v8i16:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpmovusdw %ymm0, %xmm0
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp ult <8 x i32> %a0, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+ %2 = select <8 x i1> %1, <8 x i32> %a0, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+ %3 = trunc <8 x i32> %2 to <8 x i16>
+ ret <8 x i16> %3
+}
+
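+; With SSE41's pminud the vXi32 inputs are clamped to 65535 directly and
+; packusdw performs the saturating narrow, avoiding the sign-flip compare
+; sequence that SSE2/SSSE3 need.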
+define <16 x i16> @trunc_usat_v16i32_v16i16(<16 x i32> %a0) {
+; SSE2-LABEL: trunc_usat_v16i32_v16i16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm1, %xmm8
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm2, %xmm7
+; SSE2-NEXT: pxor %xmm6, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147549183,2147549183,2147549183,2147549183]
+; SSE2-NEXT: movdqa %xmm5, %xmm1
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm7, %xmm1
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pxor %xmm6, %xmm4
+; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm7, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pxor %xmm6, %xmm3
+; SSE2-NEXT: movdqa %xmm5, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: pxor %xmm7, %xmm4
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: pxor %xmm8, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm5
+; SSE2-NEXT: pxor %xmm5, %xmm7
+; SSE2-NEXT: pand %xmm8, %xmm5
+; SSE2-NEXT: por %xmm7, %xmm5
+; SSE2-NEXT: pslld $16, %xmm5
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: pslld $16, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm5, %xmm0
+; SSE2-NEXT: pslld $16, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: pslld $16, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: packssdw %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_usat_v16i32_v16i16:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa %xmm1, %xmm8
+; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm2, %xmm7
+; SSSE3-NEXT: pxor %xmm6, %xmm7
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [2147549183,2147549183,2147549183,2147549183]
+; SSSE3-NEXT: movdqa %xmm5, %xmm1
+; SSSE3-NEXT: pcmpgtd %xmm7, %xmm1
+; SSSE3-NEXT: pcmpeqd %xmm7, %xmm7
+; SSSE3-NEXT: pand %xmm1, %xmm2
+; SSSE3-NEXT: pxor %xmm7, %xmm1
+; SSSE3-NEXT: por %xmm2, %xmm1
+; SSSE3-NEXT: movdqa %xmm3, %xmm4
+; SSSE3-NEXT: pxor %xmm6, %xmm4
+; SSSE3-NEXT: movdqa %xmm5, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: pxor %xmm7, %xmm2
+; SSSE3-NEXT: por %xmm3, %xmm2
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
+; SSSE3-NEXT: pxor %xmm6, %xmm3
+; SSSE3-NEXT: movdqa %xmm5, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm0
+; SSSE3-NEXT: pxor %xmm7, %xmm4
+; SSSE3-NEXT: por %xmm4, %xmm0
+; SSSE3-NEXT: pxor %xmm8, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm6, %xmm5
+; SSSE3-NEXT: pxor %xmm5, %xmm7
+; SSSE3-NEXT: pand %xmm8, %xmm5
+; SSSE3-NEXT: por %xmm7, %xmm5
+; SSSE3-NEXT: pslld $16, %xmm5
+; SSSE3-NEXT: psrad $16, %xmm5
+; SSSE3-NEXT: pslld $16, %xmm0
+; SSSE3-NEXT: psrad $16, %xmm0
+; SSSE3-NEXT: packssdw %xmm5, %xmm0
+; SSSE3-NEXT: pslld $16, %xmm2
+; SSSE3-NEXT: psrad $16, %xmm2
+; SSSE3-NEXT: pslld $16, %xmm1
+; SSSE3-NEXT: psrad $16, %xmm1
+; SSSE3-NEXT: packssdw %xmm2, %xmm1
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_usat_v16i32_v16i16:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535]
+; SSE41-NEXT: pminud %xmm4, %xmm3
+; SSE41-NEXT: pminud %xmm4, %xmm2
+; SSE41-NEXT: packusdw %xmm3, %xmm2
+; SSE41-NEXT: pminud %xmm4, %xmm1
+; SSE41-NEXT: pminud %xmm4, %xmm0
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm2, %xmm1
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_usat_v16i32_v16i16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [65535,65535,65535,65535]
+; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpminud %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpminud %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_usat_v16i32_v16i16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX2-NEXT: vpminud %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpminud %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_usat_v16i32_v16i16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovusdw %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %1 = icmp ult <16 x i32> %a0, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+ %2 = select <16 x i1> %1, <16 x i32> %a0, <16 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
+ %3 = trunc <16 x i32> %2 to <16 x i16>
+ ret <16 x i16> %3
+}
+
+;
+; Unsigned saturation truncation to vXi8
+;
+
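+; The byte clamps reuse the same sign-flip compare trick with 255-based
+; constants (e.g. 9223372039002259711 = 0x80000000800000FF) and then narrow
+; in stages through packusdw/packuswb.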
+define <8 x i8> @trunc_usat_v8i64_v8i8(<8 x i64> %a0) {
+; SSE2-LABEL: trunc_usat_v8i64_v8i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255]
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT: pxor %xmm6, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259711,9223372039002259711]
+; SSE2-NEXT: movdqa %xmm9, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
+; SSE2-NEXT: por %xmm5, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm4
+; SSE2-NEXT: pandn %xmm8, %xmm0
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: pxor %xmm6, %xmm4
+; SSE2-NEXT: movdqa %xmm9, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm5[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm4[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm4
+; SSE2-NEXT: por %xmm1, %xmm4
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm6, %xmm1
+; SSE2-NEXT: movdqa %xmm9, %xmm5
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm5[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm1
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm3, %xmm6
+; SSE2-NEXT: movdqa %xmm9, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT: pand %xmm5, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm8, %xmm2
+; SSE2-NEXT: pand %xmm8, %xmm1
+; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm8, %xmm4
+; SSE2-NEXT: pand %xmm8, %xmm0
+; SSE2-NEXT: packuswb %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm0, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_usat_v8i64_v8i8:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa %xmm0, %xmm4
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [255,255]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002259456,9223372039002259456]
+; SSSE3-NEXT: pxor %xmm6, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259711,9223372039002259711]
+; SSSE3-NEXT: movdqa %xmm9, %xmm7
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,3,3]
+; SSSE3-NEXT: por %xmm5, %xmm0
+; SSSE3-NEXT: pand %xmm0, %xmm4
+; SSSE3-NEXT: pandn %xmm8, %xmm0
+; SSSE3-NEXT: por %xmm4, %xmm0
+; SSSE3-NEXT: movdqa %xmm1, %xmm4
+; SSSE3-NEXT: pxor %xmm6, %xmm4
+; SSSE3-NEXT: movdqa %xmm9, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm5[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm4[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pandn %xmm8, %xmm4
+; SSSE3-NEXT: por %xmm1, %xmm4
+; SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSSE3-NEXT: pxor %xmm6, %xmm1
+; SSSE3-NEXT: movdqa %xmm9, %xmm5
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm5[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm1
+; SSSE3-NEXT: por %xmm2, %xmm1
+; SSSE3-NEXT: pxor %xmm3, %xmm6
+; SSSE3-NEXT: movdqa %xmm9, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm6, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSSE3-NEXT: pand %xmm5, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm2
+; SSSE3-NEXT: por %xmm3, %xmm2
+; SSSE3-NEXT: pand %xmm8, %xmm2
+; SSSE3-NEXT: pand %xmm8, %xmm1
+; SSSE3-NEXT: packuswb %xmm2, %xmm1
+; SSSE3-NEXT: pand %xmm8, %xmm4
+; SSSE3-NEXT: pand %xmm8, %xmm0
+; SSSE3-NEXT: packuswb %xmm4, %xmm0
+; SSSE3-NEXT: packuswb %xmm1, %xmm0
+; SSSE3-NEXT: packuswb %xmm0, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_usat_v8i64_v8i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: movapd {{.*#+}} xmm8 = [255,255]
+; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT: pxor %xmm7, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259711,9223372039002259711]
+; SSE41-NEXT: movdqa %xmm9, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm10, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm8, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm4
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm7, %xmm0
+; SSE41-NEXT: movdqa %xmm9, %xmm5
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm5[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm10, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm8, %xmm5
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm5
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm7, %xmm0
+; SSE41-NEXT: movdqa %xmm9, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm10, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm8, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; SSE41-NEXT: pxor %xmm3, %xmm7
+; SSE41-NEXT: movdqa %xmm9, %xmm0
+; SSE41-NEXT: pcmpgtd %xmm7, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3]
+; SSE41-NEXT: pand %xmm2, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT: por %xmm6, %xmm0
+; SSE41-NEXT: movapd %xmm8, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm2
+; SSE41-NEXT: andpd %xmm8, %xmm2
+; SSE41-NEXT: andpd %xmm8, %xmm1
+; SSE41-NEXT: packusdw %xmm2, %xmm1
+; SSE41-NEXT: andpd %xmm8, %xmm5
+; SSE41-NEXT: andpd %xmm8, %xmm4
+; SSE41-NEXT: packusdw %xmm5, %xmm4
+; SSE41-NEXT: packusdw %xmm1, %xmm4
+; SSE41-NEXT: packuswb %xmm4, %xmm4
+; SSE41-NEXT: movdqa %xmm4, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_usat_v8i64_v8i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [255,255,255,255]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854776063,9223372036854776063]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm6
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm6, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm3 = mem[0,0]
+; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_usat_v8i64_v8i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [255,255,255,255]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-NEXT: vpxor %ymm3, %ymm0, %ymm4
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm5 = [9223372036854776063,9223372036854776063,9223372036854776063,9223372036854776063]
+; AVX2-NEXT: vpcmpgtq %ymm4, %ymm5, %ymm4
+; AVX2-NEXT: vblendvpd %ymm4, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm3
+; AVX2-NEXT: vpcmpgtq %ymm3, %ymm5, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_usat_v8i64_v8i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpminuq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512F-NEXT: vmovq %xmm0, %rcx
+; AVX512F-NEXT: vmovd %ecx, %xmm1
+; AVX512F-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512F-NEXT: vmovq %xmm2, %rax
+; AVX512F-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512F-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vextracti32x4 $2, %zmm0, %xmm2
+; AVX512F-NEXT: vmovq %xmm2, %rax
+; AVX512F-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512F-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vextracti32x4 $3, %zmm0, %xmm0
+; AVX512F-NEXT: vmovq %xmm0, %rax
+; AVX512F-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512F-NEXT: vpinsrb $7, %eax, %xmm1, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_usat_v8i64_v8i8:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpminuq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512VL-NEXT: vmovq %xmm0, %rcx
+; AVX512VL-NEXT: vmovd %ecx, %xmm1
+; AVX512VL-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512VL-NEXT: vmovq %xmm2, %rax
+; AVX512VL-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512VL-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vextracti32x4 $2, %zmm0, %xmm2
+; AVX512VL-NEXT: vmovq %xmm2, %rax
+; AVX512VL-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512VL-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vextracti32x4 $3, %zmm0, %xmm0
+; AVX512VL-NEXT: vmovq %xmm0, %rax
+; AVX512VL-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512VL-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512VL-NEXT: vpinsrb $7, %eax, %xmm1, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_usat_v8i64_v8i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpminuq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512BW-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX512BW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX512BW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_usat_v8i64_v8i8:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpminuq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512BWVL-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BWVL-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512BWVL-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BWVL-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX512BWVL-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX512BWVL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX512BWVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp ult <8 x i64> %a0, <i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255>
+ %2 = select <8 x i1> %1, <8 x i64> %a0, <8 x i64> <i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255>
+ %3 = trunc <8 x i64> %2 to <8 x i8>
+ ret <8 x i8> %3
+}
+
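+; The store variant below lets AVX512 use vpmovusqb directly, while the
+; value-returning version above goes through vpminuq plus per-element
+; reinsertion (AVX512F/AVX512VL) or pshufb merges (AVX512BW), presumably
+; because the <8 x i8> result must be materialized in an xmm register.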
+define void @trunc_usat_v8i64_v8i8_store(<8 x i64> %a0, <8 x i8> *%p1) {
+; SSE2-LABEL: trunc_usat_v8i64_v8i8_store:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255]
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: pxor %xmm6, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259711,9223372039002259711]
+; SSE2-NEXT: movdqa %xmm9, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; SSE2-NEXT: por %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm0
+; SSE2-NEXT: pandn %xmm8, %xmm5
+; SSE2-NEXT: por %xmm0, %xmm5
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pxor %xmm6, %xmm0
+; SSE2-NEXT: movdqa %xmm9, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm0
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm6, %xmm1
+; SSE2-NEXT: movdqa %xmm9, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm4[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm10, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
+; SSE2-NEXT: por %xmm7, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm1
+; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm3, %xmm6
+; SSE2-NEXT: movdqa %xmm9, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm9, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm6, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm8, %xmm2
+; SSE2-NEXT: pand %xmm8, %xmm1
+; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm8, %xmm0
+; SSE2-NEXT: pand %xmm8, %xmm5
+; SSE2-NEXT: packuswb %xmm0, %xmm5
+; SSE2-NEXT: packuswb %xmm1, %xmm5
+; SSE2-NEXT: packuswb %xmm5, %xmm5
+; SSE2-NEXT: movq %xmm5, (%rdi)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_usat_v8i64_v8i8_store:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [255,255]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [9223372039002259456,9223372039002259456]
+; SSSE3-NEXT: movdqa %xmm0, %xmm5
+; SSSE3-NEXT: pxor %xmm6, %xmm5
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259711,9223372039002259711]
+; SSSE3-NEXT: movdqa %xmm9, %xmm7
+; SSSE3-NEXT: pcmpgtd %xmm5, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; SSSE3-NEXT: por %xmm4, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm0
+; SSSE3-NEXT: pandn %xmm8, %xmm5
+; SSSE3-NEXT: por %xmm0, %xmm5
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pxor %xmm6, %xmm0
+; SSSE3-NEXT: movdqa %xmm9, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm0
+; SSSE3-NEXT: pand %xmm0, %xmm1
+; SSSE3-NEXT: pandn %xmm8, %xmm0
+; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSSE3-NEXT: pxor %xmm6, %xmm1
+; SSSE3-NEXT: movdqa %xmm9, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm4[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm10, %xmm7
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
+; SSSE3-NEXT: por %xmm7, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm1
+; SSSE3-NEXT: por %xmm2, %xmm1
+; SSSE3-NEXT: pxor %xmm3, %xmm6
+; SSSE3-NEXT: movdqa %xmm9, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm6, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm9, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm6
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm6, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm2
+; SSSE3-NEXT: por %xmm3, %xmm2
+; SSSE3-NEXT: pand %xmm8, %xmm2
+; SSSE3-NEXT: pand %xmm8, %xmm1
+; SSSE3-NEXT: packuswb %xmm2, %xmm1
+; SSSE3-NEXT: pand %xmm8, %xmm0
+; SSSE3-NEXT: pand %xmm8, %xmm5
+; SSSE3-NEXT: packuswb %xmm0, %xmm5
+; SSSE3-NEXT: packuswb %xmm1, %xmm5
+; SSSE3-NEXT: packuswb %xmm5, %xmm5
+; SSSE3-NEXT: movq %xmm5, (%rdi)
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_usat_v8i64_v8i8_store:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: movapd {{.*#+}} xmm8 = [255,255]
+; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT: pxor %xmm7, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259711,9223372039002259711]
+; SSE41-NEXT: movdqa %xmm9, %xmm6
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm6
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm6[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm10, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm8, %xmm6
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm6
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm7, %xmm0
+; SSE41-NEXT: movdqa %xmm9, %xmm4
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm4[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm10, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm8, %xmm4
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm4
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm7, %xmm0
+; SSE41-NEXT: movdqa %xmm9, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm10, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm8, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; SSE41-NEXT: pxor %xmm3, %xmm7
+; SSE41-NEXT: movdqa %xmm9, %xmm0
+; SSE41-NEXT: pcmpgtd %xmm7, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm9, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,3,3]
+; SSE41-NEXT: pand %xmm2, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm8, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm2
+; SSE41-NEXT: andpd %xmm8, %xmm2
+; SSE41-NEXT: andpd %xmm8, %xmm1
+; SSE41-NEXT: packusdw %xmm2, %xmm1
+; SSE41-NEXT: andpd %xmm8, %xmm4
+; SSE41-NEXT: andpd %xmm8, %xmm6
+; SSE41-NEXT: packusdw %xmm4, %xmm6
+; SSE41-NEXT: packusdw %xmm1, %xmm6
+; SSE41-NEXT: packuswb %xmm6, %xmm6
+; SSE41-NEXT: movq %xmm6, (%rdi)
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_usat_v8i64_v8i8_store:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [255,255,255,255]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854776063,9223372036854776063]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm6
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm6, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm3 = mem[0,0]
+; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_usat_v8i64_v8i8_store:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [255,255,255,255]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-NEXT: vpxor %ymm3, %ymm0, %ymm4
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm5 = [9223372036854776063,9223372036854776063,9223372036854776063,9223372036854776063]
+; AVX2-NEXT: vpcmpgtq %ymm4, %ymm5, %ymm4
+; AVX2-NEXT: vblendvpd %ymm4, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm3
+; AVX2-NEXT: vpcmpgtq %ymm3, %ymm5, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX2-NEXT: vmovq %xmm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_usat_v8i64_v8i8_store:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovusqb %zmm0, (%rdi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp ult <8 x i64> %a0, <i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255>
+ %2 = select <8 x i1> %1, <8 x i64> %a0, <8 x i64> <i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255>
+ %3 = trunc <8 x i64> %2 to <8 x i8>
+ store <8 x i8> %3, <8 x i8> *%p1
+ ret void
+}
+
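+; Unsigned saturating truncation of <16 x i64> to <16 x i8>: each lane is
+; clamped to [0,255] before the truncate. Lacking a native unsigned 64-bit
+; compare, the SSE lowerings flip the sign bits with pxor and rebuild the
+; compare from signed 32-bit pcmpgtd/pcmpeqd halves, AVX uses a biased
+; vpcmpgtq, and AVX512F folds the whole pattern into vpminuq plus a
+; two-step vpmovqd/vpmovdb truncate.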
+define <16 x i8> @trunc_usat_v16i64_v16i8(<16 x i64> %a0) {
+; SSE2-LABEL: trunc_usat_v16i64_v16i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255]
+; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT: movdqa %xmm1, %xmm11
+; SSE2-NEXT: pxor %xmm9, %xmm11
+; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [9223372039002259711,9223372039002259711]
+; SSE2-NEXT: movdqa %xmm10, %xmm12
+; SSE2-NEXT: pcmpgtd %xmm11, %xmm12
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm11
+; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm11[1,1,3,3]
+; SSE2-NEXT: pand %xmm13, %xmm11
+; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm12[1,1,3,3]
+; SSE2-NEXT: por %xmm11, %xmm12
+; SSE2-NEXT: pand %xmm12, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm12
+; SSE2-NEXT: por %xmm1, %xmm12
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm9, %xmm1
+; SSE2-NEXT: movdqa %xmm10, %xmm11
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm11
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm11[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm14 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm13, %xmm14
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm11[1,1,3,3]
+; SSE2-NEXT: por %xmm14, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: pandn %xmm8, %xmm1
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm12, %xmm0
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: pxor %xmm9, %xmm1
+; SSE2-NEXT: movdqa %xmm10, %xmm11
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm11
+; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm12, %xmm13
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm11[1,1,3,3]
+; SSE2-NEXT: por %xmm13, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm1
+; SSE2-NEXT: por %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm9, %xmm3
+; SSE2-NEXT: movdqa %xmm10, %xmm11
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm11
+; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm3[1,1,3,3]
+; SSE2-NEXT: pand %xmm12, %xmm13
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm11[1,1,3,3]
+; SSE2-NEXT: por %xmm13, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm3
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: packuswb %xmm1, %xmm3
+; SSE2-NEXT: packuswb %xmm3, %xmm0
+; SSE2-NEXT: movdqa %xmm5, %xmm1
+; SSE2-NEXT: pxor %xmm9, %xmm1
+; SSE2-NEXT: movdqa %xmm10, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm5
+; SSE2-NEXT: pandn %xmm8, %xmm2
+; SSE2-NEXT: por %xmm5, %xmm2
+; SSE2-NEXT: movdqa %xmm4, %xmm1
+; SSE2-NEXT: pxor %xmm9, %xmm1
+; SSE2-NEXT: movdqa %xmm10, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
+; SSE2-NEXT: pand %xmm11, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: pand %xmm1, %xmm4
+; SSE2-NEXT: pandn %xmm8, %xmm1
+; SSE2-NEXT: por %xmm4, %xmm1
+; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm7, %xmm2
+; SSE2-NEXT: pxor %xmm9, %xmm2
+; SSE2-NEXT: movdqa %xmm10, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm7
+; SSE2-NEXT: pandn %xmm8, %xmm3
+; SSE2-NEXT: por %xmm7, %xmm3
+; SSE2-NEXT: pxor %xmm6, %xmm9
+; SSE2-NEXT: movdqa %xmm10, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm9, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE2-NEXT: pcmpeqd %xmm10, %xmm9
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm9[1,1,3,3]
+; SSE2-NEXT: pand %xmm4, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT: por %xmm5, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm6
+; SSE2-NEXT: pandn %xmm8, %xmm2
+; SSE2-NEXT: por %xmm6, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: packuswb %xmm2, %xmm1
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_usat_v16i64_v16i8:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [255,255]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259456,9223372039002259456]
+; SSSE3-NEXT: movdqa %xmm1, %xmm11
+; SSSE3-NEXT: pxor %xmm9, %xmm11
+; SSSE3-NEXT: movdqa {{.*#+}} xmm10 = [9223372039002259711,9223372039002259711]
+; SSSE3-NEXT: movdqa %xmm10, %xmm12
+; SSSE3-NEXT: pcmpgtd %xmm11, %xmm12
+; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm11
+; SSSE3-NEXT: pshufd {{.*#+}} xmm11 = xmm11[1,1,3,3]
+; SSSE3-NEXT: pand %xmm13, %xmm11
+; SSSE3-NEXT: pshufd {{.*#+}} xmm12 = xmm12[1,1,3,3]
+; SSSE3-NEXT: por %xmm11, %xmm12
+; SSSE3-NEXT: pand %xmm12, %xmm1
+; SSSE3-NEXT: pandn %xmm8, %xmm12
+; SSSE3-NEXT: por %xmm1, %xmm12
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm9, %xmm1
+; SSSE3-NEXT: movdqa %xmm10, %xmm11
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm11
+; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm11[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm14 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm13, %xmm14
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm11[1,1,3,3]
+; SSSE3-NEXT: por %xmm14, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: pandn %xmm8, %xmm1
+; SSSE3-NEXT: por %xmm1, %xmm0
+; SSSE3-NEXT: packuswb %xmm12, %xmm0
+; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: pxor %xmm9, %xmm1
+; SSSE3-NEXT: movdqa %xmm10, %xmm11
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm11
+; SSSE3-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm12, %xmm13
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm11[1,1,3,3]
+; SSSE3-NEXT: por %xmm13, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm1
+; SSSE3-NEXT: por %xmm3, %xmm1
+; SSSE3-NEXT: movdqa %xmm2, %xmm3
+; SSSE3-NEXT: pxor %xmm9, %xmm3
+; SSSE3-NEXT: movdqa %xmm10, %xmm11
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm11
+; SSSE3-NEXT: pshufd {{.*#+}} xmm12 = xmm11[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm3[1,1,3,3]
+; SSSE3-NEXT: pand %xmm12, %xmm13
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm11[1,1,3,3]
+; SSSE3-NEXT: por %xmm13, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm3
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: packuswb %xmm1, %xmm3
+; SSSE3-NEXT: packuswb %xmm3, %xmm0
+; SSSE3-NEXT: movdqa %xmm5, %xmm1
+; SSSE3-NEXT: pxor %xmm9, %xmm1
+; SSSE3-NEXT: movdqa %xmm10, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm3, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm1, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm5
+; SSSE3-NEXT: pandn %xmm8, %xmm2
+; SSSE3-NEXT: por %xmm5, %xmm2
+; SSSE3-NEXT: movdqa %xmm4, %xmm1
+; SSSE3-NEXT: pxor %xmm9, %xmm1
+; SSSE3-NEXT: movdqa %xmm10, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm11 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pand %xmm11, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm5, %xmm1
+; SSSE3-NEXT: pand %xmm1, %xmm4
+; SSSE3-NEXT: pandn %xmm8, %xmm1
+; SSSE3-NEXT: por %xmm4, %xmm1
+; SSSE3-NEXT: packuswb %xmm2, %xmm1
+; SSSE3-NEXT: movdqa %xmm7, %xmm2
+; SSSE3-NEXT: pxor %xmm9, %xmm2
+; SSSE3-NEXT: movdqa %xmm10, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm2, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSSE3-NEXT: por %xmm2, %xmm3
+; SSSE3-NEXT: pand %xmm3, %xmm7
+; SSSE3-NEXT: pandn %xmm8, %xmm3
+; SSSE3-NEXT: por %xmm7, %xmm3
+; SSSE3-NEXT: pxor %xmm6, %xmm9
+; SSSE3-NEXT: movdqa %xmm10, %xmm2
+; SSSE3-NEXT: pcmpgtd %xmm9, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSSE3-NEXT: pcmpeqd %xmm10, %xmm9
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm9[1,1,3,3]
+; SSSE3-NEXT: pand %xmm4, %xmm5
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSSE3-NEXT: por %xmm5, %xmm2
+; SSSE3-NEXT: pand %xmm2, %xmm6
+; SSSE3-NEXT: pandn %xmm8, %xmm2
+; SSSE3-NEXT: por %xmm6, %xmm2
+; SSSE3-NEXT: packuswb %xmm3, %xmm2
+; SSSE3-NEXT: packuswb %xmm2, %xmm1
+; SSSE3-NEXT: packuswb %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_usat_v16i64_v16i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm8
+; SSE41-NEXT: movapd {{.*#+}} xmm9 = [255,255]
+; SSE41-NEXT: movdqa {{.*#+}} xmm10 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm11 = [9223372039002259711,9223372039002259711]
+; SSE41-NEXT: movdqa %xmm11, %xmm12
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm12
+; SSE41-NEXT: pshufd {{.*#+}} xmm13 = xmm12[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm13, %xmm14
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm12[1,1,3,3]
+; SSE41-NEXT: por %xmm14, %xmm0
+; SSE41-NEXT: movapd %xmm9, %xmm12
+; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm12
+; SSE41-NEXT: movdqa %xmm8, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm13 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm14 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm13, %xmm14
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm14, %xmm0
+; SSE41-NEXT: movapd %xmm9, %xmm13
+; SSE41-NEXT: blendvpd %xmm0, %xmm8, %xmm13
+; SSE41-NEXT: packusdw %xmm12, %xmm13
+; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm12 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm8, %xmm12
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm12, %xmm0
+; SSE41-NEXT: movapd %xmm9, %xmm12
+; SSE41-NEXT: blendvpd %xmm0, %xmm3, %xmm12
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm3
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm3[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm8, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE41-NEXT: por %xmm1, %xmm0
+; SSE41-NEXT: movapd %xmm9, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
+; SSE41-NEXT: packusdw %xmm12, %xmm1
+; SSE41-NEXT: packusdw %xmm1, %xmm13
+; SSE41-NEXT: movdqa %xmm5, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm2, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm3, %xmm0
+; SSE41-NEXT: movapd %xmm9, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm5, %xmm1
+; SSE41-NEXT: movdqa %xmm4, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm2
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm3, %xmm5
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm5, %xmm0
+; SSE41-NEXT: movapd %xmm9, %xmm2
+; SSE41-NEXT: blendvpd %xmm0, %xmm4, %xmm2
+; SSE41-NEXT: packusdw %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm7, %xmm0
+; SSE41-NEXT: pxor %xmm10, %xmm0
+; SSE41-NEXT: movdqa %xmm11, %xmm1
+; SSE41-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE41-NEXT: pand %xmm3, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: movapd %xmm9, %xmm1
+; SSE41-NEXT: blendvpd %xmm0, %xmm7, %xmm1
+; SSE41-NEXT: pxor %xmm6, %xmm10
+; SSE41-NEXT: movdqa %xmm11, %xmm0
+; SSE41-NEXT: pcmpgtd %xmm10, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,2,2]
+; SSE41-NEXT: pcmpeqd %xmm11, %xmm10
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm10[1,1,3,3]
+; SSE41-NEXT: pand %xmm3, %xmm4
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT: por %xmm4, %xmm0
+; SSE41-NEXT: blendvpd %xmm0, %xmm6, %xmm9
+; SSE41-NEXT: packusdw %xmm1, %xmm9
+; SSE41-NEXT: packusdw %xmm9, %xmm2
+; SSE41-NEXT: packuswb %xmm2, %xmm13
+; SSE41-NEXT: movdqa %xmm13, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_usat_v16i64_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovapd {{.*#+}} ymm8 = [255,255,255,255]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [9223372036854776063,9223372036854776063]
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT: vpxor %xmm6, %xmm0, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm7, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm4
+; AVX1-NEXT: vblendvpd %ymm4, %ymm0, %ymm8, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpxor %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm7, %xmm4
+; AVX1-NEXT: vpxor %xmm6, %xmm1, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
+; AVX1-NEXT: vblendvpd %ymm4, %ymm1, %ymm8, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vpxor %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm7, %xmm4
+; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
+; AVX1-NEXT: vblendvpd %ymm4, %ymm2, %ymm8, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vpxor %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm7, %xmm4
+; AVX1-NEXT: vpxor %xmm6, %xmm3, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
+; AVX1-NEXT: vblendvpd %ymm4, %ymm3, %ymm8, %ymm3
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_usat_v16i64_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm4 = [255,255,255,255]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm5 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-NEXT: vpxor %ymm5, %ymm1, %ymm6
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm7 = [9223372036854776063,9223372036854776063,9223372036854776063,9223372036854776063]
+; AVX2-NEXT: vpcmpgtq %ymm6, %ymm7, %ymm6
+; AVX2-NEXT: vblendvpd %ymm6, %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpxor %ymm5, %ymm0, %ymm6
+; AVX2-NEXT: vpcmpgtq %ymm6, %ymm7, %ymm6
+; AVX2-NEXT: vblendvpd %ymm6, %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm5, %ymm3, %ymm1
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm7, %ymm1
+; AVX2-NEXT: vblendvpd %ymm1, %ymm3, %ymm4, %ymm1
+; AVX2-NEXT: vpxor %ymm5, %ymm2, %ymm3
+; AVX2-NEXT: vpcmpgtq %ymm3, %ymm7, %ymm3
+; AVX2-NEXT: vblendvpd %ymm3, %ymm2, %ymm4, %ymm2
+; AVX2-NEXT: vpackusdw %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_usat_v16i64_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255]
+; AVX512-NEXT: vpminuq %zmm2, %zmm1, %zmm1
+; AVX512-NEXT: vpminuq %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp ult <16 x i64> %a0, <i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255>
+ %2 = select <16 x i1> %1, <16 x i64> %a0, <16 x i64> <i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255, i64 255>
+ %3 = trunc <16 x i64> %2 to <16 x i8>
+ ret <16 x i8> %3
+}
+
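+; Unsigned saturating truncation of <8 x i32> to <8 x i8>. SSE2 has no
+; unsigned dword min, so it biases both operands by 0x80000000 and selects
+; with a signed pcmpgtd; SSE4.1 gets pminud directly.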
+define <8 x i8> @trunc_usat_v8i32_v8i8(<8 x i32> %a0) {
+; SSE2-LABEL: trunc_usat_v8i32_v8i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255]
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: pxor %xmm4, %xmm5
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483903,2147483903,2147483903,2147483903]
+; SSE2-NEXT: movdqa %xmm6, %xmm2
+; SSE2-NEXT: pcmpgtd %xmm5, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pandn %xmm3, %xmm2
+; SSE2-NEXT: por %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm1, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm6
+; SSE2-NEXT: pand %xmm6, %xmm1
+; SSE2-NEXT: pandn %xmm3, %xmm6
+; SSE2-NEXT: por %xmm1, %xmm6
+; SSE2-NEXT: pand %xmm3, %xmm6
+; SSE2-NEXT: pand %xmm3, %xmm2
+; SSE2-NEXT: packuswb %xmm6, %xmm2
+; SSE2-NEXT: packuswb %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_usat_v8i32_v8i8:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm0, %xmm4
+; SSSE3-NEXT: pxor %xmm3, %xmm4
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [2147483903,2147483903,2147483903,2147483903]
+; SSSE3-NEXT: movdqa %xmm5, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6
+; SSSE3-NEXT: pand %xmm6, %xmm0
+; SSSE3-NEXT: pandn %xmm2, %xmm6
+; SSSE3-NEXT: por %xmm6, %xmm0
+; SSSE3-NEXT: pxor %xmm1, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm1
+; SSSE3-NEXT: pandn %xmm2, %xmm5
+; SSSE3-NEXT: por %xmm1, %xmm5
+; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; SSSE3-NEXT: pshufb %xmm1, %xmm5
+; SSSE3-NEXT: pshufb %xmm1, %xmm0
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_usat_v8i32_v8i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255]
+; SSE41-NEXT: pminud %xmm2, %xmm0
+; SSE41-NEXT: pminud %xmm2, %xmm1
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm2, %xmm1
+; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_usat_v8i32_v8i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255]
+; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_usat_v8i32_v8i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
+; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_usat_v8i32_v8i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
+; AVX512F-NEXT: vpminud %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_usat_v8i32_v8i8:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpminud {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_usat_v8i32_v8i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
+; AVX512BW-NEXT: vpminud %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_usat_v8i32_v8i8:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpminud {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512BWVL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp ult <8 x i32> %a0, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %2 = select <8 x i1> %1, <8 x i32> %a0, <8 x i32> <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %3 = trunc <8 x i32> %2 to <8 x i8>
+ ret <8 x i8> %3
+}
+
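+; Same clamp as above but stored to memory: the VL targets can emit a single
+; truncating vpmovusdb store instead of materializing the vector result.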
+define void @trunc_usat_v8i32_v8i8_store(<8 x i32> %a0, <8 x i8> *%p1) {
+; SSE2-LABEL: trunc_usat_v8i32_v8i8_store:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255]
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: pxor %xmm3, %xmm4
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483903,2147483903,2147483903,2147483903]
+; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm4, %xmm6
+; SSE2-NEXT: pand %xmm6, %xmm0
+; SSE2-NEXT: pandn %xmm2, %xmm6
+; SSE2-NEXT: por %xmm0, %xmm6
+; SSE2-NEXT: pxor %xmm1, %xmm3
+; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: pandn %xmm2, %xmm5
+; SSE2-NEXT: por %xmm1, %xmm5
+; SSE2-NEXT: pand %xmm2, %xmm5
+; SSE2-NEXT: pand %xmm2, %xmm6
+; SSE2-NEXT: packuswb %xmm5, %xmm6
+; SSE2-NEXT: packuswb %xmm6, %xmm6
+; SSE2-NEXT: movq %xmm6, (%rdi)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_usat_v8i32_v8i8_store:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm0, %xmm4
+; SSSE3-NEXT: pxor %xmm3, %xmm4
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [2147483903,2147483903,2147483903,2147483903]
+; SSSE3-NEXT: movdqa %xmm5, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm4, %xmm6
+; SSSE3-NEXT: pand %xmm6, %xmm0
+; SSSE3-NEXT: pandn %xmm2, %xmm6
+; SSSE3-NEXT: por %xmm0, %xmm6
+; SSSE3-NEXT: pxor %xmm1, %xmm3
+; SSSE3-NEXT: pcmpgtd %xmm3, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm1
+; SSSE3-NEXT: pandn %xmm2, %xmm5
+; SSSE3-NEXT: por %xmm1, %xmm5
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; SSSE3-NEXT: pshufb %xmm0, %xmm5
+; SSSE3-NEXT: pshufb %xmm0, %xmm6
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; SSSE3-NEXT: movq %xmm6, (%rdi)
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_usat_v8i32_v8i8_store:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255]
+; SSE41-NEXT: pminud %xmm2, %xmm0
+; SSE41-NEXT: pminud %xmm2, %xmm1
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm2, %xmm1
+; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE41-NEXT: movq %xmm0, (%rdi)
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_usat_v8i32_v8i8_store:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255]
+; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX1-NEXT: vmovq %xmm0, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_usat_v8i32_v8i8_store:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
+; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX2-NEXT: vmovq %xmm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_usat_v8i32_v8i8_store:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
+; AVX512F-NEXT: vpminud %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vmovq %xmm0, (%rdi)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_usat_v8i32_v8i8_store:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpmovusdb %ymm0, (%rdi)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_usat_v8i32_v8i8_store:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
+; AVX512BW-NEXT: vpminud %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vmovq %xmm0, (%rdi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_usat_v8i32_v8i8_store:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpmovusdb %ymm0, (%rdi)
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp ult <8 x i32> %a0, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %2 = select <8 x i1> %1, <8 x i32> %a0, <8 x i32> <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %3 = trunc <8 x i32> %2 to <8 x i8>
+ store <8 x i8> %3, <8 x i8> *%p1
+ ret void
+}
+
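+; Unsigned saturating truncation of <16 x i32> to <16 x i8>. On AVX512 this
+; is a single vpmovusdb; SSE4.1 needs four pminud ops plus a
+; packusdw/packuswb chain.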
+define <16 x i8> @trunc_usat_v16i32_v16i8(<16 x i32> %a0) {
+; SSE2-LABEL: trunc_usat_v16i32_v16i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255]
+; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT: movdqa %xmm1, %xmm7
+; SSE2-NEXT: pxor %xmm6, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [2147483903,2147483903,2147483903,2147483903]
+; SSE2-NEXT: movdqa %xmm5, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pandn %xmm8, %xmm4
+; SSE2-NEXT: por %xmm1, %xmm4
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm6, %xmm1
+; SSE2-NEXT: movdqa %xmm5, %xmm7
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm7
+; SSE2-NEXT: pand %xmm7, %xmm0
+; SSE2-NEXT: pandn %xmm8, %xmm7
+; SSE2-NEXT: por %xmm7, %xmm0
+; SSE2-NEXT: packuswb %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: pxor %xmm6, %xmm1
+; SSE2-NEXT: movdqa %xmm5, %xmm4
+; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pandn %xmm8, %xmm4
+; SSE2-NEXT: por %xmm3, %xmm4
+; SSE2-NEXT: pxor %xmm2, %xmm6
+; SSE2-NEXT: pcmpgtd %xmm6, %xmm5
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pandn %xmm8, %xmm5
+; SSE2-NEXT: por %xmm2, %xmm5
+; SSE2-NEXT: packuswb %xmm4, %xmm5
+; SSE2-NEXT: packuswb %xmm5, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_usat_v16i32_v16i8:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm6 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm1, %xmm7
+; SSSE3-NEXT: pxor %xmm6, %xmm7
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [2147483903,2147483903,2147483903,2147483903]
+; SSSE3-NEXT: movdqa %xmm5, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm7, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pandn %xmm8, %xmm4
+; SSSE3-NEXT: por %xmm1, %xmm4
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm6, %xmm1
+; SSSE3-NEXT: movdqa %xmm5, %xmm7
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm7
+; SSSE3-NEXT: pand %xmm7, %xmm0
+; SSSE3-NEXT: pandn %xmm8, %xmm7
+; SSSE3-NEXT: por %xmm7, %xmm0
+; SSSE3-NEXT: packuswb %xmm4, %xmm0
+; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: pxor %xmm6, %xmm1
+; SSSE3-NEXT: movdqa %xmm5, %xmm4
+; SSSE3-NEXT: pcmpgtd %xmm1, %xmm4
+; SSSE3-NEXT: pand %xmm4, %xmm3
+; SSSE3-NEXT: pandn %xmm8, %xmm4
+; SSSE3-NEXT: por %xmm3, %xmm4
+; SSSE3-NEXT: pxor %xmm2, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm6, %xmm5
+; SSSE3-NEXT: pand %xmm5, %xmm2
+; SSSE3-NEXT: pandn %xmm8, %xmm5
+; SSSE3-NEXT: por %xmm2, %xmm5
+; SSSE3-NEXT: packuswb %xmm4, %xmm5
+; SSSE3-NEXT: packuswb %xmm5, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_usat_v16i32_v16i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255]
+; SSE41-NEXT: pminud %xmm4, %xmm1
+; SSE41-NEXT: pminud %xmm4, %xmm0
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: pminud %xmm4, %xmm3
+; SSE41-NEXT: pminud %xmm4, %xmm2
+; SSE41-NEXT: packusdw %xmm3, %xmm2
+; SSE41-NEXT: packuswb %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_usat_v16i32_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255]
+; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpminud %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpminud %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_usat_v16i32_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255]
+; AVX2-NEXT: vpminud %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpminud %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc_usat_v16i32_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovusdb %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %1 = icmp ult <16 x i32> %a0, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %2 = select <16 x i1> %1, <16 x i32> %a0, <16 x i32> <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
+ %3 = trunc <16 x i32> %2 to <16 x i8>
+ ret <16 x i8> %3
+}
+
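+; Unsigned saturating truncation of <16 x i16> to <16 x i8>. SSE2 lacks
+; pminuw, so it xors the sign bits (0x8000) around a signed pminsw against
+; 0x80ff; packuswb then performs the final unsigned-saturating pack.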
+define <16 x i8> @trunc_usat_v16i16_v16i8(<16 x i16> %a0) {
+; SSE2-LABEL: trunc_usat_v16i16_v16i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; SSE2-NEXT: pxor %xmm2, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [33023,33023,33023,33023,33023,33023,33023,33023]
+; SSE2-NEXT: pminsw %xmm3, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: pminsw %xmm3, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_usat_v16i16_v16i8:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; SSSE3-NEXT: pxor %xmm2, %xmm1
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [33023,33023,33023,33023,33023,33023,33023,33023]
+; SSSE3-NEXT: pminsw %xmm3, %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm0
+; SSSE3-NEXT: pminsw %xmm3, %xmm0
+; SSSE3-NEXT: pxor %xmm2, %xmm0
+; SSSE3-NEXT: packuswb %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_usat_v16i16_v16i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT: pminuw %xmm2, %xmm1
+; SSE41-NEXT: pminuw %xmm2, %xmm0
+; SSE41-NEXT: packuswb %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_usat_v16i16_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpminuw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpminuw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_usat_v16i16_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpminuw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_usat_v16i16_v16i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpminuw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_usat_v16i16_v16i8:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpminuw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_usat_v16i16_v16i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpminuw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_usat_v16i16_v16i8:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpmovuswb %ymm0, %xmm0
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = icmp ult <16 x i16> %a0, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %2 = select <16 x i1> %1, <16 x i16> %a0, <16 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %3 = trunc <16 x i16> %2 to <16 x i8>
+ ret <16 x i8> %3
+}
+
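+; The 512-bit version of the word-to-byte clamp; AVX512BW reduces it to a
+; single vpmovuswb from zmm to ymm.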
+define <32 x i8> @trunc_usat_v32i16_v32i8(<32 x i16> %a0) {
+; SSE2-LABEL: trunc_usat_v32i16_v32i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; SSE2-NEXT: pxor %xmm4, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [33023,33023,33023,33023,33023,33023,33023,33023]
+; SSE2-NEXT: pminsw %xmm5, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm2
+; SSE2-NEXT: pminsw %xmm5, %xmm2
+; SSE2-NEXT: pxor %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: pminsw %xmm5, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm0
+; SSE2-NEXT: pminsw %xmm5, %xmm0
+; SSE2-NEXT: pxor %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc_usat_v32i16_v32i8:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; SSSE3-NEXT: pxor %xmm4, %xmm3
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [33023,33023,33023,33023,33023,33023,33023,33023]
+; SSSE3-NEXT: pminsw %xmm5, %xmm3
+; SSSE3-NEXT: pxor %xmm4, %xmm3
+; SSSE3-NEXT: pxor %xmm4, %xmm2
+; SSSE3-NEXT: pminsw %xmm5, %xmm2
+; SSSE3-NEXT: pxor %xmm4, %xmm2
+; SSSE3-NEXT: packuswb %xmm3, %xmm2
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: pminsw %xmm5, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm0
+; SSSE3-NEXT: pminsw %xmm5, %xmm0
+; SSSE3-NEXT: pxor %xmm4, %xmm0
+; SSSE3-NEXT: packuswb %xmm1, %xmm0
+; SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc_usat_v32i16_v32i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT: pminuw %xmm4, %xmm3
+; SSE41-NEXT: pminuw %xmm4, %xmm2
+; SSE41-NEXT: packuswb %xmm3, %xmm2
+; SSE41-NEXT: pminuw %xmm4, %xmm1
+; SSE41-NEXT: pminuw %xmm4, %xmm0
+; SSE41-NEXT: packuswb %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm2, %xmm1
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc_usat_v32i16_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpminuw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpminuw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpminuw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpminuw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc_usat_v32i16_v32i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT: vpminuw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpminuw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc_usat_v32i16_v32i8:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT: vpminuw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vpminuw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc_usat_v32i16_v32i8:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512VL-NEXT: vpminuw %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vpminuw %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512VL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc_usat_v32i16_v32i8:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmovuswb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc_usat_v32i16_v32i8:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpmovuswb %zmm0, %ymm0
+; AVX512BWVL-NEXT: retq
+ %1 = icmp ult <32 x i16> %a0, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %2 = select <32 x i1> %1, <32 x i16> %a0, <32 x i16> <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ %3 = trunc <32 x i16> %2 to <32 x i8>
+ ret <32 x i8> %3
+}

Added: llvm/trunk/test/CodeGen/X86/vector-trunc-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-trunc-widen.ll?rev=346745&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-trunc-widen.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-trunc-widen.ll Mon Nov 12 23:47:52 2018
@@ -0,0 +1,2223 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-SLOW
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL
+
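+; Plain (non-saturating) truncation of <8 x i64> to <8 x i32>: keep the even
+; dwords, which SSE does with two shufps and AVX512 with vpmovqd.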
+define <8 x i32> @trunc8i64_8i32(<8 x i64> %a) {
+; SSE-LABEL: trunc8i64_8i32:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
+; SSE-NEXT: movaps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc8i64_8i32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc8i64_8i32:
+; AVX2-SLOW: # %bb.0: # %entry
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc8i64_8i32:
+; AVX2-FAST: # %bb.0: # %entry
+; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermps %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc8i64_8i32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: retq
+entry:
+ %0 = trunc <8 x i64> %a to <8 x i32>
+ ret <8 x i32> %0
+}
+
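+; ashr-by-32 followed by a truncate selects the high dword of every qword, so
+; the shifts fold away into a shufps of the odd elements on SSE/AVX1.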
+define <8 x i32> @trunc8i64_8i32_ashr(<8 x i64> %a) {
+; SSE-LABEL: trunc8i64_8i32_ashr:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
+; SSE-NEXT: movaps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc8i64_8i32_ashr:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc8i64_8i32_ashr:
+; AVX2-SLOW: # %bb.0: # %entry
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,3,2,3,5,7,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[1,3,2,3,5,7,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc8i64_8i32_ashr:
+; AVX2-FAST: # %bb.0: # %entry
+; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm2 = [1,3,5,7,5,7,6,7]
+; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermps %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc8i64_8i32_ashr:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpsraq $32, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: retq
+entry:
+ %0 = ashr <8 x i64> %a, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %1 = trunc <8 x i64> %0 to <8 x i32>
+ ret <8 x i32> %1
+}
+
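+; lshr-by-32 plus truncate is the same high-dword extraction; note that the
+; AVX2 lowerings still emit the vpsrlq shifts before the truncating shuffle.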
+define <8 x i32> @trunc8i64_8i32_lshr(<8 x i64> %a) {
+; SSE-LABEL: trunc8i64_8i32_lshr:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
+; SSE-NEXT: movaps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc8i64_8i32_lshr:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc8i64_8i32_lshr:
+; AVX2-SLOW: # %bb.0: # %entry
+; AVX2-SLOW-NEXT: vpsrlq $32, %ymm1, %ymm1
+; AVX2-SLOW-NEXT: vpsrlq $32, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc8i64_8i32_lshr:
+; AVX2-FAST: # %bb.0: # %entry
+; AVX2-FAST-NEXT: vpsrlq $32, %ymm1, %ymm1
+; AVX2-FAST-NEXT: vpsrlq $32, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc8i64_8i32_lshr:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpsrlq $32, %zmm0, %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: retq
+entry:
+ %0 = lshr <8 x i64> %a, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %1 = trunc <8 x i64> %0 to <8 x i32>
+ ret <8 x i32> %1
+}
+
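+; Truncation of <8 x i64> to <8 x i16>. SSE4.1 zeroes the upper words with
+; pblendw so the packusdw chain cannot saturate; AVX512 has vpmovqw.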
+define <8 x i16> @trunc8i64_8i16(<8 x i64> %a) {
+; SSE2-LABEL: trunc8i64_8i16:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc8i64_8i16:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc8i64_8i16:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
+; SSE41-NEXT: packusdw %xmm3, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: packusdw %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc8i64_8i16:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3],xmm1[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4],xmm3[5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3],xmm0[4],xmm3[5,6,7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc8i64_8i16:
+; AVX2-SLOW: # %bb.0: # %entry
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc8i64_8i16:
+; AVX2-FAST: # %bb.0: # %entry
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc8i64_8i16:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+entry:
+ %0 = trunc <8 x i64> %a to <8 x i16>
+ ret <8 x i16> %0
+}
+
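+; Truncate <8 x i64> to <8 x i8> and store through an undef pointer (hence
+; the uninitialized (%rax) store). Masking each qword with 255 first lets
+; the pack instructions narrow without saturating.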
+define void @trunc8i64_8i8(<8 x i64> %a) {
+; SSE2-LABEL: trunc8i64_8i8:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm0, %xmm0
+; SSE2-NEXT: movq %xmm0, (%rax)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc8i64_8i8:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSSE3-NEXT: pand %xmm4, %xmm3
+; SSSE3-NEXT: pand %xmm4, %xmm2
+; SSSE3-NEXT: packuswb %xmm3, %xmm2
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pand %xmm4, %xmm0
+; SSSE3-NEXT: packuswb %xmm1, %xmm0
+; SSSE3-NEXT: packuswb %xmm2, %xmm0
+; SSSE3-NEXT: packuswb %xmm0, %xmm0
+; SSSE3-NEXT: movq %xmm0, (%rax)
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc8i64_8i8:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
+; SSE41-NEXT: pand %xmm4, %xmm3
+; SSE41-NEXT: pand %xmm4, %xmm2
+; SSE41-NEXT: packusdw %xmm3, %xmm2
+; SSE41-NEXT: pand %xmm4, %xmm1
+; SSE41-NEXT: pand %xmm4, %xmm0
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: packusdw %xmm2, %xmm0
+; SSE41-NEXT: packuswb %xmm0, %xmm0
+; SSE41-NEXT: movq %xmm0, (%rax)
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc8i64_8i8:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; AVX1-NEXT: # xmm3 = mem[0,0]
+; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandpd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vmovq %xmm0, (%rax)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc8i64_8i8:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX2-NEXT: vmovq %xmm0, (%rax)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc8i64_8i8:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovqb %zmm0, (%rax)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+entry:
+ %0 = trunc <8 x i64> %a to <8 x i8>
+ store <8 x i8> %0, <8 x i8>* undef, align 4
+ ret void
+}
+
+define <8 x i16> @trunc8i32_8i16(<8 x i32> %a) {
+; SSE2-LABEL: trunc8i32_8i16:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: pslld $16, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: pslld $16, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc8i32_8i16:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: pshufb %xmm2, %xmm1
+; SSSE3-NEXT: pshufb %xmm2, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc8i32_8i16:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE41-NEXT: pshufb %xmm2, %xmm1
+; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc8i32_8i16:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc8i32_8i16:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc8i32_8i16:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc8i32_8i16:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc8i32_8i16:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc8i32_8i16:
+; AVX512BWVL: # %bb.0: # %entry
+; AVX512BWVL-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+entry:
+ %0 = trunc <8 x i32> %a to <8 x i16>
+ ret <8 x i16> %0
+}
+
+define <8 x i16> @trunc8i32_8i16_ashr(<8 x i32> %a) {
+; SSE-LABEL: trunc8i32_8i16_ashr:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc8i32_8i16_ashr:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrad $16, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc8i32_8i16_ashr:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpsrad $16, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc8i32_8i16_ashr:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: vpsrad $16, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc8i32_8i16_ashr:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: vpsrad $16, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc8i32_8i16_ashr:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: vpsrad $16, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc8i32_8i16_ashr:
+; AVX512BWVL: # %bb.0: # %entry
+; AVX512BWVL-NEXT: vpsrad $16, %ymm0, %ymm0
+; AVX512BWVL-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+entry:
+ %0 = ashr <8 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+ %1 = trunc <8 x i32> %0 to <8 x i16>
+ ret <8 x i16> %1
+}
+
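+; NOTE (editorial): in the SSSE3 lowering below, the lshr-by-16 is folded into
+; the pshufb mask, which picks the high word of each dword directly (bytes
+; 2,3 / 6,7 / 10,11 / 14,15); mask entries of 255 zero their lanes, and only
+; the low quadword of each shuffle result survives the punpcklqdq.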
+define <8 x i16> @trunc8i32_8i16_lshr(<8 x i32> %a) {
+; SSE2-LABEL: trunc8i32_8i16_lshr:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pslld $16, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: pslld $16, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc8i32_8i16_lshr:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2,3,6,7,10,11,14,15,10,11,14,15,14,15,255,255]
+; SSSE3-NEXT: pshufb %xmm2, %xmm1
+; SSSE3-NEXT: pshufb %xmm2, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc8i32_8i16_lshr:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: psrld $16, %xmm1
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc8i32_8i16_lshr:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrld $16, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc8i32_8i16_lshr:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc8i32_8i16_lshr:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc8i32_8i16_lshr:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc8i32_8i16_lshr:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc8i32_8i16_lshr:
+; AVX512BWVL: # %bb.0: # %entry
+; AVX512BWVL-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX512BWVL-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+entry:
+ %0 = lshr <8 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+ %1 = trunc <8 x i32> %0 to <8 x i16>
+ ret <8 x i16> %1
+}
+
+define void @trunc8i32_8i8(<8 x i32> %a) {
+; SSE2-LABEL: trunc8i32_8i8:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm0, %xmm0
+; SSE2-NEXT: movq %xmm0, (%rax)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc8i32_8i8:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; SSSE3-NEXT: pshufb %xmm2, %xmm1
+; SSSE3-NEXT: pshufb %xmm2, %xmm0
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT: movq %xmm0, (%rax)
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc8i32_8i8:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm2, %xmm1
+; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE41-NEXT: movq %xmm0, (%rax)
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc8i32_8i8:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX1-NEXT: vmovq %xmm0, (%rax)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc8i32_8i8:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX2-NEXT: vmovq %xmm0, (%rax)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc8i32_8i8:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vmovq %xmm0, (%rax)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc8i32_8i8:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: vpmovdb %ymm0, (%rax)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc8i32_8i8:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: vmovq %xmm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc8i32_8i8:
+; AVX512BWVL: # %bb.0: # %entry
+; AVX512BWVL-NEXT: vpmovdb %ymm0, (%rax)
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+entry:
+ %0 = trunc <8 x i32> %a to <8 x i8>
+ store <8 x i8> %0, <8 x i8>* undef, align 4
+ ret void
+}
+
+define void @trunc16i32_16i16(<16 x i32> %a) {
+; SSE2-LABEL: trunc16i32_16i16:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: pslld $16, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: pslld $16, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm1, %xmm0
+; SSE2-NEXT: pslld $16, %xmm3
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: pslld $16, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: packssdw %xmm3, %xmm2
+; SSE2-NEXT: movdqu %xmm2, (%rax)
+; SSE2-NEXT: movdqu %xmm0, (%rax)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc16i32_16i16:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: pslld $16, %xmm1
+; SSSE3-NEXT: psrad $16, %xmm1
+; SSSE3-NEXT: pslld $16, %xmm0
+; SSSE3-NEXT: psrad $16, %xmm0
+; SSSE3-NEXT: packssdw %xmm1, %xmm0
+; SSSE3-NEXT: pslld $16, %xmm3
+; SSSE3-NEXT: psrad $16, %xmm3
+; SSSE3-NEXT: pslld $16, %xmm2
+; SSSE3-NEXT: psrad $16, %xmm2
+; SSSE3-NEXT: packssdw %xmm3, %xmm2
+; SSSE3-NEXT: movdqu %xmm2, (%rax)
+; SSSE3-NEXT: movdqu %xmm0, (%rax)
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc16i32_16i16:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
+; SSE41-NEXT: packusdw %xmm3, %xmm2
+; SSE41-NEXT: movdqu %xmm2, (%rax)
+; SSE41-NEXT: movdqu %xmm0, (%rax)
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc16i32_16i16:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3],xmm2[4],xmm3[5],xmm2[6],xmm3[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2],xmm3[3],xmm1[4],xmm3[5],xmm1[6],xmm3[7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3],xmm2[4],xmm3[5],xmm2[6],xmm3[7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2],xmm3[3],xmm0[4],xmm3[5],xmm0[6],xmm3[7]
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovups %ymm0, (%rax)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc16i32_16i16:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovdqu %ymm0, (%rax)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc16i32_16i16:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovdw %zmm0, (%rax)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+entry:
+ %0 = trunc <16 x i32> %a to <16 x i16>
+ store <16 x i16> %0, <16 x i16>* undef, align 4
+ ret void
+}
+
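+; NOTE (editorial): the shift amount below equals the number of bits the trunc
+; discards, so arithmetic and logical shifts give the same truncated result;
+; that is why the AVX512 check uses vpsrld even though the IR uses ashr, and
+; why packssdw after psrad $16 is exact (every lane fits in i16).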
+define void @trunc16i32_16i16_ashr(<16 x i32> %a) {
+; SSE-LABEL: trunc16i32_16i16_ashr:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: psrad $16, %xmm3
+; SSE-NEXT: psrad $16, %xmm2
+; SSE-NEXT: packssdw %xmm3, %xmm2
+; SSE-NEXT: psrad $16, %xmm1
+; SSE-NEXT: psrad $16, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: movdqu %xmm2, (%rax)
+; SSE-NEXT: movdqu %xmm0, (%rax)
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc16i32_16i16_ashr:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrad $16, %xmm2, %xmm2
+; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpsrad $16, %xmm2, %xmm2
+; AVX1-NEXT: vpsrad $16, %xmm1, %xmm1
+; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovups %ymm0, (%rax)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc16i32_16i16_ashr:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpsrad $16, %ymm1, %ymm1
+; AVX2-NEXT: vpsrad $16, %ymm0, %ymm0
+; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vmovdqu %ymm0, (%rax)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc16i32_16i16_ashr:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpsrld $16, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdw %zmm0, (%rax)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+entry:
+ %0 = ashr <16 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+ %1 = trunc <16 x i32> %0 to <16 x i16>
+ store <16 x i16> %1, <16 x i16>* undef, align 4
+ ret void
+}
+
+define void @trunc16i32_16i16_lshr(<16 x i32> %a) {
+; SSE2-LABEL: trunc16i32_16i16_lshr:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: psrld $16, %xmm2
+; SSE2-NEXT: psrld $16, %xmm3
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pslld $16, %xmm1
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: pslld $16, %xmm0
+; SSE2-NEXT: psrad $16, %xmm0
+; SSE2-NEXT: packssdw %xmm1, %xmm0
+; SSE2-NEXT: pslld $16, %xmm3
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: pslld $16, %xmm2
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: packssdw %xmm3, %xmm2
+; SSE2-NEXT: movdqu %xmm2, (%rax)
+; SSE2-NEXT: movdqu %xmm0, (%rax)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc16i32_16i16_lshr:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: psrld $16, %xmm2
+; SSSE3-NEXT: psrld $16, %xmm3
+; SSSE3-NEXT: psrld $16, %xmm0
+; SSSE3-NEXT: psrld $16, %xmm1
+; SSSE3-NEXT: pslld $16, %xmm1
+; SSSE3-NEXT: psrad $16, %xmm1
+; SSSE3-NEXT: pslld $16, %xmm0
+; SSSE3-NEXT: psrad $16, %xmm0
+; SSSE3-NEXT: packssdw %xmm1, %xmm0
+; SSSE3-NEXT: pslld $16, %xmm3
+; SSSE3-NEXT: psrad $16, %xmm3
+; SSSE3-NEXT: pslld $16, %xmm2
+; SSSE3-NEXT: psrad $16, %xmm2
+; SSSE3-NEXT: packssdw %xmm3, %xmm2
+; SSSE3-NEXT: movdqu %xmm2, (%rax)
+; SSSE3-NEXT: movdqu %xmm0, (%rax)
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc16i32_16i16_lshr:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: psrld $16, %xmm3
+; SSE41-NEXT: psrld $16, %xmm2
+; SSE41-NEXT: packusdw %xmm3, %xmm2
+; SSE41-NEXT: psrld $16, %xmm1
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: movdqu %xmm2, (%rax)
+; SSE41-NEXT: movdqu %xmm0, (%rax)
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc16i32_16i16_lshr:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrld $16, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpsrld $16, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $16, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovups %ymm0, (%rax)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc16i32_16i16_lshr:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vmovdqu %ymm0, (%rax)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc16i32_16i16_lshr:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpsrld $16, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdw %zmm0, (%rax)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+entry:
+ %0 = lshr <16 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+ %1 = trunc <16 x i32> %0 to <16 x i16>
+ store <16 x i16> %1, <16 x i16>* undef, align 4
+ ret void
+}
+
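+; NOTE (editorial): as with the f64 constant above, the AVX1 byte mask below
+; is printed as the f32 bit-pattern of 0xFF (255 * 2^-149 ~= 3.57331108E-43)
+; broadcast to all four lanes.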
+define void @trunc16i32_16i8(<16 x i32> %a) {
+; SSE2-LABEL: trunc16i32_16i8:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: movdqu %xmm0, (%rax)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc16i32_16i8:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSSE3-NEXT: pand %xmm4, %xmm3
+; SSSE3-NEXT: pand %xmm4, %xmm2
+; SSSE3-NEXT: packuswb %xmm3, %xmm2
+; SSSE3-NEXT: pand %xmm4, %xmm1
+; SSSE3-NEXT: pand %xmm4, %xmm0
+; SSSE3-NEXT: packuswb %xmm1, %xmm0
+; SSSE3-NEXT: packuswb %xmm2, %xmm0
+; SSSE3-NEXT: movdqu %xmm0, (%rax)
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc16i32_16i8:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE41-NEXT: pand %xmm4, %xmm3
+; SSE41-NEXT: pand %xmm4, %xmm2
+; SSE41-NEXT: packusdw %xmm3, %xmm2
+; SSE41-NEXT: pand %xmm4, %xmm1
+; SSE41-NEXT: pand %xmm4, %xmm0
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: packuswb %xmm2, %xmm0
+; SSE41-NEXT: movdqu %xmm0, (%rax)
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc16i32_16i8:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [3.57331108E-43,3.57331108E-43,3.57331108E-43,3.57331108E-43]
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqu %xmm0, (%rax)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc16i32_16i8:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vmovdqu %xmm0, (%rax)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc16i32_16i8:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovdb %zmm0, (%rax)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+entry:
+ %0 = trunc <16 x i32> %a to <16 x i8>
+ store <16 x i8> %0, <16 x i8>* undef, align 4
+ ret void
+}
+
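+; NOTE (editorial): after psrad $24 every lane lies in [-128,127], so the
+; signed-saturating packssdw+packsswb chain below is an exact truncation; the
+; AVX512 path again substitutes the equivalent logical vpsrld.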
+define void @trunc16i32_16i8_ashr(<16 x i32> %a) {
+; SSE-LABEL: trunc16i32_16i8_ashr:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: psrad $24, %xmm1
+; SSE-NEXT: psrad $24, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: psrad $24, %xmm3
+; SSE-NEXT: psrad $24, %xmm2
+; SSE-NEXT: packssdw %xmm3, %xmm2
+; SSE-NEXT: packsswb %xmm2, %xmm0
+; SSE-NEXT: movdqu %xmm0, (%rax)
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc16i32_16i8_ashr:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrad $24, %xmm2, %xmm2
+; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpsrad $24, %xmm2, %xmm2
+; AVX1-NEXT: vpsrad $24, %xmm1, %xmm1
+; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqu %xmm0, (%rax)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc16i32_16i8_ashr:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpsrad $24, %ymm1, %ymm1
+; AVX2-NEXT: vpsrad $24, %ymm0, %ymm0
+; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqu %xmm0, (%rax)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc16i32_16i8_ashr:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpsrld $24, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, (%rax)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+entry:
+ %0 = ashr <16 x i32> %a, <i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24>
+ %1 = trunc <16 x i32> %0 to <16 x i8>
+ store <16 x i8> %1, <16 x i8>* undef, align 4
+ ret void
+}
+
+define void @trunc16i32_16i8_lshr(<16 x i32> %a) {
+; SSE2-LABEL: trunc16i32_16i8_lshr:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: psrld $24, %xmm1
+; SSE2-NEXT: psrld $24, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: psrld $24, %xmm3
+; SSE2-NEXT: psrld $24, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: packuswb %xmm2, %xmm0
+; SSE2-NEXT: movdqu %xmm0, (%rax)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc16i32_16i8_lshr:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: psrld $24, %xmm1
+; SSSE3-NEXT: psrld $24, %xmm0
+; SSSE3-NEXT: packuswb %xmm1, %xmm0
+; SSSE3-NEXT: psrld $24, %xmm3
+; SSSE3-NEXT: psrld $24, %xmm2
+; SSSE3-NEXT: packuswb %xmm3, %xmm2
+; SSSE3-NEXT: packuswb %xmm2, %xmm0
+; SSSE3-NEXT: movdqu %xmm0, (%rax)
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc16i32_16i8_lshr:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: psrld $24, %xmm1
+; SSE41-NEXT: psrld $24, %xmm0
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: psrld $24, %xmm3
+; SSE41-NEXT: psrld $24, %xmm2
+; SSE41-NEXT: packusdw %xmm3, %xmm2
+; SSE41-NEXT: packuswb %xmm2, %xmm0
+; SSE41-NEXT: movdqu %xmm0, (%rax)
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc16i32_16i8_lshr:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrld $24, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $24, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpsrld $24, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $24, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqu %xmm0, (%rax)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc16i32_16i8_lshr:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpsrld $24, %ymm1, %ymm1
+; AVX2-NEXT: vpsrld $24, %ymm0, %ymm0
+; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqu %xmm0, (%rax)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: trunc16i32_16i8_lshr:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpsrld $24, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, (%rax)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+entry:
+ %0 = lshr <16 x i32> %a, <i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24>
+ %1 = trunc <16 x i32> %0 to <16 x i8>
+ store <16 x i8> %1, <16 x i8>* undef, align 4
+ ret void
+}
+
+; PR25684
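+; NOTE (editorial): vpmovwb requires AVX512BW, so the AVX512F and AVX512VL
+; checks below zero-extend to v16i32 first and truncate with vpmovdb instead.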
+define void @trunc16i16_16i8(<16 x i16> %a) {
+; SSE2-LABEL: trunc16i16_16i8:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: movdqu %xmm0, (%rax)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc16i16_16i8:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSSE3-NEXT: pshufb %xmm2, %xmm1
+; SSSE3-NEXT: pshufb %xmm2, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: movdqu %xmm0, (%rax)
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc16i16_16i8:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm2, %xmm1
+; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: movdqu %xmm0, (%rax)
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc16i16_16i8:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vmovdqu %xmm0, (%rax)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc16i16_16i8:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vmovdqu %xmm0, (%rax)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc16i16_16i8:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, (%rax)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc16i16_16i8:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm0, (%rax)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc16i16_16i8:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vmovdqu %xmm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc16i16_16i8:
+; AVX512BWVL: # %bb.0: # %entry
+; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rax)
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+entry:
+ %0 = trunc <16 x i16> %a to <16 x i8>
+ store <16 x i8> %0, <16 x i8>* undef, align 4
+ ret void
+}
+
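+; NOTE (editorial): as in the i32 ashr cases above, the trunc discards every
+; bit the arithmetic shift sign-fills, so the logical vpsrlw in the
+; AVX512BWVL check is equivalent to the ashr in the IR.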
+define void @trunc16i16_16i8_ashr(<16 x i16> %a) {
+; SSE-LABEL: trunc16i16_16i8_ashr:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: psraw $8, %xmm1
+; SSE-NEXT: psraw $8, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: movdqu %xmm0, (%rax)
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc16i16_16i8_ashr:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpsraw $8, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqu %xmm0, (%rax)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc16i16_16i8_ashr:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpsraw $8, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqu %xmm0, (%rax)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc16i16_16i8_ashr:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: vpsraw $8, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, (%rax)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc16i16_16i8_ashr:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: vpsraw $8, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm0, (%rax)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc16i16_16i8_ashr:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: vpsraw $8, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vmovdqu %xmm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc16i16_16i8_ashr:
+; AVX512BWVL: # %bb.0: # %entry
+; AVX512BWVL-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rax)
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+entry:
+ %0 = ashr <16 x i16> %a, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %1 = trunc <16 x i16> %0 to <16 x i8>
+ store <16 x i8> %1, <16 x i8>* undef, align 4
+ ret void
+}
+
+define void @trunc16i16_16i8_lshr(<16 x i16> %a) {
+; SSE-LABEL: trunc16i16_16i8_lshr:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: psrlw $8, %xmm1
+; SSE-NEXT: psrlw $8, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: movdqu %xmm0, (%rax)
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc16i16_16i8_lshr:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqu %xmm0, (%rax)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc16i16_16i8_lshr:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqu %xmm0, (%rax)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc16i16_16i8_lshr:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, (%rax)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc16i16_16i8_lshr:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm0, (%rax)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc16i16_16i8_lshr:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vmovdqu %xmm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc16i16_16i8_lshr:
+; AVX512BWVL: # %bb.0: # %entry
+; AVX512BWVL-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rax)
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+entry:
+ %0 = lshr <16 x i16> %a, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %1 = trunc <16 x i16> %0 to <16 x i8>
+ store <16 x i8> %1, <16 x i8>* undef, align 4
+ ret void
+}
+
+define void @trunc32i16_32i8(<32 x i16> %a) {
+; SSE2-LABEL: trunc32i16_32i8:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: movdqu %xmm2, (%rax)
+; SSE2-NEXT: movdqu %xmm0, (%rax)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc32i16_32i8:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSSE3-NEXT: pshufb %xmm4, %xmm1
+; SSSE3-NEXT: pshufb %xmm4, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: pshufb %xmm4, %xmm3
+; SSSE3-NEXT: pshufb %xmm4, %xmm2
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSSE3-NEXT: movdqu %xmm2, (%rax)
+; SSSE3-NEXT: movdqu %xmm0, (%rax)
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc32i16_32i8:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm4, %xmm1
+; SSE41-NEXT: pshufb %xmm4, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: pshufb %xmm4, %xmm3
+; SSE41-NEXT: pshufb %xmm4, %xmm2
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE41-NEXT: movdqu %xmm2, (%rax)
+; SSE41-NEXT: movdqu %xmm0, (%rax)
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc32i16_32i8:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovups %ymm0, (%rax)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc32i16_32i8:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovdqu %ymm0, (%rax)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc32i16_32i8:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc32i16_32i8:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512VL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc32i16_32i8:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: vpmovwb %zmm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc32i16_32i8:
+; AVX512BWVL: # %bb.0: # %entry
+; AVX512BWVL-NEXT: vpmovwb %zmm0, (%rax)
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+entry:
+ %0 = trunc <32 x i16> %a to <32 x i8>
+ store <32 x i8> %0, <32 x i8>* undef, align 4
+ ret void
+}
+
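+; NOTE (editorial): each shufps below gathers the even dwords of two v2i64
+; inputs ([0,2] from each source), so the SSE trunc+concat costs a single
+; shuffle per 128-bit half of the result.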
+define <8 x i32> @trunc2x4i64_8i32(<4 x i64> %a, <4 x i64> %b) {
+; SSE-LABEL: trunc2x4i64_8i32:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
+; SSE-NEXT: movaps %xmm2, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc2x4i64_8i32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc2x4i64_8i32:
+; AVX2-SLOW: # %bb.0: # %entry
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc2x4i64_8i32:
+; AVX2-FAST: # %bb.0: # %entry
+; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermps %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT: retq
+;
+; AVX512F-LABEL: trunc2x4i64_8i32:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc2x4i64_8i32:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: vpmovqd %ymm0, %xmm0
+; AVX512VL-NEXT: vpmovqd %ymm1, %xmm1
+; AVX512VL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc2x4i64_8i32:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc2x4i64_8i32:
+; AVX512BWVL: # %bb.0: # %entry
+; AVX512BWVL-NEXT: vpmovqd %ymm0, %xmm0
+; AVX512BWVL-NEXT: vpmovqd %ymm1, %xmm1
+; AVX512BWVL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BWVL-NEXT: retq
+entry:
+ %0 = trunc <4 x i64> %a to <4 x i32>
+ %1 = trunc <4 x i64> %b to <4 x i32>
+ %2 = shufflevector <4 x i32> %0, <4 x i32> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i32> %2
+}
+
+define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
+; SSE2-LABEL: trunc2x4i64_8i16:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc2x4i64_8i16:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc2x4i64_8i16:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
+; SSE41-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc2x4i64_8i16:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
+; AVX1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc2x4i64_8i16:
+; AVX2-SLOW: # %bb.0: # %entry
+; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
+; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc2x4i64_8i16:
+; AVX2-FAST: # %bb.0: # %entry
+; AVX2-FAST-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,0,1,8,9,8,9,10,11,12,13,14,15]
+; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; AVX2-FAST-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
+; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512-LABEL: trunc2x4i64_8i16:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512-NEXT: vpmovqw %zmm1, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+entry:
+ %0 = trunc <4 x i64> %a to <4 x i16>
+ %1 = trunc <4 x i64> %b to <4 x i16>
+ %2 = shufflevector <4 x i16> %0, <4 x i16> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %2
+}
+
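+; NOTE (editorial): the "# kill" lines below record an xmm argument being
+; re-read as its full ymm/zmm super-register (upper bits undefined) so that
+; the wider-source vpmovqd truncating move can be applied to it.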
+define <4 x i32> @trunc2x2i64_4i32(<2 x i64> %a, <2 x i64> %b) {
+; SSE-LABEL: trunc2x2i64_4i32:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc2x2i64_4i32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc2x2i64_4i32:
+; AVX2-SLOW: # %bb.0: # %entry
+; AVX2-SLOW-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc2x2i64_4i32:
+; AVX2-FAST: # %bb.0: # %entry
+; AVX2-FAST-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm0
+; AVX2-FAST-NEXT: vpermps %ymm1, %ymm2, %ymm1
+; AVX2-FAST-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512F-LABEL: trunc2x2i64_4i32:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc2x2i64_4i32:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vpmovqd %ymm0, %xmm0
+; AVX512VL-NEXT: vpmovqd %ymm1, %xmm1
+; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc2x2i64_4i32:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc2x2i64_4i32:
+; AVX512BWVL: # %bb.0: # %entry
+; AVX512BWVL-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
+; AVX512BWVL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512BWVL-NEXT: vpmovqd %ymm0, %xmm0
+; AVX512BWVL-NEXT: vpmovqd %ymm1, %xmm1
+; AVX512BWVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+entry:
+ %0 = trunc <2 x i64> %a to <2 x i32>
+ %1 = trunc <2 x i64> %b to <2 x i32>
+ %2 = shufflevector <2 x i32> %0, <2 x i32> %1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %2
+}
+
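+; NOTE (editorial): the <2 x i32> result is immediately bitcast to i64, so on
+; SSE a single pshufd moving the even dwords to the bottom plus a movq to a
+; GPR is sufficient.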
+define i64 @trunc2i64_i64(<2 x i64> %inval) {
+; SSE-LABEL: trunc2i64_i64:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT: movq %xmm0, %rax
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: trunc2i64_i64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: trunc2i64_i64:
+; AVX2-SLOW: # %bb.0: # %entry
+; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT: vmovq %xmm0, %rax
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc2i64_i64:
+; AVX2-FAST: # %bb.0: # %entry
+; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
+; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT: vmovq %xmm0, %rax
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512F-LABEL: trunc2i64_i64:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: vmovq %xmm0, %rax
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc2i64_i64:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vpmovqd %ymm0, %xmm0
+; AVX512VL-NEXT: vmovq %xmm0, %rax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc2i64_i64:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: vmovq %xmm0, %rax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc2i64_i64:
+; AVX512BWVL: # %bb.0: # %entry
+; AVX512BWVL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512BWVL-NEXT: vpmovqd %ymm0, %xmm0
+; AVX512BWVL-NEXT: vmovq %xmm0, %rax
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+entry:
+ %0 = trunc <2 x i64> %inval to <2 x i32>
+ %1 = bitcast <2 x i32> %0 to i64
+ ret i64 %1
+}
+
+define <8 x i16> @trunc2x4i32_8i16(<4 x i32> %a, <4 x i32> %b) {
+; SSE2-LABEL: trunc2x4i32_8i16:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc2x4i32_8i16:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: pshufb %xmm2, %xmm1
+; SSSE3-NEXT: pshufb %xmm2, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc2x4i32_8i16:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE41-NEXT: pshufb %xmm2, %xmm1
+; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: trunc2x4i32_8i16:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: trunc2x4i32_8i16:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: trunc2x4i32_8i16:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc2x4i32_8i16:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512VL-NEXT: vpmovdw %ymm1, %xmm1
+; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc2x4i32_8i16:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc2x4i32_8i16:
+; AVX512BWVL: # %bb.0: # %entry
+; AVX512BWVL-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
+; AVX512BWVL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512BWVL-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512BWVL-NEXT: vpmovdw %ymm1, %xmm1
+; AVX512BWVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+entry:
+ %0 = trunc <4 x i32> %a to <4 x i16>
+ %1 = trunc <4 x i32> %b to <4 x i16>
+ %2 = shufflevector <4 x i16> %0, <4 x i16> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %2
+}
+
+; PR15524 http://llvm.org/bugs/show_bug.cgi?id=15524
+define i64 @trunc4i32_i64(<4 x i32> %inval) {
+; SSE2-LABEL: trunc4i32_i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc4i32_i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT: movq %xmm0, %rax
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc4i32_i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE41-NEXT: movq %xmm0, %rax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: trunc4i32_i64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: retq
+;
+; AVX512F-LABEL: trunc4i32_i64:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT: vmovq %xmm0, %rax
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc4i32_i64:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512VL-NEXT: vmovq %xmm0, %rax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc4i32_i64:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: vmovq %xmm0, %rax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc4i32_i64:
+; AVX512BWVL: # %bb.0: # %entry
+; AVX512BWVL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512BWVL-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512BWVL-NEXT: vmovq %xmm0, %rax
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+entry:
+ %0 = trunc <4 x i32> %inval to <4 x i16>
+ %1 = bitcast <4 x i16> %0 to i64
+ ret i64 %1
+}
+
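+; Concatenation of two <8 x i8> truncation results: SSE2 masks with 255 and
+; uses packuswb, SSSE3/SSE4.1/AVX share one pshufb mask and join the halves
+; with punpcklqdq, and the AVX512 variants truncate each input with
+; vpmovdb/vpmovwb before the unpack.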
+define <16 x i8> @trunc2x8i16_16i8(<8 x i16> %a, <8 x i16> %b) {
+; SSE2-LABEL: trunc2x8i16_16i8:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc2x8i16_16i8:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSSE3-NEXT: pshufb %xmm2, %xmm1
+; SSSE3-NEXT: pshufb %xmm2, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc2x8i16_16i8:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; SSE41-NEXT: pshufb %xmm2, %xmm1
+; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: trunc2x8i16_16i8:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT: retq
+;
+; AVX512F-LABEL: trunc2x8i16_16i8:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc2x8i16_16i8:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc2x8i16_16i8:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc2x8i16_16i8:
+; AVX512BWVL: # %bb.0: # %entry
+; AVX512BWVL-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
+; AVX512BWVL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
+; AVX512BWVL-NEXT: vpmovwb %ymm1, %xmm1
+; AVX512BWVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+entry:
+ %0 = trunc <8 x i16> %a to <8 x i8>
+ %1 = trunc <8 x i16> %b to <8 x i8>
+ %2 = shufflevector <8 x i8> %0, <8 x i8> %1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %2
+}
+
+; PR15524 http://llvm.org/bugs/show_bug.cgi?id=15524
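+; The <8 x i16> to <8 x i8> version of the same bitcast-to-i64 pattern.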
+define i64 @trunc8i16_i64(<8 x i16> %inval) {
+; SSE2-LABEL: trunc8i16_i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: packuswb %xmm0, %xmm0
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: trunc8i16_i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT: movq %xmm0, %rax
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: trunc8i16_i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSE41-NEXT: movq %xmm0, %rax
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: trunc8i16_i64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX-NEXT: vmovq %xmm0, %rax
+; AVX-NEXT: retq
+;
+; AVX512F-LABEL: trunc8i16_i64:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vmovq %xmm0, %rax
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: trunc8i16_i64:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT: vmovq %xmm0, %rax
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: trunc8i16_i64:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vmovq %xmm0, %rax
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: trunc8i16_i64:
+; AVX512BWVL: # %bb.0: # %entry
+; AVX512BWVL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
+; AVX512BWVL-NEXT: vmovq %xmm0, %rax
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+entry:
+ %0 = trunc <8 x i16> %inval to <8 x i8>
+ %1 = bitcast <8 x i8> %0 to i64
+ ret i64 %1
+}
+
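+; Truncating and shuffling a zeroinitializer <16 x i64> should constant-fold
+; to a single zeroed xmm register on every subtarget.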
+define <16 x i8> @trunc16i64_16i8_const() {
+; SSE-LABEL: trunc16i64_16i8_const:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: trunc16i64_16i8_const:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: trunc16i64_16i8_const:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: retq
+entry:
+ %0 = trunc <16 x i64> zeroinitializer to <16 x i8>
+ %1 = shufflevector <16 x i8> %0, <16 x i8> %0, <16 x i32> <i32 28, i32 30, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 undef, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26>
+ ret <16 x i8> %1
+}
+
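+; PR32160: splatting element 2 of a truncated <8 x i32> lowers to a short
+; shuffle sequence (or vpmovdw plus one shuffle on AVX512) instead of a full
+; truncation.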
+define <8 x i16> @PR32160(<8 x i32> %x) {
+; SSE-LABEL: PR32160:
+; SSE: # %bb.0:
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: PR32160:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,8,9,8,9,8,9,8,9,8,9,8,9]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: PR32160:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
+; AVX2-SLOW-NEXT: vpbroadcastd %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vzeroupper
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: PR32160:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,8,9,8,9,8,9,8,9,8,9,8,9]
+; AVX2-FAST-NEXT: vzeroupper
+; AVX2-FAST-NEXT: retq
+;
+; AVX512F-LABEL: PR32160:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,2,3,4,5,6,7]
+; AVX512F-NEXT: vpbroadcastd %xmm0, %xmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: PR32160:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,4,5,4,5,4,5,4,5,4,5]
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: PR32160:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,4,5,4,5,4,5,4,5,4,5]
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: PR32160:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpmovdw %ymm0, %xmm0
+; AVX512BWVL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,4,5,4,5,4,5,4,5,4,5]
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %shuf = trunc <8 x i32> %x to <8 x i16>
+ %trunc = shufflevector <8 x i16> %shuf, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+ ret <8 x i16> %trunc
+}
+
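+; PR34773: load <16 x i16>, shift right by 8, truncate, and store <16 x i8>.
+; SSE/AVX1 use psrlw + packuswb; AVX512BWVL folds the loads and stores into
+; vpsrlw and vpmovwb.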
+define void @PR34773(i16* %a0, i8* %a1) {
+; SSE-LABEL: PR34773:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqu (%rdi), %xmm0
+; SSE-NEXT: movdqu 16(%rdi), %xmm1
+; SSE-NEXT: movdqu 32(%rdi), %xmm2
+; SSE-NEXT: movdqu 48(%rdi), %xmm3
+; SSE-NEXT: psrlw $8, %xmm1
+; SSE-NEXT: psrlw $8, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: psrlw $8, %xmm3
+; SSE-NEXT: psrlw $8, %xmm2
+; SSE-NEXT: packuswb %xmm3, %xmm2
+; SSE-NEXT: movdqu %xmm0, (%rsi)
+; SSE-NEXT: movdqu %xmm2, 16(%rsi)
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: PR34773:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; AVX1-NEXT: vmovdqu 16(%rdi), %xmm1
+; AVX1-NEXT: vmovdqu 32(%rdi), %xmm2
+; AVX1-NEXT: vmovdqu 48(%rdi), %xmm3
+; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm1
+; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vmovdqu %xmm0, (%rsi)
+; AVX1-NEXT: vmovdqu %xmm1, 16(%rsi)
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: PR34773:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; AVX2-NEXT: vmovdqu 32(%rdi), %ymm1
+; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vmovdqu %xmm0, (%rsi)
+; AVX2-NEXT: vmovdqu %xmm1, 16(%rsi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: PR34773:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovdqu (%rdi), %ymm0
+; AVX512F-NEXT: vmovdqu 32(%rdi), %ymm1
+; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, (%rsi)
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, 16(%rsi)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: PR34773:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vmovdqu (%rdi), %ymm0
+; AVX512VL-NEXT: vmovdqu 32(%rdi), %ymm1
+; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm0, (%rsi)
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT: vpmovdb %zmm0, 16(%rsi)
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: PR34773:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vmovdqu (%rdi), %ymm0
+; AVX512BW-NEXT: vmovdqu 32(%rdi), %ymm1
+; AVX512BW-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT: vmovdqu %xmm0, (%rsi)
+; AVX512BW-NEXT: vmovdqu %xmm1, 16(%rsi)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512BWVL-LABEL: PR34773:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpsrlw $8, (%rdi), %ymm0
+; AVX512BWVL-NEXT: vpsrlw $8, 32(%rdi), %ymm1
+; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rsi)
+; AVX512BWVL-NEXT: vpmovwb %ymm1, 16(%rsi)
+; AVX512BWVL-NEXT: vzeroupper
+; AVX512BWVL-NEXT: retq
+ %1 = getelementptr i16, i16* %a0, i64 16
+ %2 = getelementptr i8, i8* %a1, i64 16
+ %3 = bitcast i16* %a0 to <16 x i16>*
+ %4 = bitcast i16* %1 to <16 x i16>*
+ %5 = bitcast i8* %a1 to <16 x i8>*
+ %6 = bitcast i8* %2 to <16 x i8>*
+ %7 = load <16 x i16>, <16 x i16>* %3, align 2
+ %8 = load <16 x i16>, <16 x i16>* %4, align 2
+ %9 = lshr <16 x i16> %7, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %10 = lshr <16 x i16> %8, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %11 = trunc <16 x i16> %9 to <16 x i8>
+ %12 = trunc <16 x i16> %10 to <16 x i8>
+ store <16 x i8> %11, <16 x i8>* %5, align 1
+ store <16 x i8> %12, <16 x i8>* %6, align 1
+ ret void
+}

Added: llvm/trunk/test/CodeGen/X86/vector-zext-widen.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-zext-widen.ll?rev=346745&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-zext-widen.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-zext-widen.ll Mon Nov 12 23:47:52 2018
@@ -0,0 +1,2294 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 --check-prefix=AVX2-SLOW
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 --check-prefix=AVX2-FAST
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -x86-experimental-vector-widening-legalization -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512,AVX512BW
+
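+; Zero extension of the low <8 x i8> subvector: punpcklbw against a zeroed
+; register on SSE2/SSSE3, a single pmovzxbw from SSE4.1 onwards.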
+define <8 x i16> @zext_16i8_to_8i16(<16 x i8> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: zext_16i8_to_8i16:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_16i8_to_8i16:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_16i8_to_8i16:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: zext_16i8_to_8i16:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX-NEXT: retq
+entry:
+ %B = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %C = zext <8 x i8> %B to <8 x i16>
+ ret <8 x i16> %C
+}
+
+; PR17654
+define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %A) {
+; SSE2-LABEL: zext_16i8_to_16i16:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_16i8_to_16i16:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_16i8_to_16i16:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: zext_16i8_to_16i16:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zext_16i8_to_16i16:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: zext_16i8_to_16i16:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512-NEXT: retq
+entry:
+ %B = zext <16 x i8> %A to <16 x i16>
+ ret <16 x i16> %B
+}
+
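+; The <32 x i8> source splits the result across four xmm registers on SSE,
+; two ymm registers up to AVX512F, and a single zmm vpmovzxbw on AVX512BW.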
+define <32 x i16> @zext_32i8_to_32i16(<32 x i8> %A) {
+; SSE2-LABEL: zext_32i8_to_32i16:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_32i8_to_32i16:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm1, %xmm3
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm4
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
+; SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_32i8_to_32i16:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT: movdqa %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm4, %xmm1
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: zext_32i8_to_32i16:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
+; AVX1-NEXT: vmovaps %ymm2, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zext_32i8_to_32i16:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: vmovdqa %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: zext_32i8_to_32i16:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512F-NEXT: vmovdqa %ymm2, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: zext_32i8_to_32i16:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT: retq
+entry:
+ %B = zext <32 x i8> %A to <32 x i16>
+ ret <32 x i16> %B
+}
+
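+; Low four bytes zero extended straight to <4 x i32>: two unpacks on SSE2,
+; one pmovzxbd on SSE4.1 and later.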
+define <4 x i32> @zext_16i8_to_4i32(<16 x i8> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: zext_16i8_to_4i32:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_16i8_to_4i32:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_16i8_to_4i32:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: zext_16i8_to_4i32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX-NEXT: retq
+entry:
+ %B = shufflevector <16 x i8> %A, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %C = zext <4 x i8> %B to <4 x i32>
+ ret <4 x i32> %C
+}
+
+define <8 x i32> @zext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: zext_16i8_to_8i32:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_16i8_to_8i32:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_16i8_to_8i32:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: zext_16i8_to_8i32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zext_16i8_to_8i32:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: zext_16i8_to_8i32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX512-NEXT: retq
+entry:
+ %B = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %C = zext <8 x i8> %B to <8 x i32>
+ ret <8 x i32> %C
+}
+
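+; Full <16 x i8> to <16 x i32> extension; AVX512 does it with one vpmovzxbd
+; to zmm.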
+define <16 x i32> @zext_16i8_to_16i32(<16 x i8> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: zext_16i8_to_16i32:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_16i8_to_16i32:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
+; SSSE3-NEXT: pxor %xmm4, %xmm4
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_16i8_to_16i32:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm4 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE41-NEXT: movdqa %xmm4, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: zext_16i8_to_16i32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,0,1]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
+; AVX1-NEXT: vmovaps %ymm2, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zext_16i8_to_16i32:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-NEXT: vmovdqa %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: zext_16i8_to_16i32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT: retq
+entry:
+ %B = zext <16 x i8> %A to <16 x i32>
+ ret <16 x i32> %B
+}
+
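+; Only the low two bytes are live, so SSSE3 can use a single pshufb and
+; SSE4.1+ a single pmovzxbq.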
+define <2 x i64> @zext_16i8_to_2i64(<16 x i8> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: zext_16i8_to_2i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_16i8_to_2i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_16i8_to_2i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: zext_16i8_to_2i64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT: retq
+entry:
+ %B = shufflevector <16 x i8> %A, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
+ %C = zext <2 x i8> %B to <2 x i64>
+ ret <2 x i64> %C
+}
+
+define <4 x i64> @zext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: zext_16i8_to_4i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_16i8_to_4i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[2],zero,zero,zero,zero,zero,zero,zero,xmm1[3],zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_16i8_to_4i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: zext_16i8_to_4i64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zext_16i8_to_4i64:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: zext_16i8_to_4i64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: retq
+entry:
+ %B = shufflevector <16 x i8> %A, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %C = zext <4 x i8> %B to <4 x i64>
+ ret <4 x i64> %C
+}
+
+define <8 x i64> @zext_16i8_to_8i64(<16 x i8> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: zext_16i8_to_8i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_16i8_to_8i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [0,128,128,128,128,128,128,128,1,128,128,128,128,128,128,128]
+; SSSE3-NEXT: pshufb %xmm4, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [2,128,128,128,128,128,128,128,3,128,128,128,128,128,128,128]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
+; SSSE3-NEXT: pshufb %xmm5, %xmm1
+; SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSSE3-NEXT: pshufb %xmm4, %xmm2
+; SSSE3-NEXT: pshufb %xmm5, %xmm3
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_16i8_to_8i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm4 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrld $16, %xmm1
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: psrlq $48, %xmm0
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm3 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: movdqa %xmm4, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: zext_16i8_to_8i64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm2
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
+; AVX1-NEXT: vmovaps %ymm2, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zext_16i8_to_8i64:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vmovdqa %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: zext_16i8_to_8i64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxbq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: retq
+entry:
+ %B = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %C = zext <8 x i8> %B to <8 x i64>
+ ret <8 x i64> %C
+}
+
+define <4 x i32> @zext_8i16_to_4i32(<8 x i16> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: zext_8i16_to_4i32:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_8i16_to_4i32:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_8i16_to_4i32:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: zext_8i16_to_4i32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX-NEXT: retq
+entry:
+ %B = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %C = zext <4 x i16> %B to <4 x i32>
+ ret <4 x i32> %C
+}
+
+define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: zext_8i16_to_8i32:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_8i16_to_8i32:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_8i16_to_8i32:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: zext_8i16_to_8i32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zext_8i16_to_8i32:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: zext_8i16_to_8i32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT: retq
+entry:
+ %B = zext <8 x i16> %A to <8 x i32>
+ ret <8 x i32> %B
+}
+
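+; <16 x i16> to <16 x i32> splits across register halves below AVX512, where
+; a single vpmovzxwd to zmm suffices.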
+define <16 x i32> @zext_16i16_to_16i32(<16 x i16> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: zext_16i16_to_16i32:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_16i16_to_16i32:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm1, %xmm3
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm4, %xmm4
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_16i16_to_16i32:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: movdqa %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm4, %xmm1
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: zext_16i16_to_16i32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
+; AVX1-NEXT: vmovaps %ymm2, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zext_16i16_to_16i32:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vmovdqa %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: zext_16i16_to_16i32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512-NEXT: retq
+entry:
+ %B = zext <16 x i16> %A to <16 x i32>
+ ret <16 x i32> %B
+}
+
+define <2 x i64> @zext_8i16_to_2i64(<8 x i16> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: zext_8i16_to_2i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_8i16_to_2i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_8i16_to_2i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: zext_8i16_to_2i64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX-NEXT: retq
+entry:
+ %B = shufflevector <8 x i16> %A, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
+ %C = zext <2 x i16> %B to <2 x i64>
+ ret <2 x i64> %C
+}
+
+define <4 x i64> @zext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: zext_8i16_to_4i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_8i16_to_4i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_8i16_to_4i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: zext_8i16_to_4i64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zext_8i16_to_4i64:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: zext_8i16_to_4i64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX512-NEXT: retq
+entry:
+ %B = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %C = zext <4 x i16> %B to <4 x i64>
+ ret <4 x i64> %C
+}
+
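+; <8 x i16> to <8 x i64>: staged unpacks on SSE2/SSSE3, pshufd + pmovzxwq per
+; quarter on SSE4.1/AVX1, and one vpmovzxwq to zmm on AVX512.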
+define <8 x i64> @zext_8i16_to_8i64(<8 x i16> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: zext_8i16_to_8i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_8i16_to_8i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
+; SSSE3-NEXT: pxor %xmm4, %xmm4
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSSE3-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSSE3-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_8i16_to_8i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm4 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm3 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE41-NEXT: movdqa %xmm4, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: zext_8i16_to_8i64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,0,1]
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
+; AVX1-NEXT: vmovaps %ymm2, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zext_8i16_to_8i64:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: vmovdqa %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: zext_8i16_to_8i64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxwq {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX512-NEXT: retq
+entry:
+ %B = zext <8 x i16> %A to <8 x i64>
+ ret <8 x i64> %B
+}
+
+define <2 x i64> @zext_4i32_to_2i64(<4 x i32> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: zext_4i32_to_2i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_4i32_to_2i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: xorps %xmm1, %xmm1
+; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_4i32_to_2i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: zext_4i32_to_2i64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX-NEXT: retq
+entry:
+ %B = shufflevector <4 x i32> %A, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+ %C = zext <2 x i32> %B to <2 x i64>
+ ret <2 x i64> %C
+}
+
+define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: zext_4i32_to_4i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_4i32_to_4i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movaps %xmm0, %xmm1
+; SSSE3-NEXT: xorps %xmm2, %xmm2
+; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_4i32_to_4i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: zext_4i32_to_4i64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zext_4i32_to_4i64:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: zext_4i32_to_4i64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-NEXT: retq
+entry:
+ %B = zext <4 x i32> %A to <4 x i64>
+ ret <4 x i64> %B
+}
+
+define <8 x i64> @zext_8i32_to_8i64(<8 x i32> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: zext_8i32_to_8i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movaps %xmm1, %xmm3
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: xorps %xmm4, %xmm4
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-NEXT: movaps %xmm3, %xmm2
+; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_8i32_to_8i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movaps %xmm1, %xmm3
+; SSSE3-NEXT: movaps %xmm0, %xmm1
+; SSSE3-NEXT: xorps %xmm4, %xmm4
+; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSSE3-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSSE3-NEXT: movaps %xmm3, %xmm2
+; SSSE3-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSSE3-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_8i32_to_8i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: movdqa %xmm5, %xmm0
+; SSE41-NEXT: movdqa %xmm4, %xmm1
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: zext_8i32_to_8i64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
+; AVX1-NEXT: vmovaps %ymm2, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zext_8i32_to_8i64:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: vmovdqa %ymm2, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: zext_8i32_to_8i64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; AVX512-NEXT: retq
+entry:
+ %B = zext <8 x i32> %A to <8 x i64>
+ ret <8 x i64> %B
+}
+
+define <2 x i64> @load_zext_2i8_to_2i64(<2 x i8> *%ptr) {
+; SSE2-LABEL: load_zext_2i8_to_2i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movzwl (%rdi), %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_2i8_to_2i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movzwl (%rdi), %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_2i8_to_2i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: load_zext_2i8_to_2i64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT: retq
+entry:
+ %X = load <2 x i8>, <2 x i8>* %ptr
+ %Y = zext <2 x i8> %X to <2 x i64>
+ ret <2 x i64> %Y
+}
+
+define <4 x i32> @load_zext_4i8_to_4i32(<4 x i8> *%ptr) {
+; SSE2-LABEL: load_zext_4i8_to_4i32:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_4i8_to_4i32:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_4i8_to_4i32:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: load_zext_4i8_to_4i32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX-NEXT: retq
+entry:
+ %X = load <4 x i8>, <4 x i8>* %ptr
+ %Y = zext <4 x i8> %X to <4 x i32>
+ ret <4 x i32> %Y
+}
+
+define <4 x i64> @load_zext_4i8_to_4i64(<4 x i8> *%ptr) {
+; SSE2-LABEL: load_zext_4i8_to_4i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_4i8_to_4i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[2],zero,zero,zero,zero,zero,zero,zero,xmm1[3],zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_4i8_to_4i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: load_zext_4i8_to_4i64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_zext_4i8_to_4i64:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_zext_4i8_to_4i64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: retq
+entry:
+ %X = load <4 x i8>, <4 x i8>* %ptr
+ %Y = zext <4 x i8> %X to <4 x i64>
+ ret <4 x i64> %Y
+}
+
+define <8 x i16> @load_zext_8i8_to_8i16(<8 x i8> *%ptr) {
+; SSE2-LABEL: load_zext_8i8_to_8i16:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_8i8_to_8i16:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_8i8_to_8i16:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: load_zext_8i8_to_8i16:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX-NEXT: retq
+entry:
+ %X = load <8 x i8>, <8 x i8>* %ptr
+ %Y = zext <8 x i8> %X to <8 x i16>
+ ret <8 x i16> %Y
+}
+
+define <8 x i32> @load_zext_8i8_to_8i32(<8 x i8> *%ptr) {
+; SSE2-LABEL: load_zext_8i8_to_8i32:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_8i8_to_8i32:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_8i8_to_8i32:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: load_zext_8i8_to_8i32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_zext_8i8_to_8i32:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_zext_8i8_to_8i32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX512-NEXT: retq
+entry:
+ %X = load <8 x i8>, <8 x i8>* %ptr
+ %Y = zext <8 x i8> %X to <8 x i32>
+ ret <8 x i32> %Y
+}
+
+define <8 x i32> @load_zext_16i8_to_8i32(<16 x i8> *%ptr) {
+; SSE2-LABEL: load_zext_16i8_to_8i32:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_16i8_to_8i32:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa (%rdi), %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_16i8_to_8i32:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: movdqa (%rdi), %xmm1
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: load_zext_16i8_to_8i32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vmovdqa (%rdi), %xmm0
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_zext_16i8_to_8i32:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_zext_16i8_to_8i32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX512-NEXT: retq
+entry:
+ %X = load <16 x i8>, <16 x i8>* %ptr
+ %Y = shufflevector <16 x i8> %X, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %Z = zext <8 x i8> %Y to <8 x i32>
+ ret <8 x i32> %Z
+}
+
+define <8 x i64> @load_zext_8i8_to_8i64(<8 x i8> *%ptr) {
+; SSE2-LABEL: load_zext_8i8_to_8i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_8i8_to_8i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [0,128,128,128,128,128,128,128,1,128,128,128,128,128,128,128]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pshufb %xmm4, %xmm0
+; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [2,128,128,128,128,128,128,128,3,128,128,128,128,128,128,128]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
+; SSSE3-NEXT: pshufb %xmm5, %xmm1
+; SSSE3-NEXT: movdqa %xmm3, %xmm2
+; SSSE3-NEXT: pshufb %xmm4, %xmm2
+; SSSE3-NEXT: pshufb %xmm5, %xmm3
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_8i8_to_8i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm2 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm3 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: load_zext_8i8_to_8i64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm2 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm3 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_zext_8i8_to_8i64:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_zext_8i8_to_8i64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: retq
+entry:
+ %X = load <8 x i8>, <8 x i8>* %ptr
+ %Y = zext <8 x i8> %X to <8 x i64>
+ ret <8 x i64> %Y
+}
+
+define <16 x i16> @load_zext_16i8_to_16i16(<16 x i8> *%ptr) {
+; SSE2-LABEL: load_zext_16i8_to_16i16:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_16i8_to_16i16:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa (%rdi), %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_16i8_to_16i16:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: load_zext_16i8_to_16i16:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_zext_16i8_to_16i16:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_zext_16i8_to_16i16:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512-NEXT: retq
+entry:
+ %X = load <16 x i8>, <16 x i8>* %ptr
+ %Y = zext <16 x i8> %X to <16 x i16>
+ ret <16 x i16> %Y
+}
+
+define <2 x i64> @load_zext_2i16_to_2i64(<2 x i16> *%ptr) {
+; SSE2-LABEL: load_zext_2i16_to_2i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_2i16_to_2i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_2i16_to_2i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: load_zext_2i16_to_2i64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
+; AVX-NEXT: retq
+entry:
+ %X = load <2 x i16>, <2 x i16>* %ptr
+ %Y = zext <2 x i16> %X to <2 x i64>
+ ret <2 x i64> %Y
+}
+
+define <4 x i32> @load_zext_4i16_to_4i32(<4 x i16> *%ptr) {
+; SSE2-LABEL: load_zext_4i16_to_4i32:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_4i16_to_4i32:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_4i16_to_4i32:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: load_zext_4i16_to_4i32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX-NEXT: retq
+entry:
+ %X = load <4 x i16>, <4 x i16>* %ptr
+ %Y = zext <4 x i16> %X to <4 x i32>
+ ret <4 x i32> %Y
+}
+
+define <4 x i64> @load_zext_4i16_to_4i64(<4 x i16> *%ptr) {
+; SSE2-LABEL: load_zext_4i16_to_4i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_4i16_to_4i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_4i16_to_4i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: load_zext_4i16_to_4i64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_zext_4i16_to_4i64:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_zext_4i16_to_4i64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX512-NEXT: retq
+entry:
+ %X = load <4 x i16>, <4 x i16>* %ptr
+ %Y = zext <4 x i16> %X to <4 x i64>
+ ret <4 x i64> %Y
+}
+
+define <8 x i32> @load_zext_8i16_to_8i32(<8 x i16> *%ptr) {
+; SSE2-LABEL: load_zext_8i16_to_8i32:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_8i16_to_8i32:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa (%rdi), %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_8i16_to_8i32:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: load_zext_8i16_to_8i32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_zext_8i16_to_8i32:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_zext_8i16_to_8i32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX512-NEXT: retq
+entry:
+ %X = load <8 x i16>, <8 x i16>* %ptr
+ %Y = zext <8 x i16> %X to <8 x i32>
+ ret <8 x i32> %Y
+}
+
+define <2 x i64> @load_zext_2i32_to_2i64(<2 x i32> *%ptr) {
+; SSE2-LABEL: load_zext_2i32_to_2i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_2i32_to_2i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSSE3-NEXT: xorps %xmm1, %xmm1
+; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_2i32_to_2i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: load_zext_2i32_to_2i64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; AVX-NEXT: retq
+entry:
+ %X = load <2 x i32>, <2 x i32>* %ptr
+ %Y = zext <2 x i32> %X to <2 x i64>
+ ret <2 x i64> %Y
+}
+
+define <4 x i64> @load_zext_4i32_to_4i64(<4 x i32> *%ptr) {
+; SSE2-LABEL: load_zext_4i32_to_4i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movaps (%rdi), %xmm1
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: load_zext_4i32_to_4i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movaps (%rdi), %xmm1
+; SSSE3-NEXT: xorps %xmm2, %xmm2
+; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: load_zext_4i32_to_4i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: load_zext_4i32_to_4i64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_zext_4i32_to_4i64:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_zext_4i32_to_4i64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX512-NEXT: retq
+entry:
+ %X = load <4 x i32>, <4 x i32>* %ptr
+ %Y = zext <4 x i32> %X to <4 x i64>
+ ret <4 x i64> %Y
+}
+
+define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
+; SSE2-LABEL: zext_8i8_to_8i32:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_8i8_to_8i32:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_8i8_to_8i32:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: zext_8i8_to_8i32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zext_8i8_to_8i32:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: zext_8i8_to_8i32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX512-NEXT: retq
+entry:
+ %t = zext <8 x i8> %z to <8 x i32>
+ ret <8 x i32> %t
+}
+
+define <8 x i32> @shuf_zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: shuf_zext_8i16_to_8i32:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuf_zext_8i16_to_8i32:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuf_zext_8i16_to_8i32:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuf_zext_8i16_to_8i32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuf_zext_8i16_to_8i32:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: shuf_zext_8i16_to_8i32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT: retq
+entry:
+ %B = shufflevector <8 x i16> %A, <8 x i16> zeroinitializer, <16 x i32> <i32 0, i32 8, i32 1, i32 8, i32 2, i32 8, i32 3, i32 8, i32 4, i32 8, i32 5, i32 8, i32 6, i32 8, i32 7, i32 8>
+ %Z = bitcast <16 x i16> %B to <8 x i32>
+ ret <8 x i32> %Z
+}
+
+define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: shuf_zext_4i32_to_4i64:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuf_zext_4i32_to_4i64:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movaps %xmm0, %xmm1
+; SSSE3-NEXT: xorps %xmm2, %xmm2
+; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuf_zext_4i32_to_4i64:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuf_zext_4i32_to_4i64:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuf_zext_4i32_to_4i64:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: shuf_zext_4i32_to_4i64:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-NEXT: retq
+entry:
+ %B = shufflevector <4 x i32> %A, <4 x i32> zeroinitializer, <8 x i32> <i32 0, i32 4, i32 1, i32 4, i32 2, i32 4, i32 3, i32 4>
+ %Z = bitcast <8 x i32> %B to <4 x i64>
+ ret <4 x i64> %Z
+}
+
+define <8 x i32> @shuf_zext_8i8_to_8i32(<8 x i8> %A) {
+; SSE2-LABEL: shuf_zext_8i8_to_8i32:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuf_zext_8i8_to_8i32:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuf_zext_8i8_to_8i32:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuf_zext_8i8_to_8i32:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuf_zext_8i8_to_8i32:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: shuf_zext_8i8_to_8i32:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX512-NEXT: retq
+entry:
+ %B = shufflevector <8 x i8> %A, <8 x i8> zeroinitializer, <32 x i32> <i32 0, i32 8, i32 8, i32 8, i32 1, i32 8, i32 8, i32 8, i32 2, i32 8, i32 8, i32 8, i32 3, i32 8, i32 8, i32 8, i32 4, i32 8, i32 8, i32 8, i32 5, i32 8, i32 8, i32 8, i32 6, i32 8, i32 8, i32 8, i32 7, i32 8, i32 8, i32 8>
+ %Z = bitcast <32 x i8> %B to <8 x i32>
+ ret <8 x i32> %Z
+}
+
+define <2 x i64> @shuf_zext_16i8_to_2i64_offset6(<16 x i8> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: shuf_zext_16i8_to_2i64_offset6:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuf_zext_16i8_to_2i64_offset6:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuf_zext_16i8_to_2i64_offset6:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: psrlq $48, %xmm0
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: shuf_zext_16i8_to_2i64_offset6:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vpsrlq $48, %xmm0, %xmm0
+; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT: retq
+entry:
+ %B = shufflevector <16 x i8> %A, <16 x i8> zeroinitializer, <16 x i32> <i32 6, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 7, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+ %Z = bitcast <16 x i8> %B to <2 x i64>
+ ret <2 x i64> %Z
+}
+
+define <4 x i64> @shuf_zext_16i8_to_4i64_offset11(<16 x i8> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: shuf_zext_16i8_to_4i64_offset11:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuf_zext_16i8_to_4i64_offset11:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[11],zero,zero,zero,zero,zero,zero,zero,xmm0[12],zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[13],zero,zero,zero,zero,zero,zero,zero,xmm1[14],zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuf_zext_16i8_to_4i64_offset11:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrldq {{.*#+}} xmm1 = xmm1[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: psrldq {{.*#+}} xmm0 = xmm0[13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuf_zext_16i8_to_4i64_offset11:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm1 = xmm0[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuf_zext_16i8_to_4i64_offset11:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: shuf_zext_16i8_to_4i64_offset11:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: retq
+entry:
+ %B = shufflevector <16 x i8> %A, <16 x i8> zeroinitializer, <32 x i32> <i32 11, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 12, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 13, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 14, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+ %Z = bitcast <32 x i8> %B to <4 x i64>
+ ret <4 x i64> %Z
+}
+
+define <2 x i64> @shuf_zext_8i16_to_2i64_offset6(<8 x i16> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: shuf_zext_8i16_to_2i64_offset6:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuf_zext_8i16_to_2i64_offset6:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[6,7],zero,zero,zero,zero,zero,zero,xmm0[8,9],zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuf_zext_8i16_to_2i64_offset6:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: shuf_zext_8i16_to_2i64_offset6:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX-NEXT: retq
+entry:
+ %B = shufflevector <8 x i16> %A, <8 x i16> zeroinitializer, <8 x i32> <i32 3, i32 8, i32 8, i32 8, i32 4, i32 8, i32 8, i32 8>
+ %Z = bitcast <8 x i16> %B to <2 x i64>
+ ret <2 x i64> %Z
+}
+
+define <4 x i64> @shuf_zext_8i16_to_4i64_offset2(<8 x i16> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: shuf_zext_8i16_to_4i64_offset2:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuf_zext_8i16_to_4i64_offset2:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuf_zext_8i16_to_4i64_offset2:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuf_zext_8i16_to_4i64_offset2:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuf_zext_8i16_to_4i64_offset2:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,2,2,3]
+; AVX2-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: shuf_zext_8i16_to_4i64_offset2:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,2,2,3]
+; AVX512-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX512-NEXT: retq
+entry:
+ %B = shufflevector <8 x i16> %A, <8 x i16> zeroinitializer, <16 x i32> <i32 2, i32 8, i32 8, i32 8, i32 3, i32 8, i32 8, i32 8, i32 4, i32 8, i32 8, i32 8, i32 5, i32 8, i32 8, i32 8>
+ %Z = bitcast <16 x i16> %B to <4 x i64>
+ ret <4 x i64> %Z
+}
+
+define <4 x i32> @shuf_zext_8i16_to_4i32_offset1(<8 x i16> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: shuf_zext_8i16_to_4i32_offset1:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuf_zext_8i16_to_4i32_offset1:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuf_zext_8i16_to_4i32_offset1:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuf_zext_8i16_to_4i32_offset1:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: retq
+;
+; AVX2-SLOW-LABEL: shuf_zext_8i16_to_4i32_offset1:
+; AVX2-SLOW: # %bb.0: # %entry
+; AVX2-SLOW-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; AVX2-SLOW-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: shuf_zext_8i16_to_4i32_offset1:
+; AVX2-FAST: # %bb.0: # %entry
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3],zero,zero,xmm0[4,5],zero,zero,xmm0[6,7],zero,zero,xmm0[8,9],zero,zero
+; AVX2-FAST-NEXT: retq
+;
+; AVX512F-LABEL: shuf_zext_8i16_to_4i32_offset1:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: shuf_zext_8i16_to_4i32_offset1:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3],zero,zero,xmm0[4,5],zero,zero,xmm0[6,7],zero,zero,xmm0[8,9],zero,zero
+; AVX512BW-NEXT: retq
+entry:
+ %B = shufflevector <8 x i16> %A, <8 x i16> zeroinitializer, <8 x i32> <i32 1, i32 8, i32 2, i32 8, i32 3, i32 8, i32 4, i32 8>
+ %Z = bitcast <8 x i16> %B to <4 x i32>
+ ret <4 x i32> %Z
+}
+
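+; Zero extension of elements 3..7 of the 8 x i16 input (the remaining
+; result lanes are undef), written as an interleave-with-zero shuffle
+; plus a bitcast to 8 x i32.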
+define <8 x i32> @shuf_zext_8i16_to_8i32_offset3(<8 x i16> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: shuf_zext_8i16_to_8i32_offset3:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuf_zext_8i16_to_8i32_offset3:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuf_zext_8i16_to_8i32_offset3:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE41-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuf_zext_8i16_to_8i32_offset3:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuf_zext_8i16_to_8i32_offset3:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: shuf_zext_8i16_to_8i32_offset3:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT: retq
+entry:
+ %B = shufflevector <8 x i16> %A, <8 x i16> zeroinitializer, <16 x i32> <i32 3, i32 8, i32 4, i32 8, i32 5, i32 8, i32 6, i32 8, i32 7, i32 8, i32 undef, i32 8, i32 undef, i32 8, i32 undef, i32 8>
+ %Z = bitcast <16 x i16> %B to <8 x i32>
+ ret <8 x i32> %Z
+}
+
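+; Zero extension of the upper 128-bit half (elements 8..15, two of them
+; undef) of a 16 x i16 vector; AVX2/AVX512 lower this to a single
+; extract followed by vpmovzxwd.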
+define <8 x i32> @shuf_zext_16i16_to_8i32_offset8(<16 x i16> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: shuf_zext_16i16_to_8i32_offset8:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuf_zext_16i16_to_8i32_offset8:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuf_zext_16i16_to_8i32_offset8:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,2,3,3]
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE41-NEXT: movdqa %xmm2, %xmm1
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuf_zext_16i16_to_8i32_offset8:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,2,3,3]
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuf_zext_16i16_to_8i32_offset8:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: shuf_zext_16i16_to_8i32_offset8:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT: retq
+entry:
+ %B = shufflevector <16 x i16> %A, <16 x i16> zeroinitializer, <16 x i32> <i32 8, i32 16, i32 9, i32 16, i32 10, i32 16, i32 11, i32 16, i32 12, i32 16, i32 undef, i32 16, i32 14, i32 16, i32 undef, i32 16>
+ %Z = bitcast <16 x i16> %B to <8 x i32>
+ ret <8 x i32> %Z
+}
+
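+; Zero extension of the high two i32 elements to i64; a single unpckhps
+; against a zeroed register suffices on all targets.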
+define <2 x i64> @shuf_zext_4i32_to_2i64_offset2(<4 x i32> %A) nounwind uwtable readnone ssp {
+; SSE-LABEL: shuf_zext_4i32_to_2i64_offset2:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: xorps %xmm1, %xmm1
+; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuf_zext_4i32_to_2i64_offset2:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX-NEXT: retq
+entry:
+ %B = shufflevector <4 x i32> %A, <4 x i32> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 3, i32 4>
+ %Z = bitcast <4 x i32> %B to <2 x i64>
+ ret <2 x i64> %Z
+}
+
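+; Offset-1 zero extension of 4 x i32 to 4 x i64; the first and last
+; result lanes have undef low halves, which gives the backend extra
+; shuffle freedom.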
+define <4 x i64> @shuf_zext_4i32_to_4i64_offset1(<4 x i32> %A) nounwind uwtable readnone ssp {
+; SSE2-LABEL: shuf_zext_4i32_to_4i64_offset1:
+; SSE2: # %bb.0: # %entry
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,0,4294967295,0]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: shuf_zext_4i32_to_4i64_offset1:
+; SSSE3: # %bb.0: # %entry
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [0,0,4294967295,0]
+; SSSE3-NEXT: pand %xmm1, %xmm0
+; SSSE3-NEXT: psrldq {{.*#+}} xmm1 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuf_zext_4i32_to_4i64_offset1:
+; SSE41: # %bb.0: # %entry
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: pxor %xmm0, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
+; SSE41-NEXT: psrldq {{.*#+}} xmm1 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: shuf_zext_4i32_to_4i64_offset1:
+; AVX1: # %bb.0: # %entry
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
+; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuf_zext_4i32_to_4i64_offset1:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,2,3,3]
+; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: shuf_zext_4i32_to_4i64_offset1:
+; AVX512: # %bb.0: # %entry
+; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,2,3,3]
+; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-NEXT: retq
+entry:
+ %B = shufflevector <4 x i32> %A, <4 x i32> zeroinitializer, <8 x i32> <i32 undef, i32 4, i32 2, i32 4, i32 3, i32 4, i32 undef, i32 4>
+ %Z = bitcast <8 x i32> %B to <4 x i64>
+ ret <4 x i64> %Z
+}
+
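+; A plain 32 x i8 -> 32 x i32 zext. The 1024-bit result is returned via
+; a hidden sret pointer on the SSE targets, and in four ymm registers on
+; AVX1/AVX2 or two zmm registers on AVX512.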
+define <32 x i32> @zext_32i8_to_32i32(<32 x i8> %x) {
+; SSE2-LABEL: zext_32i8_to_32i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movq %rdi, %rax
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-NEXT: movdqa %xmm3, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm6
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; SSE2-NEXT: movdqa %xmm6, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSE2-NEXT: movdqa %xmm1, 112(%rdi)
+; SSE2-NEXT: movdqa %xmm4, 96(%rdi)
+; SSE2-NEXT: movdqa %xmm6, 80(%rdi)
+; SSE2-NEXT: movdqa %xmm7, 64(%rdi)
+; SSE2-NEXT: movdqa %xmm0, 48(%rdi)
+; SSE2-NEXT: movdqa %xmm5, 32(%rdi)
+; SSE2-NEXT: movdqa %xmm3, 16(%rdi)
+; SSE2-NEXT: movdqa %xmm8, (%rdi)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_32i8_to_32i32:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movq %rdi, %rax
+; SSSE3-NEXT: pxor %xmm2, %xmm2
+; SSSE3-NEXT: movdqa %xmm0, %xmm3
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSSE3-NEXT: movdqa %xmm3, %xmm8
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; SSSE3-NEXT: movdqa %xmm0, %xmm5
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: movdqa %xmm1, %xmm6
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; SSSE3-NEXT: movdqa %xmm6, %xmm7
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; SSSE3-NEXT: movdqa %xmm1, %xmm4
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; SSSE3-NEXT: movdqa %xmm1, 112(%rdi)
+; SSSE3-NEXT: movdqa %xmm4, 96(%rdi)
+; SSSE3-NEXT: movdqa %xmm6, 80(%rdi)
+; SSSE3-NEXT: movdqa %xmm7, 64(%rdi)
+; SSSE3-NEXT: movdqa %xmm0, 48(%rdi)
+; SSSE3-NEXT: movdqa %xmm5, 32(%rdi)
+; SSSE3-NEXT: movdqa %xmm3, 16(%rdi)
+; SSSE3-NEXT: movdqa %xmm8, (%rdi)
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_32i8_to_32i32:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movq %rdi, %rax
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm1[1,1,2,3]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,3,0,1]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE41-NEXT: movdqa %xmm1, 112(%rdi)
+; SSE41-NEXT: movdqa %xmm7, 96(%rdi)
+; SSE41-NEXT: movdqa %xmm6, 80(%rdi)
+; SSE41-NEXT: movdqa %xmm5, 64(%rdi)
+; SSE41-NEXT: movdqa %xmm0, 48(%rdi)
+; SSE41-NEXT: movdqa %xmm4, 32(%rdi)
+; SSE41-NEXT: movdqa %xmm3, 16(%rdi)
+; SSE41-NEXT: movdqa %xmm2, (%rdi)
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: zext_32i8_to_32i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[1,1,2,3]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,0,1]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[3,3,0,1]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX1-NEXT: vmovaps %ymm4, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: zext_32i8_to_32i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm4 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm3 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-NEXT: vmovdqa %ymm4, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: zext_32i8_to_32i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512-NEXT: retq
+ %res = zext <32 x i8> %x to <32 x i32>
+ ret <32 x i32> %res
+}
+
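+; A 2 x i8 load zero-extended to 2 x i32 and doubled. SSE2 loads both
+; bytes with a single movzwl and unpacks against zero; SSE4.1/AVX build
+; the vector from two scalar movzbl loads instead.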
+define <2 x i32> @zext_2i8_to_2i32(<2 x i8>* %addr) {
+; SSE2-LABEL: zext_2i8_to_2i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movzwl (%rdi), %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: paddd %xmm0, %xmm0
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: zext_2i8_to_2i32:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movzwl (%rdi), %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: pxor %xmm1, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: paddd %xmm0, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: zext_2i8_to_2i32:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movzbl 1(%rdi), %eax
+; SSE41-NEXT: movzbl (%rdi), %ecx
+; SSE41-NEXT: movd %ecx, %xmm0
+; SSE41-NEXT: pinsrd $1, %eax, %xmm0
+; SSE41-NEXT: paddd %xmm0, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: zext_2i8_to_2i32:
+; AVX: # %bb.0:
+; AVX-NEXT: movzbl 1(%rdi), %eax
+; AVX-NEXT: movzbl (%rdi), %ecx
+; AVX-NEXT: vmovd %ecx, %xmm0
+; AVX-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
+; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %x = load <2 x i8>, <2 x i8>* %addr, align 1
+ %y = zext <2 x i8> %x to <2 x i32>
+ %z = add <2 x i32> %y, %y
+ ret <2 x i32> %z
+}