[llvm] r261989 - [X86][F16C] Added native IR half/float conversion tests.
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Fri Feb 26 00:52:29 PST 2016
Author: rksimon
Date: Fri Feb 26 02:52:29 2016
New Revision: 261989
URL: http://llvm.org/viewvc/llvm-project?rev=261989&view=rev
Log:
[X86][F16C] Added native IR half/float conversion tests.
Placeholder tests until we start improving native vector support.
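For context, the tests use the plain IR half conversion pattern rather than target intrinsics: the i16 lanes are bitcast to half and widened with fpext, and the reverse direction uses fptrunc followed by a bitcast back to i16, so the F16C half<->float lowering is exercised directly from IR. A minimal sketch of the two patterns under test (function and value names here are illustrative only; the committed tests use cvt_*, load_cvt_* and store_cvt_* functions, shown in full in the diff below):

define <4 x float> @sketch_half_to_float(<4 x i16> %a0) {
  ; reinterpret the 16-bit lanes as half, then widen to float
  %h = bitcast <4 x i16> %a0 to <4 x half>
  %f = fpext <4 x half> %h to <4 x float>
  ret <4 x float> %f
}

define <4 x i16> @sketch_float_to_half(<4 x float> %a0) {
  ; narrow to half, then reinterpret the lanes as i16
  %h = fptrunc <4 x float> %a0 to <4 x half>
  %i = bitcast <4 x half> %h to <4 x i16>
  ret <4 x i16> %i
}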
Added:
llvm/trunk/test/CodeGen/X86/vector-half-conversions.ll (with props)
Added: llvm/trunk/test/CodeGen/X86/vector-half-conversions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-half-conversions.ll?rev=261989&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-half-conversions.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-half-conversions.ll Fri Feb 26 02:52:29 2016
@@ -0,0 +1,1760 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+f16c | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+f16c | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+f16c | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512
+
+;
+; Half to Float
+;
+
+define float @cvt_i16_to_f32(i16 %a0) {
+; ALL-LABEL: cvt_i16_to_f32:
+; ALL: # BB#0:
+; ALL-NEXT: movswl %di, %eax
+; ALL-NEXT: vmovd %eax, %xmm0
+; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT: retq
+ %1 = bitcast i16 %a0 to half
+ %2 = fpext half %1 to float
+ ret float %2
+}
+
+define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) {
+; ALL-LABEL: cvt_4i16_to_4f32:
+; ALL: # BB#0:
+; ALL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; ALL-NEXT: vmovq %xmm0, %rax
+; ALL-NEXT: movq %rax, %rcx
+; ALL-NEXT: movq %rax, %rdx
+; ALL-NEXT: movswl %ax, %esi
+; ALL-NEXT: shrl $16, %eax
+; ALL-NEXT: shrq $32, %rcx
+; ALL-NEXT: shrq $48, %rdx
+; ALL-NEXT: movswl %dx, %edx
+; ALL-NEXT: vmovd %edx, %xmm0
+; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT: movswl %cx, %ecx
+; ALL-NEXT: vmovd %ecx, %xmm1
+; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
+; ALL-NEXT: cwtl
+; ALL-NEXT: vmovd %eax, %xmm2
+; ALL-NEXT: vcvtph2ps %xmm2, %xmm2
+; ALL-NEXT: vmovd %esi, %xmm3
+; ALL-NEXT: vcvtph2ps %xmm3, %xmm3
+; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; ALL-NEXT: retq
+ %1 = bitcast <4 x i16> %a0 to <4 x half>
+ %2 = fpext <4 x half> %1 to <4 x float>
+ ret <4 x float> %2
+}
+
+define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) {
+; ALL-LABEL: cvt_8i16_to_4f32:
+; ALL: # BB#0:
+; ALL-NEXT: vmovq %xmm0, %rax
+; ALL-NEXT: movq %rax, %rcx
+; ALL-NEXT: movq %rax, %rdx
+; ALL-NEXT: movswl %ax, %esi
+; ALL-NEXT: shrl $16, %eax
+; ALL-NEXT: shrq $32, %rcx
+; ALL-NEXT: shrq $48, %rdx
+; ALL-NEXT: movswl %dx, %edx
+; ALL-NEXT: vmovd %edx, %xmm0
+; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT: movswl %cx, %ecx
+; ALL-NEXT: vmovd %ecx, %xmm1
+; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
+; ALL-NEXT: cwtl
+; ALL-NEXT: vmovd %eax, %xmm2
+; ALL-NEXT: vcvtph2ps %xmm2, %xmm2
+; ALL-NEXT: vmovd %esi, %xmm3
+; ALL-NEXT: vcvtph2ps %xmm3, %xmm3
+; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; ALL-NEXT: retq
+ %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = bitcast <4 x i16> %1 to <4 x half>
+ %3 = fpext <4 x half> %2 to <4 x float>
+ ret <4 x float> %3
+}
+
+define <8 x float> @cvt_8i16_to_8f32(<8 x i16> %a0) {
+; AVX1-LABEL: cvt_8i16_to_8f32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX1-NEXT: movq %rdx, %r8
+; AVX1-NEXT: movq %rdx, %r10
+; AVX1-NEXT: movswl %dx, %r9d
+; AVX1-NEXT: shrl $16, %edx
+; AVX1-NEXT: shrq $32, %r8
+; AVX1-NEXT: shrq $48, %r10
+; AVX1-NEXT: vmovq %xmm0, %rdi
+; AVX1-NEXT: movq %rdi, %rax
+; AVX1-NEXT: movq %rdi, %rsi
+; AVX1-NEXT: movswl %di, %ecx
+; AVX1-NEXT: shrl $16, %edi
+; AVX1-NEXT: shrq $32, %rax
+; AVX1-NEXT: shrq $48, %rsi
+; AVX1-NEXT: movswl %si, %esi
+; AVX1-NEXT: vmovd %esi, %xmm0
+; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX1-NEXT: cwtl
+; AVX1-NEXT: vmovd %eax, %xmm1
+; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX1-NEXT: movswl %di, %eax
+; AVX1-NEXT: vmovd %eax, %xmm2
+; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX1-NEXT: vmovd %ecx, %xmm3
+; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX1-NEXT: movswl %r10w, %eax
+; AVX1-NEXT: vmovd %eax, %xmm4
+; AVX1-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX1-NEXT: movswl %r8w, %eax
+; AVX1-NEXT: vmovd %eax, %xmm5
+; AVX1-NEXT: vcvtph2ps %xmm5, %xmm5
+; AVX1-NEXT: movswl %dx, %eax
+; AVX1-NEXT: vmovd %eax, %xmm6
+; AVX1-NEXT: vcvtph2ps %xmm6, %xmm6
+; AVX1-NEXT: vmovd %r9d, %xmm7
+; AVX1-NEXT: vcvtph2ps %xmm7, %xmm7
+; AVX1-NEXT: vinsertps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: cvt_8i16_to_8f32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX2-NEXT: movq %rdx, %r8
+; AVX2-NEXT: movq %rdx, %r10
+; AVX2-NEXT: movswl %dx, %r9d
+; AVX2-NEXT: shrl $16, %edx
+; AVX2-NEXT: shrq $32, %r8
+; AVX2-NEXT: shrq $48, %r10
+; AVX2-NEXT: vmovq %xmm0, %rdi
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: movq %rdi, %rsi
+; AVX2-NEXT: movswl %di, %ecx
+; AVX2-NEXT: shrl $16, %edi
+; AVX2-NEXT: shrq $32, %rax
+; AVX2-NEXT: shrq $48, %rsi
+; AVX2-NEXT: movswl %si, %esi
+; AVX2-NEXT: vmovd %esi, %xmm0
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX2-NEXT: cwtl
+; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX2-NEXT: movswl %di, %eax
+; AVX2-NEXT: vmovd %eax, %xmm2
+; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX2-NEXT: vmovd %ecx, %xmm3
+; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX2-NEXT: movswl %r10w, %eax
+; AVX2-NEXT: vmovd %eax, %xmm4
+; AVX2-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX2-NEXT: movswl %r8w, %eax
+; AVX2-NEXT: vmovd %eax, %xmm5
+; AVX2-NEXT: vcvtph2ps %xmm5, %xmm5
+; AVX2-NEXT: movswl %dx, %eax
+; AVX2-NEXT: vmovd %eax, %xmm6
+; AVX2-NEXT: vcvtph2ps %xmm6, %xmm6
+; AVX2-NEXT: vmovd %r9d, %xmm7
+; AVX2-NEXT: vcvtph2ps %xmm7, %xmm7
+; AVX2-NEXT: vinsertps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX2-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: cvt_8i16_to_8f32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512-NEXT: movq %rdx, %r8
+; AVX512-NEXT: movq %rdx, %r10
+; AVX512-NEXT: movswl %dx, %r9d
+; AVX512-NEXT: shrl $16, %edx
+; AVX512-NEXT: shrq $32, %r8
+; AVX512-NEXT: shrq $48, %r10
+; AVX512-NEXT: vmovq %xmm0, %rdi
+; AVX512-NEXT: movq %rdi, %rax
+; AVX512-NEXT: movq %rdi, %rsi
+; AVX512-NEXT: movswl %di, %ecx
+; AVX512-NEXT: shrl $16, %edi
+; AVX512-NEXT: shrq $32, %rax
+; AVX512-NEXT: shrq $48, %rsi
+; AVX512-NEXT: movswl %si, %esi
+; AVX512-NEXT: vmovd %esi, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: cwtl
+; AVX512-NEXT: vmovd %eax, %xmm1
+; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX512-NEXT: movswl %di, %eax
+; AVX512-NEXT: vmovd %eax, %xmm2
+; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT: vmovd %ecx, %xmm3
+; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT: movswl %r10w, %eax
+; AVX512-NEXT: vmovd %eax, %xmm4
+; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX512-NEXT: movswl %r8w, %eax
+; AVX512-NEXT: vmovd %eax, %xmm5
+; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5
+; AVX512-NEXT: movswl %dx, %eax
+; AVX512-NEXT: vmovd %eax, %xmm6
+; AVX512-NEXT: vcvtph2ps %xmm6, %xmm6
+; AVX512-NEXT: vmovd %r9d, %xmm7
+; AVX512-NEXT: vcvtph2ps %xmm7, %xmm7
+; AVX512-NEXT: vinsertps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[2,3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %1 = bitcast <8 x i16> %a0 to <8 x half>
+ %2 = fpext <8 x half> %1 to <8 x float>
+ ret <8 x float> %2
+}
+
+define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) {
+; AVX1-LABEL: cvt_16i16_to_16f32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vmovq %xmm4, %rax
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $48, %rcx
+; AVX1-NEXT: movswl %cx, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm8
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $32, %rcx
+; AVX1-NEXT: movswl %cx, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm9
+; AVX1-NEXT: movswl %ax, %ecx
+; AVX1-NEXT: shrl $16, %eax
+; AVX1-NEXT: cwtl
+; AVX1-NEXT: vmovd %eax, %xmm10
+; AVX1-NEXT: vpextrq $1, %xmm4, %rax
+; AVX1-NEXT: vmovd %ecx, %xmm11
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $48, %rcx
+; AVX1-NEXT: movswl %cx, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm12
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $32, %rcx
+; AVX1-NEXT: movswl %cx, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm13
+; AVX1-NEXT: movswl %ax, %ecx
+; AVX1-NEXT: shrl $16, %eax
+; AVX1-NEXT: cwtl
+; AVX1-NEXT: vmovd %eax, %xmm14
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: vmovd %ecx, %xmm15
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $48, %rcx
+; AVX1-NEXT: movswl %cx, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm2
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $32, %rcx
+; AVX1-NEXT: movswl %cx, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm3
+; AVX1-NEXT: movswl %ax, %ecx
+; AVX1-NEXT: shrl $16, %eax
+; AVX1-NEXT: cwtl
+; AVX1-NEXT: vmovd %eax, %xmm4
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $48, %rcx
+; AVX1-NEXT: movswl %cx, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm5
+; AVX1-NEXT: movq %rax, %rcx
+; AVX1-NEXT: shrq $32, %rcx
+; AVX1-NEXT: movswl %cx, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm6
+; AVX1-NEXT: movl %eax, %ecx
+; AVX1-NEXT: shrl $16, %ecx
+; AVX1-NEXT: movswl %cx, %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm7
+; AVX1-NEXT: cwtl
+; AVX1-NEXT: vmovd %eax, %xmm1
+; AVX1-NEXT: vcvtph2ps %xmm8, %xmm8
+; AVX1-NEXT: vcvtph2ps %xmm9, %xmm9
+; AVX1-NEXT: vcvtph2ps %xmm10, %xmm10
+; AVX1-NEXT: vcvtph2ps %xmm11, %xmm11
+; AVX1-NEXT: vcvtph2ps %xmm12, %xmm12
+; AVX1-NEXT: vcvtph2ps %xmm13, %xmm13
+; AVX1-NEXT: vcvtph2ps %xmm14, %xmm14
+; AVX1-NEXT: vcvtph2ps %xmm15, %xmm15
+; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX1-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX1-NEXT: vcvtph2ps %xmm5, %xmm5
+; AVX1-NEXT: vcvtph2ps %xmm6, %xmm6
+; AVX1-NEXT: vcvtph2ps %xmm7, %xmm7
+; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm6[0],xmm1[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[0]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm15[0],xmm14[0],xmm15[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm13[0],xmm1[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm12[0]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: cvt_16i16_to_16f32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
+; AVX2-NEXT: vmovq %xmm4, %rax
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $48, %rcx
+; AVX2-NEXT: movswl %cx, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm8
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $32, %rcx
+; AVX2-NEXT: movswl %cx, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm9
+; AVX2-NEXT: movswl %ax, %ecx
+; AVX2-NEXT: shrl $16, %eax
+; AVX2-NEXT: cwtl
+; AVX2-NEXT: vmovd %eax, %xmm10
+; AVX2-NEXT: vpextrq $1, %xmm4, %rax
+; AVX2-NEXT: vmovd %ecx, %xmm11
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $48, %rcx
+; AVX2-NEXT: movswl %cx, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm12
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $32, %rcx
+; AVX2-NEXT: movswl %cx, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm13
+; AVX2-NEXT: movswl %ax, %ecx
+; AVX2-NEXT: shrl $16, %eax
+; AVX2-NEXT: cwtl
+; AVX2-NEXT: vmovd %eax, %xmm14
+; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: vmovd %ecx, %xmm15
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $48, %rcx
+; AVX2-NEXT: movswl %cx, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm2
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $32, %rcx
+; AVX2-NEXT: movswl %cx, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm3
+; AVX2-NEXT: movswl %ax, %ecx
+; AVX2-NEXT: shrl $16, %eax
+; AVX2-NEXT: cwtl
+; AVX2-NEXT: vmovd %eax, %xmm4
+; AVX2-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-NEXT: vmovd %ecx, %xmm0
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $48, %rcx
+; AVX2-NEXT: movswl %cx, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm5
+; AVX2-NEXT: movq %rax, %rcx
+; AVX2-NEXT: shrq $32, %rcx
+; AVX2-NEXT: movswl %cx, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm6
+; AVX2-NEXT: movl %eax, %ecx
+; AVX2-NEXT: shrl $16, %ecx
+; AVX2-NEXT: movswl %cx, %ecx
+; AVX2-NEXT: vmovd %ecx, %xmm7
+; AVX2-NEXT: cwtl
+; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vcvtph2ps %xmm8, %xmm8
+; AVX2-NEXT: vcvtph2ps %xmm9, %xmm9
+; AVX2-NEXT: vcvtph2ps %xmm10, %xmm10
+; AVX2-NEXT: vcvtph2ps %xmm11, %xmm11
+; AVX2-NEXT: vcvtph2ps %xmm12, %xmm12
+; AVX2-NEXT: vcvtph2ps %xmm13, %xmm13
+; AVX2-NEXT: vcvtph2ps %xmm14, %xmm14
+; AVX2-NEXT: vcvtph2ps %xmm15, %xmm15
+; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX2-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX2-NEXT: vcvtph2ps %xmm5, %xmm5
+; AVX2-NEXT: vcvtph2ps %xmm6, %xmm6
+; AVX2-NEXT: vcvtph2ps %xmm7, %xmm7
+; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm6[0],xmm1[3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[0]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[0]
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm15[0],xmm14[0],xmm15[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm13[0],xmm1[3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm12[0]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[0]
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: cvt_16i16_to_16f32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm10
+; AVX512-NEXT: vmovq %xmm0, %rax
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: shrq $48, %rcx
+; AVX512-NEXT: movswl %cx, %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm8
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: shrq $32, %rcx
+; AVX512-NEXT: movswl %cx, %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm9
+; AVX512-NEXT: movswl %ax, %ecx
+; AVX512-NEXT: shrl $16, %eax
+; AVX512-NEXT: cwtl
+; AVX512-NEXT: vmovd %eax, %xmm11
+; AVX512-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512-NEXT: vmovd %ecx, %xmm12
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: shrq $48, %rcx
+; AVX512-NEXT: movswl %cx, %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm13
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: shrq $32, %rcx
+; AVX512-NEXT: movswl %cx, %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm14
+; AVX512-NEXT: movswl %ax, %ecx
+; AVX512-NEXT: shrl $16, %eax
+; AVX512-NEXT: cwtl
+; AVX512-NEXT: vmovd %eax, %xmm15
+; AVX512-NEXT: vmovq %xmm10, %rax
+; AVX512-NEXT: vmovd %ecx, %xmm2
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: shrq $48, %rcx
+; AVX512-NEXT: movswl %cx, %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm3
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: shrq $32, %rcx
+; AVX512-NEXT: movswl %cx, %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm1
+; AVX512-NEXT: movswl %ax, %ecx
+; AVX512-NEXT: shrl $16, %eax
+; AVX512-NEXT: cwtl
+; AVX512-NEXT: vmovd %eax, %xmm4
+; AVX512-NEXT: vpextrq $1, %xmm10, %rax
+; AVX512-NEXT: vmovd %ecx, %xmm10
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: shrq $48, %rcx
+; AVX512-NEXT: movswl %cx, %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm5
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: shrq $32, %rcx
+; AVX512-NEXT: movswl %cx, %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm6
+; AVX512-NEXT: movl %eax, %ecx
+; AVX512-NEXT: shrl $16, %ecx
+; AVX512-NEXT: movswl %cx, %ecx
+; AVX512-NEXT: vmovd %ecx, %xmm7
+; AVX512-NEXT: cwtl
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm8, %xmm8
+; AVX512-NEXT: vcvtph2ps %xmm9, %xmm9
+; AVX512-NEXT: vcvtph2ps %xmm11, %xmm11
+; AVX512-NEXT: vcvtph2ps %xmm12, %xmm12
+; AVX512-NEXT: vcvtph2ps %xmm13, %xmm13
+; AVX512-NEXT: vcvtph2ps %xmm14, %xmm14
+; AVX512-NEXT: vcvtph2ps %xmm15, %xmm15
+; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX512-NEXT: vcvtph2ps %xmm10, %xmm10
+; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5
+; AVX512-NEXT: vcvtph2ps %xmm6, %xmm6
+; AVX512-NEXT: vcvtph2ps %xmm7, %xmm7
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[2,3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm6[0],xmm0[3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm5[0]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm4 = xmm10[0],xmm4[0],xmm10[2,3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0,1],xmm1[0],xmm4[3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0]
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm15[0],xmm2[2,3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm14[0],xmm1[3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm13[0]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm12[0],xmm11[0],xmm12[2,3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[0]
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: retq
+ %1 = bitcast <16 x i16> %a0 to <16 x half>
+ %2 = fpext <16 x half> %1 to <16 x float>
+ ret <16 x float> %2
+}
+
+;
+; Half to Float (Load)
+;
+
+define <4 x float> @load_cvt_4i16_to_4f32(<4 x i16>* %a0) {
+; ALL-LABEL: load_cvt_4i16_to_4f32:
+; ALL: # BB#0:
+; ALL-NEXT: movswl 6(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm0
+; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT: movswl 4(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm1
+; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
+; ALL-NEXT: movswl (%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm2
+; ALL-NEXT: vcvtph2ps %xmm2, %xmm2
+; ALL-NEXT: movswl 2(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm3
+; ALL-NEXT: vcvtph2ps %xmm3, %xmm3
+; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; ALL-NEXT: retq
+ %1 = load <4 x i16>, <4 x i16>* %a0
+ %2 = bitcast <4 x i16> %1 to <4 x half>
+ %3 = fpext <4 x half> %2 to <4 x float>
+ ret <4 x float> %3
+}
+
+define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) {
+; ALL-LABEL: load_cvt_8i16_to_4f32:
+; ALL: # BB#0:
+; ALL-NEXT: movq (%rdi), %rax
+; ALL-NEXT: movq %rax, %rcx
+; ALL-NEXT: movq %rax, %rdx
+; ALL-NEXT: movswl %ax, %esi
+; ALL-NEXT: shrl $16, %eax
+; ALL-NEXT: shrq $32, %rcx
+; ALL-NEXT: shrq $48, %rdx
+; ALL-NEXT: movswl %dx, %edx
+; ALL-NEXT: vmovd %edx, %xmm0
+; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT: movswl %cx, %ecx
+; ALL-NEXT: vmovd %ecx, %xmm1
+; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
+; ALL-NEXT: cwtl
+; ALL-NEXT: vmovd %eax, %xmm2
+; ALL-NEXT: vcvtph2ps %xmm2, %xmm2
+; ALL-NEXT: vmovd %esi, %xmm3
+; ALL-NEXT: vcvtph2ps %xmm3, %xmm3
+; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; ALL-NEXT: retq
+ %1 = load <8 x i16>, <8 x i16>* %a0
+ %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = bitcast <4 x i16> %2 to <4 x half>
+ %4 = fpext <4 x half> %3 to <4 x float>
+ ret <4 x float> %4
+}
+
+define <8 x float> @load_cvt_8i16_to_8f32(<8 x i16>* %a0) {
+; AVX1-LABEL: load_cvt_8i16_to_8f32:
+; AVX1: # BB#0:
+; AVX1-NEXT: movswl 6(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX1-NEXT: movswl 4(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm1
+; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX1-NEXT: movswl (%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm2
+; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX1-NEXT: movswl 2(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm3
+; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX1-NEXT: movswl 14(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm4
+; AVX1-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX1-NEXT: movswl 12(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm5
+; AVX1-NEXT: vcvtph2ps %xmm5, %xmm5
+; AVX1-NEXT: movswl 8(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm6
+; AVX1-NEXT: vcvtph2ps %xmm6, %xmm6
+; AVX1-NEXT: movswl 10(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm7
+; AVX1-NEXT: vcvtph2ps %xmm7, %xmm7
+; AVX1-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_cvt_8i16_to_8f32:
+; AVX2: # BB#0:
+; AVX2-NEXT: movswl 6(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX2-NEXT: movswl 4(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX2-NEXT: movswl (%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm2
+; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX2-NEXT: movswl 2(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm3
+; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX2-NEXT: movswl 14(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm4
+; AVX2-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX2-NEXT: movswl 12(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm5
+; AVX2-NEXT: vcvtph2ps %xmm5, %xmm5
+; AVX2-NEXT: movswl 8(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm6
+; AVX2-NEXT: vcvtph2ps %xmm6, %xmm6
+; AVX2-NEXT: movswl 10(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm7
+; AVX2-NEXT: vcvtph2ps %xmm7, %xmm7
+; AVX2-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX2-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_cvt_8i16_to_8f32:
+; AVX512: # BB#0:
+; AVX512-NEXT: movswl 6(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: movswl 4(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm1
+; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX512-NEXT: movswl (%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm2
+; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT: movswl 2(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm3
+; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT: movswl 14(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm4
+; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX512-NEXT: movswl 12(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm5
+; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5
+; AVX512-NEXT: movswl 8(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm6
+; AVX512-NEXT: vcvtph2ps %xmm6, %xmm6
+; AVX512-NEXT: movswl 10(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm7
+; AVX512-NEXT: vcvtph2ps %xmm7, %xmm7
+; AVX512-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %1 = load <8 x i16>, <8 x i16>* %a0
+ %2 = bitcast <8 x i16> %1 to <8 x half>
+ %3 = fpext <8 x half> %2 to <8 x float>
+ ret <8 x float> %3
+}
+
+define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) {
+; AVX1-LABEL: load_cvt_16i16_to_16f32:
+; AVX1: # BB#0:
+; AVX1-NEXT: movswl 22(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vcvtph2ps %xmm0, %xmm8
+; AVX1-NEXT: movswl 20(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vcvtph2ps %xmm0, %xmm9
+; AVX1-NEXT: movswl 16(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vcvtph2ps %xmm0, %xmm10
+; AVX1-NEXT: movswl 18(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vcvtph2ps %xmm0, %xmm11
+; AVX1-NEXT: movswl 30(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vcvtph2ps %xmm0, %xmm12
+; AVX1-NEXT: movswl 28(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vcvtph2ps %xmm0, %xmm13
+; AVX1-NEXT: movswl 24(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vcvtph2ps %xmm0, %xmm14
+; AVX1-NEXT: movswl 26(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vcvtph2ps %xmm0, %xmm15
+; AVX1-NEXT: movswl 6(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX1-NEXT: movswl 4(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm2
+; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX1-NEXT: movswl (%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm3
+; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX1-NEXT: movswl 2(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm4
+; AVX1-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX1-NEXT: movswl 14(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm5
+; AVX1-NEXT: vcvtph2ps %xmm5, %xmm5
+; AVX1-NEXT: movswl 12(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm6
+; AVX1-NEXT: vcvtph2ps %xmm6, %xmm6
+; AVX1-NEXT: movswl 8(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm7
+; AVX1-NEXT: vcvtph2ps %xmm7, %xmm7
+; AVX1-NEXT: movswl 10(%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm1
+; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm6[0],xmm1[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[0]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0,1],xmm2[0],xmm3[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm14[0],xmm15[0],xmm14[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm13[0],xmm1[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm12[0]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm10[0],xmm11[0],xmm10[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: load_cvt_16i16_to_16f32:
+; AVX2: # BB#0:
+; AVX2-NEXT: movswl 22(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm8
+; AVX2-NEXT: movswl 20(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm9
+; AVX2-NEXT: movswl 16(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm10
+; AVX2-NEXT: movswl 18(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm11
+; AVX2-NEXT: movswl 30(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm12
+; AVX2-NEXT: movswl 28(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm13
+; AVX2-NEXT: movswl 24(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm14
+; AVX2-NEXT: movswl 26(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm15
+; AVX2-NEXT: movswl 6(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX2-NEXT: movswl 4(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm2
+; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX2-NEXT: movswl (%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm3
+; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX2-NEXT: movswl 2(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm4
+; AVX2-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX2-NEXT: movswl 14(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm5
+; AVX2-NEXT: vcvtph2ps %xmm5, %xmm5
+; AVX2-NEXT: movswl 12(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm6
+; AVX2-NEXT: vcvtph2ps %xmm6, %xmm6
+; AVX2-NEXT: movswl 8(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm7
+; AVX2-NEXT: vcvtph2ps %xmm7, %xmm7
+; AVX2-NEXT: movswl 10(%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm6[0],xmm1[3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[0]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0,1],xmm2[0],xmm3[3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm14[0],xmm15[0],xmm14[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm13[0],xmm1[3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm12[0]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm10[0],xmm11[0],xmm10[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[0]
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_cvt_16i16_to_16f32:
+; AVX512: # BB#0:
+; AVX512-NEXT: movswl 6(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm8
+; AVX512-NEXT: movswl 4(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm9
+; AVX512-NEXT: movswl (%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm10
+; AVX512-NEXT: movswl 2(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm11
+; AVX512-NEXT: movswl 14(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm12
+; AVX512-NEXT: movswl 12(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm13
+; AVX512-NEXT: movswl 8(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm14
+; AVX512-NEXT: movswl 10(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm15
+; AVX512-NEXT: movswl 22(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: movswl 20(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm1
+; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX512-NEXT: movswl 16(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm2
+; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT: movswl 18(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm3
+; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT: movswl 30(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm4
+; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX512-NEXT: movswl 28(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm5
+; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5
+; AVX512-NEXT: movswl 24(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm6
+; AVX512-NEXT: vcvtph2ps %xmm6, %xmm6
+; AVX512-NEXT: movswl 26(%rdi), %eax
+; AVX512-NEXT: vmovd %eax, %xmm7
+; AVX512-NEXT: vcvtph2ps %xmm7, %xmm7
+; AVX512-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0
+; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm14[0],xmm15[0],xmm14[2,3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm13[0],xmm1[3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm12[0]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm10[0],xmm11[0],xmm10[2,3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
+; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[0]
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: retq
+ %1 = load <16 x i16>, <16 x i16>* %a0
+ %2 = bitcast <16 x i16> %1 to <16 x half>
+ %3 = fpext <16 x half> %2 to <16 x float>
+ ret <16 x float> %3
+}
+
+;
+; Float to Half
+;
+
+define i16 @cvt_f32_to_i16(float %a0) {
+; ALL-LABEL: cvt_f32_to_i16:
+; ALL: # BB#0:
+; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; ALL-NEXT: vmovd %xmm0, %eax
+; ALL-NEXT: retq
+ %1 = fptrunc float %a0 to half
+ %2 = bitcast half %1 to i16
+ ret i16 %2
+}
+
+define <4 x i16> @cvt_4f32_to_4i16(<4 x float> %a0) {
+; ALL-LABEL: cvt_4f32_to_4i16:
+; ALL: # BB#0:
+; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT: vmovd %xmm1, %eax
+; ALL-NEXT: shll $16, %eax
+; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1
+; ALL-NEXT: vmovd %xmm1, %ecx
+; ALL-NEXT: movzwl %cx, %ecx
+; ALL-NEXT: orl %eax, %ecx
+; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT: vmovd %xmm1, %eax
+; ALL-NEXT: shll $16, %eax
+; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; ALL-NEXT: vmovd %xmm0, %edx
+; ALL-NEXT: movzwl %dx, %edx
+; ALL-NEXT: orl %eax, %edx
+; ALL-NEXT: shlq $32, %rdx
+; ALL-NEXT: orq %rcx, %rdx
+; ALL-NEXT: vmovq %rdx, %xmm0
+; ALL-NEXT: retq
+ %1 = fptrunc <4 x float> %a0 to <4 x half>
+ %2 = bitcast <4 x half> %1 to <4 x i16>
+ ret <4 x i16> %2
+}
+
+define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) {
+; ALL-LABEL: cvt_4f32_to_8i16_undef:
+; ALL: # BB#0:
+; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT: vmovd %xmm1, %eax
+; ALL-NEXT: shll $16, %eax
+; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1
+; ALL-NEXT: vmovd %xmm1, %ecx
+; ALL-NEXT: movzwl %cx, %ecx
+; ALL-NEXT: orl %eax, %ecx
+; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT: vmovd %xmm1, %eax
+; ALL-NEXT: shll $16, %eax
+; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; ALL-NEXT: vmovd %xmm0, %edx
+; ALL-NEXT: movzwl %dx, %edx
+; ALL-NEXT: orl %eax, %edx
+; ALL-NEXT: shlq $32, %rdx
+; ALL-NEXT: orq %rcx, %rdx
+; ALL-NEXT: vmovq %rdx, %xmm0
+; ALL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; ALL-NEXT: retq
+ %1 = fptrunc <4 x float> %a0 to <4 x half>
+ %2 = bitcast <4 x half> %1 to <4 x i16>
+ %3 = shufflevector <4 x i16> %2, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %3
+}
+
+define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) {
+; ALL-LABEL: cvt_4f32_to_8i16_zero:
+; ALL: # BB#0:
+; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT: vmovd %xmm1, %eax
+; ALL-NEXT: shll $16, %eax
+; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1
+; ALL-NEXT: vmovd %xmm1, %ecx
+; ALL-NEXT: movzwl %cx, %ecx
+; ALL-NEXT: orl %eax, %ecx
+; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT: vmovd %xmm1, %eax
+; ALL-NEXT: shll $16, %eax
+; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; ALL-NEXT: vmovd %xmm0, %edx
+; ALL-NEXT: movzwl %dx, %edx
+; ALL-NEXT: orl %eax, %edx
+; ALL-NEXT: shlq $32, %rdx
+; ALL-NEXT: orq %rcx, %rdx
+; ALL-NEXT: vmovq %rdx, %xmm0
+; ALL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; ALL-NEXT: retq
+ %1 = fptrunc <4 x float> %a0 to <4 x half>
+ %2 = bitcast <4 x half> %1 to <4 x i16>
+ %3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %3
+}
+
+define <8 x i16> @cvt_8f32_to_8i16(<8 x float> %a0) {
+; AVX1-LABEL: cvt_8f32_to_8i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: shll $16, %eax
+; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm1
+; AVX1-NEXT: vmovd %xmm1, %ecx
+; AVX1-NEXT: movzwl %cx, %ecx
+; AVX1-NEXT: orl %eax, %ecx
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: vmovd %xmm1, %edx
+; AVX1-NEXT: shll $16, %edx
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: movzwl %ax, %eax
+; AVX1-NEXT: orl %edx, %eax
+; AVX1-NEXT: shlq $32, %rax
+; AVX1-NEXT: orq %rcx, %rax
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: vmovd %xmm1, %ecx
+; AVX1-NEXT: shll $16, %ecx
+; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm1
+; AVX1-NEXT: vmovd %xmm1, %edx
+; AVX1-NEXT: movzwl %dx, %edx
+; AVX1-NEXT: orl %ecx, %edx
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: vmovd %xmm1, %ecx
+; AVX1-NEXT: shll $16, %ecx
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %esi
+; AVX1-NEXT: movzwl %si, %esi
+; AVX1-NEXT: orl %ecx, %esi
+; AVX1-NEXT: shlq $32, %rsi
+; AVX1-NEXT: orq %rdx, %rsi
+; AVX1-NEXT: vmovq %rsi, %xmm0
+; AVX1-NEXT: vmovq %rax, %xmm1
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: cvt_8f32_to_8i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: shll $16, %eax
+; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm1
+; AVX2-NEXT: vmovd %xmm1, %ecx
+; AVX2-NEXT: movzwl %cx, %ecx
+; AVX2-NEXT: orl %eax, %ecx
+; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %xmm1, %edx
+; AVX2-NEXT: shll $16, %edx
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: movzwl %ax, %eax
+; AVX2-NEXT: orl %edx, %eax
+; AVX2-NEXT: shlq $32, %rax
+; AVX2-NEXT: orq %rcx, %rax
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %xmm1, %ecx
+; AVX2-NEXT: shll $16, %ecx
+; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm1
+; AVX2-NEXT: vmovd %xmm1, %edx
+; AVX2-NEXT: movzwl %dx, %edx
+; AVX2-NEXT: orl %ecx, %edx
+; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %xmm1, %ecx
+; AVX2-NEXT: shll $16, %ecx
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %esi
+; AVX2-NEXT: movzwl %si, %esi
+; AVX2-NEXT: orl %ecx, %esi
+; AVX2-NEXT: shlq $32, %rsi
+; AVX2-NEXT: orq %rdx, %rsi
+; AVX2-NEXT: vmovq %rsi, %xmm0
+; AVX2-NEXT: vmovq %rax, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: cvt_8f32_to_8i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: shll $16, %eax
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm1
+; AVX512-NEXT: vmovd %xmm1, %ecx
+; AVX512-NEXT: movzwl %cx, %ecx
+; AVX512-NEXT: orl %eax, %ecx
+; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX512-NEXT: vmovd %xmm1, %edx
+; AVX512-NEXT: shll $16, %edx
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: movzwl %ax, %eax
+; AVX512-NEXT: orl %edx, %eax
+; AVX512-NEXT: shlq $32, %rax
+; AVX512-NEXT: orq %rcx, %rax
+; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX512-NEXT: vmovd %xmm1, %ecx
+; AVX512-NEXT: shll $16, %ecx
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm1
+; AVX512-NEXT: vmovd %xmm1, %edx
+; AVX512-NEXT: movzwl %dx, %edx
+; AVX512-NEXT: orl %ecx, %edx
+; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX512-NEXT: vmovd %xmm1, %ecx
+; AVX512-NEXT: shll $16, %ecx
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %esi
+; AVX512-NEXT: movzwl %si, %esi
+; AVX512-NEXT: orl %ecx, %esi
+; AVX512-NEXT: shlq $32, %rsi
+; AVX512-NEXT: orq %rdx, %rsi
+; AVX512-NEXT: vmovq %rsi, %xmm0
+; AVX512-NEXT: vmovq %rax, %xmm1
+; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: retq
+ %1 = fptrunc <8 x float> %a0 to <8 x half>
+ %2 = bitcast <8 x half> %1 to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <16 x i16> @cvt_16f32_to_16i16(<16 x float> %a0) {
+; AVX1-LABEL: cvt_16f32_to_16i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm2
+; AVX1-NEXT: vmovd %xmm2, %eax
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX1-NEXT: vmovd %eax, %xmm3
+; AVX1-NEXT: vmovd %xmm2, %eax
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX1-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vmovd %xmm2, %eax
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm1
+; AVX1-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm1
+; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX1-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vmovd %xmm2, %eax
+; AVX1-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: vmovd %eax, %xmm3
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm0
+; AVX1-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: vpinsrw $6, %eax, %xmm3, %xmm1
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: cvt_16f32_to_16i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm2
+; AVX2-NEXT: vmovd %xmm2, %eax
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX2-NEXT: vmovd %eax, %xmm3
+; AVX2-NEXT: vmovd %xmm2, %eax
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX2-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vmovd %xmm2, %eax
+; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm1
+; AVX2-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm1
+; AVX2-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX2-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vmovd %xmm2, %eax
+; AVX2-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %eax, %xmm3
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm0
+; AVX2-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: vpinsrw $6, %eax, %xmm3, %xmm1
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: cvt_16f32_to_16i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm2
+; AVX512-NEXT: vmovd %xmm2, %eax
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT: vmovd %eax, %xmm3
+; AVX512-NEXT: vmovd %xmm2, %eax
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm2, %eax
+; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX512-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm1
+; AVX512-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX512-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX512-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm1
+; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm2, %eax
+; AVX512-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX512-NEXT: vmovd %eax, %xmm3
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX512-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm0
+; AVX512-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm1[1,0]
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpinsrw $6, %eax, %xmm3, %xmm1
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
+; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX512-NEXT: retq
+ %1 = fptrunc <16 x float> %a0 to <16 x half>
+ %2 = bitcast <16 x half> %1 to <16 x i16>
+ ret <16 x i16> %2
+}
+
+;
+; Float to Half (Store)
+;
+
+define void @store_cvt_4f32_to_4i16(<4 x float> %a0, <4 x i16>* %a1) {
+; ALL-LABEL: store_cvt_4f32_to_4i16:
+; ALL: # BB#0:
+; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT: vmovd %xmm1, %eax
+; ALL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT: vmovd %xmm1, %ecx
+; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT: vmovd %xmm1, %edx
+; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; ALL-NEXT: vmovd %xmm0, %esi
+; ALL-NEXT: movw %si, (%rdi)
+; ALL-NEXT: movw %dx, 6(%rdi)
+; ALL-NEXT: movw %cx, 4(%rdi)
+; ALL-NEXT: movw %ax, 2(%rdi)
+; ALL-NEXT: retq
+ %1 = fptrunc <4 x float> %a0 to <4 x half>
+ %2 = bitcast <4 x half> %1 to <4 x i16>
+ store <4 x i16> %2, <4 x i16>* %a1
+ ret void
+}
+
+define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, <8 x i16>* %a1) {
+; ALL-LABEL: store_cvt_4f32_to_8i16_undef:
+; ALL: # BB#0:
+; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT: vmovd %xmm1, %eax
+; ALL-NEXT: shll $16, %eax
+; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1
+; ALL-NEXT: vmovd %xmm1, %ecx
+; ALL-NEXT: movzwl %cx, %ecx
+; ALL-NEXT: orl %eax, %ecx
+; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT: vmovd %xmm1, %eax
+; ALL-NEXT: shll $16, %eax
+; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; ALL-NEXT: vmovd %xmm0, %edx
+; ALL-NEXT: movzwl %dx, %edx
+; ALL-NEXT: orl %eax, %edx
+; ALL-NEXT: shlq $32, %rdx
+; ALL-NEXT: orq %rcx, %rdx
+; ALL-NEXT: vmovq %rdx, %xmm0
+; ALL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; ALL-NEXT: vmovdqa %xmm0, (%rdi)
+; ALL-NEXT: retq
+ %1 = fptrunc <4 x float> %a0 to <4 x half>
+ %2 = bitcast <4 x half> %1 to <4 x i16>
+ %3 = shufflevector <4 x i16> %2, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ store <8 x i16> %3, <8 x i16>* %a1
+ ret void
+}
+
+define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) {
+; ALL-LABEL: store_cvt_4f32_to_8i16_zero:
+; ALL: # BB#0:
+; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT: vmovd %xmm1, %eax
+; ALL-NEXT: shll $16, %eax
+; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1
+; ALL-NEXT: vmovd %xmm1, %ecx
+; ALL-NEXT: movzwl %cx, %ecx
+; ALL-NEXT: orl %eax, %ecx
+; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT: vmovd %xmm1, %eax
+; ALL-NEXT: shll $16, %eax
+; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; ALL-NEXT: vmovd %xmm0, %edx
+; ALL-NEXT: movzwl %dx, %edx
+; ALL-NEXT: orl %eax, %edx
+; ALL-NEXT: shlq $32, %rdx
+; ALL-NEXT: orq %rcx, %rdx
+; ALL-NEXT: vmovq %rdx, %xmm0
+; ALL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; ALL-NEXT: vmovdqa %xmm0, (%rdi)
+; ALL-NEXT: retq
+ %1 = fptrunc <4 x float> %a0 to <4 x half>
+ %2 = bitcast <4 x half> %1 to <4 x i16>
+ %3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ store <8 x i16> %3, <8 x i16>* %a1
+ ret void
+}
+
+define void @store_cvt_8f32_to_8i16(<8 x float> %a0, <8 x i16>* %a1) {
+; AVX1-LABEL: store_cvt_8f32_to_8i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: vmovd %xmm1, %r8d
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: vmovd %xmm1, %r9d
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: vmovd %xmm1, %r10d
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX1-NEXT: vmovd %xmm2, %r11d
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX1-NEXT: vmovd %xmm2, %eax
+; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX1-NEXT: vmovd %xmm2, %ecx
+; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %edx
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm0
+; AVX1-NEXT: vmovd %xmm0, %esi
+; AVX1-NEXT: movw %si, 8(%rdi)
+; AVX1-NEXT: movw %dx, (%rdi)
+; AVX1-NEXT: movw %cx, 14(%rdi)
+; AVX1-NEXT: movw %ax, 12(%rdi)
+; AVX1-NEXT: movw %r11w, 10(%rdi)
+; AVX1-NEXT: movw %r10w, 6(%rdi)
+; AVX1-NEXT: movw %r9w, 4(%rdi)
+; AVX1-NEXT: movw %r8w, 2(%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: store_cvt_8f32_to_8i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %xmm1, %r8d
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %xmm1, %r9d
+; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: vmovd %xmm1, %r10d
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX2-NEXT: vmovd %xmm2, %r11d
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX2-NEXT: vmovd %xmm2, %eax
+; AVX2-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX2-NEXT: vmovd %xmm2, %ecx
+; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %edx
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %esi
+; AVX2-NEXT: movw %si, 8(%rdi)
+; AVX2-NEXT: movw %dx, (%rdi)
+; AVX2-NEXT: movw %cx, 14(%rdi)
+; AVX2-NEXT: movw %ax, 12(%rdi)
+; AVX2-NEXT: movw %r11w, 10(%rdi)
+; AVX2-NEXT: movw %r10w, 6(%rdi)
+; AVX2-NEXT: movw %r9w, 4(%rdi)
+; AVX2-NEXT: movw %r8w, 2(%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: store_cvt_8f32_to_8i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX512-NEXT: vmovd %xmm1, %r8d
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX512-NEXT: vmovd %xmm1, %r9d
+; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX512-NEXT: vmovd %xmm1, %r10d
+; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT: vmovd %xmm2, %r11d
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT: vmovd %xmm2, %eax
+; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT: vmovd %xmm2, %ecx
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %edx
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm0
+; AVX512-NEXT: vmovd %xmm0, %esi
+; AVX512-NEXT: movw %si, 8(%rdi)
+; AVX512-NEXT: movw %dx, (%rdi)
+; AVX512-NEXT: movw %cx, 14(%rdi)
+; AVX512-NEXT: movw %ax, 12(%rdi)
+; AVX512-NEXT: movw %r11w, 10(%rdi)
+; AVX512-NEXT: movw %r10w, 6(%rdi)
+; AVX512-NEXT: movw %r9w, 4(%rdi)
+; AVX512-NEXT: movw %r8w, 2(%rdi)
+; AVX512-NEXT: retq
+ %1 = fptrunc <8 x float> %a0 to <8 x half>
+ %2 = bitcast <8 x half> %1 to <8 x i16>
+ store <8 x i16> %2, <8 x i16>* %a1
+ ret void
+}
+
+define void @store_cvt_16f32_to_16i16(<16 x float> %a0, <16 x i16>* %a1) {
+; AVX1-LABEL: store_cvt_16f32_to_16i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vcvtps2ph $4, %xmm3, %xmm4
+; AVX1-NEXT: vmovd %xmm4, %eax
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm4
+; AVX1-NEXT: movw %ax, 24(%rdi)
+; AVX1-NEXT: vmovd %xmm4, %eax
+; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm4
+; AVX1-NEXT: movw %ax, 16(%rdi)
+; AVX1-NEXT: vmovd %xmm4, %eax
+; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm4
+; AVX1-NEXT: movw %ax, 8(%rdi)
+; AVX1-NEXT: vmovd %xmm4, %eax
+; AVX1-NEXT: vpermilps {{.*#+}} xmm4 = xmm3[3,1,2,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm4, %xmm4
+; AVX1-NEXT: movw %ax, (%rdi)
+; AVX1-NEXT: vmovd %xmm4, %eax
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
+; AVX1-NEXT: vcvtps2ph $4, %xmm4, %xmm4
+; AVX1-NEXT: movw %ax, 30(%rdi)
+; AVX1-NEXT: vmovd %xmm4, %eax
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm4, %xmm4
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX1-NEXT: movw %ax, 28(%rdi)
+; AVX1-NEXT: vmovd %xmm3, %eax
+; AVX1-NEXT: vpermilps {{.*#+}} xmm3 = xmm1[3,1,2,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX1-NEXT: movw %ax, 26(%rdi)
+; AVX1-NEXT: vmovd %xmm3, %eax
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
+; AVX1-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX1-NEXT: movw %ax, 22(%rdi)
+; AVX1-NEXT: vmovd %xmm3, %eax
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX1-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: movw %ax, 20(%rdi)
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm2[3,1,2,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: movw %ax, 18(%rdi)
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX1-NEXT: movw %ax, 14(%rdi)
+; AVX1-NEXT: vmovd %xmm2, %eax
+; AVX1-NEXT: movw %ax, 12(%rdi)
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: movw %ax, 10(%rdi)
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: movw %ax, 6(%rdi)
+; AVX1-NEXT: vmovd %xmm3, %eax
+; AVX1-NEXT: movw %ax, 4(%rdi)
+; AVX1-NEXT: vmovd %xmm4, %eax
+; AVX1-NEXT: movw %ax, 2(%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: store_cvt_16f32_to_16i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vcvtps2ph $4, %xmm3, %xmm4
+; AVX2-NEXT: vmovd %xmm4, %eax
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm4
+; AVX2-NEXT: movw %ax, 24(%rdi)
+; AVX2-NEXT: vmovd %xmm4, %eax
+; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm4
+; AVX2-NEXT: movw %ax, 16(%rdi)
+; AVX2-NEXT: vmovd %xmm4, %eax
+; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm4
+; AVX2-NEXT: movw %ax, 8(%rdi)
+; AVX2-NEXT: vmovd %xmm4, %eax
+; AVX2-NEXT: vpermilps {{.*#+}} xmm4 = xmm3[3,1,2,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm4, %xmm4
+; AVX2-NEXT: movw %ax, (%rdi)
+; AVX2-NEXT: vmovd %xmm4, %eax
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
+; AVX2-NEXT: vcvtps2ph $4, %xmm4, %xmm4
+; AVX2-NEXT: movw %ax, 30(%rdi)
+; AVX2-NEXT: vmovd %xmm4, %eax
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm4, %xmm4
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX2-NEXT: movw %ax, 28(%rdi)
+; AVX2-NEXT: vmovd %xmm3, %eax
+; AVX2-NEXT: vpermilps {{.*#+}} xmm3 = xmm1[3,1,2,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX2-NEXT: movw %ax, 26(%rdi)
+; AVX2-NEXT: vmovd %xmm3, %eax
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
+; AVX2-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX2-NEXT: movw %ax, 22(%rdi)
+; AVX2-NEXT: vmovd %xmm3, %eax
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX2-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: movw %ax, 20(%rdi)
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm2[3,1,2,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: movw %ax, 18(%rdi)
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX2-NEXT: movw %ax, 14(%rdi)
+; AVX2-NEXT: vmovd %xmm2, %eax
+; AVX2-NEXT: movw %ax, 12(%rdi)
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: movw %ax, 10(%rdi)
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: movw %ax, 6(%rdi)
+; AVX2-NEXT: vmovd %xmm3, %eax
+; AVX2-NEXT: movw %ax, 4(%rdi)
+; AVX2-NEXT: vmovd %xmm4, %eax
+; AVX2-NEXT: movw %ax, 2(%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: store_cvt_16f32_to_16i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm4
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm4
+; AVX512-NEXT: movw %ax, 24(%rdi)
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm4
+; AVX512-NEXT: movw %ax, 16(%rdi)
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm4
+; AVX512-NEXT: movw %ax, 8(%rdi)
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: vpermilps {{.*#+}} xmm4 = xmm3[3,1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
+; AVX512-NEXT: movw %ax, (%rdi)
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
+; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
+; AVX512-NEXT: movw %ax, 30(%rdi)
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX512-NEXT: movw %ax, 28(%rdi)
+; AVX512-NEXT: vmovd %xmm3, %eax
+; AVX512-NEXT: vpermilps {{.*#+}} xmm3 = xmm2[3,1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX512-NEXT: movw %ax, 26(%rdi)
+; AVX512-NEXT: vmovd %xmm3, %eax
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
+; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX512-NEXT: movw %ax, 22(%rdi)
+; AVX512-NEXT: vmovd %xmm3, %eax
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT: movw %ax, 20(%rdi)
+; AVX512-NEXT: vmovd %xmm2, %eax
+; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT: movw %ax, 18(%rdi)
+; AVX512-NEXT: vmovd %xmm2, %eax
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX512-NEXT: movw %ax, 14(%rdi)
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: movw %ax, 12(%rdi)
+; AVX512-NEXT: vmovd %xmm2, %eax
+; AVX512-NEXT: movw %ax, 10(%rdi)
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: movw %ax, 6(%rdi)
+; AVX512-NEXT: vmovd %xmm3, %eax
+; AVX512-NEXT: movw %ax, 4(%rdi)
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: movw %ax, 2(%rdi)
+; AVX512-NEXT: retq
+ %1 = fptrunc <16 x float> %a0 to <16 x half>
+ %2 = bitcast <16 x half> %1 to <16 x i16>
+ store <16 x i16> %2, <16 x i16>* %a1
+ ret void
+}
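As an aside (not part of this commit): the <8 x float> -> <8 x i16> truncation exercised by store_cvt_8f32_to_8i16 is exactly what one F16C vcvtps2ph with a YMM source computes, so a hand-written intrinsics version needs a single convert plus a store instead of the eight scalar convert/movw pairs in the current checks. A minimal C sketch, assuming the F16C intrinsics from <immintrin.h>; the helper name is hypothetical:

#include <immintrin.h>

/* Illustrative only -- mirrors the IR of store_cvt_8f32_to_8i16 above.      */
/* _MM_FROUND_CUR_DIRECTION (0x04) matches the $4 immediate in the checks,   */
/* i.e. convert using the current MXCSR rounding mode.                       */
static inline void store_cvt_8f32_to_8i16_intrin(__m256 a0, __m128i *a1) {
  __m128i h = _mm256_cvtps_ph(a0, _MM_FROUND_CUR_DIRECTION); /* 8 x f32 -> 8 x f16 */
  _mm_store_si128(a1, h);                                    /* store as <8 x i16> */
}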
Propchange: llvm/trunk/test/CodeGen/X86/vector-half-conversions.ll
------------------------------------------------------------------------------
svn:eol-style = LF