[llvm] 244389a - [X86] Add fneg vector test coverage

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Wed Nov 29 07:03:42 PST 2023


Author: Simon Pilgrim
Date: 2023-11-29T15:03:26Z
New Revision: 244389ad1794b0bc6c659bd685cf6ea9a7cad8e3

URL: https://github.com/llvm/llvm-project/commit/244389ad1794b0bc6c659bd685cf6ea9a7cad8e3
DIFF: https://github.com/llvm/llvm-project/commit/244389ad1794b0bc6c659bd685cf6ea9a7cad8e3.diff

LOG: [X86] Add fneg vector test coverage

Added: 
    llvm/test/CodeGen/X86/vec_fneg.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/vec_fneg.ll b/llvm/test/CodeGen/X86/vec_fneg.ll
new file mode 100644
index 000000000000000..121ae3c0f12fdc5
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vec_fneg.ll
@@ -0,0 +1,6514 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86,X86-SSE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=X86,X86-AVX,X86-AVX1
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X86,X86-AVX,X86-AVX2
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=X86,X86-AVX512,X86-AVX512VL
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512fp16 | FileCheck %s --check-prefixes=X86,X86-AVX512,X86-AVX512FP16
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefixes=X86,X86-AVX512,X86-AVX512VLDQ
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X64,X64-SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=X64,X64-AVX512,X64-AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16 | FileCheck %s --check-prefixes=X64,X64-AVX512,X64-AVX512FP16
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512vl | FileCheck %s --check-prefixes=X64,X64-AVX512,X64-AVX512VLDQ
+
+;
+; 128-bit Vectors
+;
+
+define <2 x double> @fneg_v2f64(<2 x double> %p) nounwind {
+; X86-SSE-LABEL: fneg_v2f64:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: fneg_v2f64:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X86-AVX512VL-LABEL: fneg_v2f64:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vpxorq {{\.?LCPI[0-9]+_[0-9]+}}{1to2}, %xmm0, %xmm0
+; X86-AVX512VL-NEXT:    retl
+;
+; X86-AVX512FP16-LABEL: fneg_v2f64:
+; X86-AVX512FP16:       # %bb.0:
+; X86-AVX512FP16-NEXT:    vxorpd {{\.?LCPI[0-9]+_[0-9]+}}{1to2}, %xmm0, %xmm0
+; X86-AVX512FP16-NEXT:    retl
+;
+; X86-AVX512VLDQ-LABEL: fneg_v2f64:
+; X86-AVX512VLDQ:       # %bb.0:
+; X86-AVX512VLDQ-NEXT:    vxorpd {{\.?LCPI[0-9]+_[0-9]+}}{1to2}, %xmm0, %xmm0
+; X86-AVX512VLDQ-NEXT:    retl
+;
+; X64-SSE-LABEL: fneg_v2f64:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: fneg_v2f64:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    retq
+;
+; X64-AVX512VL-LABEL: fneg_v2f64:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vpxorq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
+; X64-AVX512VL-NEXT:    retq
+;
+; X64-AVX512FP16-LABEL: fneg_v2f64:
+; X64-AVX512FP16:       # %bb.0:
+; X64-AVX512FP16-NEXT:    vxorpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
+; X64-AVX512FP16-NEXT:    retq
+;
+; X64-AVX512VLDQ-LABEL: fneg_v2f64:
+; X64-AVX512VLDQ:       # %bb.0:
+; X64-AVX512VLDQ-NEXT:    vxorpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
+; X64-AVX512VLDQ-NEXT:    retq
+  %t = fsub <2 x double> <double -0.0, double -0.0>, %p
+  ret <2 x double> %t
+}
+
+define <4 x float> @fneg_v4f32(<4 x float> %p) nounwind {
+; X86-SSE-LABEL: fneg_v4f32:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX1-LABEL: fneg_v4f32:
+; X86-AVX1:       # %bb.0:
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    retl
+;
+; X86-AVX2-LABEL: fneg_v4f32:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm0, %xmm0
+; X86-AVX2-NEXT:    retl
+;
+; X86-AVX512VL-LABEL: fneg_v4f32:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vpxord {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
+; X86-AVX512VL-NEXT:    retl
+;
+; X86-AVX512FP16-LABEL: fneg_v4f32:
+; X86-AVX512FP16:       # %bb.0:
+; X86-AVX512FP16-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
+; X86-AVX512FP16-NEXT:    retl
+;
+; X86-AVX512VLDQ-LABEL: fneg_v4f32:
+; X86-AVX512VLDQ:       # %bb.0:
+; X86-AVX512VLDQ-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
+; X86-AVX512VLDQ-NEXT:    retl
+;
+; X64-SSE-LABEL: fneg_v4f32:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: fneg_v4f32:
+; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX2-LABEL: fneg_v4f32:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX2-NEXT:    vxorps %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT:    retq
+;
+; X64-AVX512VL-LABEL: fneg_v4f32:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vpxord {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512VL-NEXT:    retq
+;
+; X64-AVX512FP16-LABEL: fneg_v4f32:
+; X64-AVX512FP16:       # %bb.0:
+; X64-AVX512FP16-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512FP16-NEXT:    retq
+;
+; X64-AVX512VLDQ-LABEL: fneg_v4f32:
+; X64-AVX512VLDQ:       # %bb.0:
+; X64-AVX512VLDQ-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; X64-AVX512VLDQ-NEXT:    retq
+  %t = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %p
+  ret <4 x float> %t
+}
+
+define <8 x half> @fneg_v8f16(ptr %p) nounwind {
+; X86-SSE-LABEL: fneg_v8f16:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    subl $148, %esp
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movdqa (%eax), %xmm0
+; X86-SSE-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrlq $48, %xmm0
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrld $16, %xmm0
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm1, (%esp)
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X86-SSE-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X86-SSE-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-SSE-NEXT:    movhps {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; X86-SSE-NEXT:    # xmm1 = xmm1[0,1],mem[0,1]
+; X86-SSE-NEXT:    movaps %xmm1, %xmm0
+; X86-SSE-NEXT:    addl $148, %esp
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX1-LABEL: fneg_v8f16:
+; X86-AVX1:       # %bb.0:
+; X86-AVX1-NEXT:    pushl %esi
+; X86-AVX1-NEXT:    subl $148, %esp
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-AVX1-NEXT:    vmovdqa (%esi), %xmm0
+; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 4(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 8(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 12(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm1, (%esp)
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X86-AVX1-NEXT:    vpunpcklqdq {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
+; X86-AVX1-NEXT:    addl $148, %esp
+; X86-AVX1-NEXT:    popl %esi
+; X86-AVX1-NEXT:    retl
+;
+; X86-AVX2-LABEL: fneg_v8f16:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    pushl %esi
+; X86-AVX2-NEXT:    subl $180, %esp
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-AVX2-NEXT:    vmovdqa (%esi), %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX2-NEXT:    vmovups %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 4(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 8(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 12(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vpxor {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm1, (%esp)
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX2-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-AVX2-NEXT:    vpunpckldq {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X86-AVX2-NEXT:    vpunpcklqdq {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm0 = xmm0[0],mem[0]
+; X86-AVX2-NEXT:    addl $180, %esp
+; X86-AVX2-NEXT:    popl %esi
+; X86-AVX2-NEXT:    retl
+;
+; X86-AVX512VL-LABEL: fneg_v8f16:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VL-NEXT:    vmovdqa (%eax), %xmm1
+; X86-AVX512VL-NEXT:    movzwl 12(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm0
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm0, %xmm2
+; X86-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm2, %xmm2
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; X86-AVX512VL-NEXT:    vmovd %xmm2, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm2
+; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm3, %ecx
+; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm3
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm3, %xmm3
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm3, %xmm3
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; X86-AVX512VL-NEXT:    vmovd %xmm3, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm3
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; X86-AVX512VL-NEXT:    movzwl 8(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm3
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm3, %xmm3
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm3, %xmm3
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; X86-AVX512VL-NEXT:    vmovd %xmm3, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm3
+; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm4, %ecx
+; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm4
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vmovd %xmm4, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm4
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; X86-AVX512VL-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X86-AVX512VL-NEXT:    movzwl 4(%eax), %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm3
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm3, %xmm3
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm3, %xmm3
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; X86-AVX512VL-NEXT:    vmovd %xmm3, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; X86-AVX512VL-NEXT:    vpsrlq $48, %xmm1, %xmm4
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm4, %eax
+; X86-AVX512VL-NEXT:    movzwl %ax, %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm4
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vmovd %xmm4, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %eax
+; X86-AVX512VL-NEXT:    movzwl %ax, %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm4
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vmovd %xmm4, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X86-AVX512VL-NEXT:    vpsrld $16, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %eax
+; X86-AVX512VL-NEXT:    movzwl %ax, %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm0
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; X86-AVX512VL-NEXT:    vmovd %xmm0, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; X86-AVX512VL-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; X86-AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; X86-AVX512VL-NEXT:    retl
+;
+; X86-AVX512FP16-LABEL: fneg_v8f16:
+; X86-AVX512FP16:       # %bb.0:
+; X86-AVX512FP16-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX512FP16-NEXT:    vpxor (%eax), %xmm0, %xmm0
+; X86-AVX512FP16-NEXT:    retl
+;
+; X86-AVX512VLDQ-LABEL: fneg_v8f16:
+; X86-AVX512VLDQ:       # %bb.0:
+; X86-AVX512VLDQ-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VLDQ-NEXT:    vmovdqa (%eax), %xmm1
+; X86-AVX512VLDQ-NEXT:    movzwl 12(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm0
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm0, %xmm2
+; X86-AVX512VLDQ-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm2, %xmm2
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm2, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm2
+; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm3, %ecx
+; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm3
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm3, %xmm3
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm3, %xmm3
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm3, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm3
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; X86-AVX512VLDQ-NEXT:    movzwl 8(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm3
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm3, %xmm3
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm3, %xmm3
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm3, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm3
+; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm4, %ecx
+; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm4
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm4, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; X86-AVX512VLDQ-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X86-AVX512VLDQ-NEXT:    movzwl 4(%eax), %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm3
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm3, %xmm3
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm3, %xmm3
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm3, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; X86-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm1, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm4, %eax
+; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %eax
+; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpsrld $16, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %eax
+; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm0
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm0, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; X86-AVX512VLDQ-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; X86-AVX512VLDQ-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; X86-AVX512VLDQ-NEXT:    retl
+;
+; X64-SSE-LABEL: fneg_v8f16:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    subq $72, %rsp
+; X64-SSE-NEXT:    movdqa (%rdi), %xmm0
+; X64-SSE-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrlq $48, %xmm0
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrld $16, %xmm0
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-SSE-NEXT:    punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
+; X64-SSE-NEXT:    punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm1 = xmm1[0],mem[0]
+; X64-SSE-NEXT:    movdqa %xmm1, %xmm0
+; X64-SSE-NEXT:    addq $72, %rsp
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: fneg_v8f16:
+; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    pushq %rbx
+; X64-AVX1-NEXT:    subq $64, %rsp
+; X64-AVX1-NEXT:    movq %rdi, %rbx
+; X64-AVX1-NEXT:    vmovaps (%rdi), %xmm0
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 12(%rdi), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 8(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 4(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX1-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
+; X64-AVX1-NEXT:    addq $64, %rsp
+; X64-AVX1-NEXT:    popq %rbx
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX2-LABEL: fneg_v8f16:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    pushq %rbx
+; X64-AVX2-NEXT:    subq $80, %rsp
+; X64-AVX2-NEXT:    movq %rdi, %rbx
+; X64-AVX2-NEXT:    vmovdqa (%rdi), %xmm0
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 12(%rdi), %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX2-NEXT:    vmovdqa %xmm1, (%rsp) # 16-byte Spill
+; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 8(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 4(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vxorps (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX2-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    # xmm0 = xmm0[0],mem[0]
+; X64-AVX2-NEXT:    addq $80, %rsp
+; X64-AVX2-NEXT:    popq %rbx
+; X64-AVX2-NEXT:    retq
+;
+; X64-AVX512VL-LABEL: fneg_v8f16:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vmovdqa (%rdi), %xmm1
+; X64-AVX512VL-NEXT:    movzwl 12(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm0
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm0, %xmm2
+; X64-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm2, %xmm2
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; X64-AVX512VL-NEXT:    vmovd %xmm2, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
+; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm3, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm3
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm3, %xmm3
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm3, %xmm3
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; X64-AVX512VL-NEXT:    vmovd %xmm3, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; X64-AVX512VL-NEXT:    movzwl 8(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm3
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm3, %xmm3
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm3, %xmm3
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; X64-AVX512VL-NEXT:    vmovd %xmm3, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm4, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; X64-AVX512VL-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X64-AVX512VL-NEXT:    movzwl 4(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm3
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm3, %xmm3
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm3, %xmm3
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; X64-AVX512VL-NEXT:    vmovd %xmm3, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; X64-AVX512VL-NEXT:    vpsrlq $48, %xmm1, %xmm4
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm4, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm1, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VL-NEXT:    vpsrld $16, %xmm1, %xmm1
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm1, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm1
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm0
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; X64-AVX512VL-NEXT:    vmovd %xmm0, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; X64-AVX512VL-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; X64-AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; X64-AVX512VL-NEXT:    retq
+;
+; X64-AVX512FP16-LABEL: fneg_v8f16:
+; X64-AVX512FP16:       # %bb.0:
+; X64-AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX512FP16-NEXT:    vpxor (%rdi), %xmm0, %xmm0
+; X64-AVX512FP16-NEXT:    retq
+;
+; X64-AVX512VLDQ-LABEL: fneg_v8f16:
+; X64-AVX512VLDQ:       # %bb.0:
+; X64-AVX512VLDQ-NEXT:    vmovdqa (%rdi), %xmm1
+; X64-AVX512VLDQ-NEXT:    movzwl 12(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm0
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm0, %xmm2
+; X64-AVX512VLDQ-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm2, %xmm2
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm2, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
+; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm3, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm3
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm3, %xmm3
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm3, %xmm3
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm3, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; X64-AVX512VLDQ-NEXT:    movzwl 8(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm3
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm3, %xmm3
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm3, %xmm3
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm3, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm4, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; X64-AVX512VLDQ-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X64-AVX512VLDQ-NEXT:    movzwl 4(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm3
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm3, %xmm3
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm3, %xmm3
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm3, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; X64-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm1, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm4, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpsrld $16, %xmm1, %xmm1
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm1
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm0
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm0, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; X64-AVX512VLDQ-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; X64-AVX512VLDQ-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; X64-AVX512VLDQ-NEXT:    retq
+  %v = load <8 x half>, ptr %p, align 16
+  %nnv = fsub <8 x half> <half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0>, %v
+  ret <8 x half> %nnv
+}
+
+;
+; 256-bit Vectors
+;
+
+define <4 x double> @fneg_v4f64(<4 x double> %p) nounwind {
+; X86-SSE-LABEL: fneg_v4f64:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movaps {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; X86-SSE-NEXT:    xorps %xmm2, %xmm0
+; X86-SSE-NEXT:    xorps %xmm2, %xmm1
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX1-LABEL: fneg_v4f64:
+; X86-AVX1:       # %bb.0:
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX1-NEXT:    retl
+;
+; X86-AVX2-LABEL: fneg_v4f64:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX2-NEXT:    vxorps %ymm1, %ymm0, %ymm0
+; X86-AVX2-NEXT:    retl
+;
+; X86-AVX512VL-LABEL: fneg_v4f64:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vpxorq {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %ymm0, %ymm0
+; X86-AVX512VL-NEXT:    retl
+;
+; X86-AVX512FP16-LABEL: fneg_v4f64:
+; X86-AVX512FP16:       # %bb.0:
+; X86-AVX512FP16-NEXT:    vxorpd {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %ymm0, %ymm0
+; X86-AVX512FP16-NEXT:    retl
+;
+; X86-AVX512VLDQ-LABEL: fneg_v4f64:
+; X86-AVX512VLDQ:       # %bb.0:
+; X86-AVX512VLDQ-NEXT:    vxorpd {{\.?LCPI[0-9]+_[0-9]+}}{1to4}, %ymm0, %ymm0
+; X86-AVX512VLDQ-NEXT:    retl
+;
+; X64-SSE-LABEL: fneg_v4f64:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movaps {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; X64-SSE-NEXT:    xorps %xmm2, %xmm0
+; X64-SSE-NEXT:    xorps %xmm2, %xmm1
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: fneg_v4f64:
+; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX2-LABEL: fneg_v4f64:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX2-NEXT:    vxorps %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT:    retq
+;
+; X64-AVX512VL-LABEL: fneg_v4f64:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vpxorq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
+; X64-AVX512VL-NEXT:    retq
+;
+; X64-AVX512FP16-LABEL: fneg_v4f64:
+; X64-AVX512FP16:       # %bb.0:
+; X64-AVX512FP16-NEXT:    vxorpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
+; X64-AVX512FP16-NEXT:    retq
+;
+; X64-AVX512VLDQ-LABEL: fneg_v4f64:
+; X64-AVX512VLDQ:       # %bb.0:
+; X64-AVX512VLDQ-NEXT:    vxorpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
+; X64-AVX512VLDQ-NEXT:    retq
+  %t = fsub <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %p
+  ret <4 x double> %t
+}
+
+define <8 x float> @fneg_v8f32(<8 x float> %p) nounwind {
+; X86-SSE-LABEL: fneg_v8f32:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movaps {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-SSE-NEXT:    xorps %xmm2, %xmm0
+; X86-SSE-NEXT:    xorps %xmm2, %xmm1
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX1-LABEL: fneg_v8f32:
+; X86-AVX1:       # %bb.0:
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX1-NEXT:    retl
+;
+; X86-AVX2-LABEL: fneg_v8f32:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX2-NEXT:    vxorps %ymm1, %ymm0, %ymm0
+; X86-AVX2-NEXT:    retl
+;
+; X86-AVX512VL-LABEL: fneg_v8f32:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vpxord {{\.?LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
+; X86-AVX512VL-NEXT:    retl
+;
+; X86-AVX512FP16-LABEL: fneg_v8f32:
+; X86-AVX512FP16:       # %bb.0:
+; X86-AVX512FP16-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
+; X86-AVX512FP16-NEXT:    retl
+;
+; X86-AVX512VLDQ-LABEL: fneg_v8f32:
+; X86-AVX512VLDQ:       # %bb.0:
+; X86-AVX512VLDQ-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
+; X86-AVX512VLDQ-NEXT:    retl
+;
+; X64-SSE-LABEL: fneg_v8f32:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movaps {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-SSE-NEXT:    xorps %xmm2, %xmm0
+; X64-SSE-NEXT:    xorps %xmm2, %xmm1
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: fneg_v8f32:
+; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX2-LABEL: fneg_v8f32:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX2-NEXT:    vxorps %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT:    retq
+;
+; X64-AVX512VL-LABEL: fneg_v8f32:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vpxord {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; X64-AVX512VL-NEXT:    retq
+;
+; X64-AVX512FP16-LABEL: fneg_v8f32:
+; X64-AVX512FP16:       # %bb.0:
+; X64-AVX512FP16-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; X64-AVX512FP16-NEXT:    retq
+;
+; X64-AVX512VLDQ-LABEL: fneg_v8f32:
+; X64-AVX512VLDQ:       # %bb.0:
+; X64-AVX512VLDQ-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; X64-AVX512VLDQ-NEXT:    retq
+  %t = fsub <8 x float> <float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0>, %p
+  ret <8 x float> %t
+}
+
+define <16 x half> @fneg_v16f16(ptr %p) nounwind {
+; X86-SSE-LABEL: fneg_v16f16:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    subl $324, %esp # imm = 0x144
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movaps (%eax), %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqa 16(%eax), %xmm0
+; X86-SSE-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrlq $48, %xmm0
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrld $16, %xmm0
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrlq $48, %xmm0
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrld $16, %xmm0
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm1, (%esp)
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X86-SSE-NEXT:    movdqa %xmm0, %xmm3
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; X86-SSE-NEXT:    movdqu %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X86-SSE-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X86-SSE-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-SSE-NEXT:    movhps {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; X86-SSE-NEXT:    # xmm1 = xmm1[0,1],mem[0,1]
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    addl $324, %esp # imm = 0x144
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX1-LABEL: fneg_v16f16:
+; X86-AVX1:       # %bb.0:
+; X86-AVX1-NEXT:    pushl %esi
+; X86-AVX1-NEXT:    subl $308, %esp # imm = 0x134
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-AVX1-NEXT:    vmovdqa (%esi), %xmm0
+; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovaps 16(%esi), %xmm1
+; X86-AVX1-NEXT:    vmovups %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 4(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 8(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 12(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 20(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 24(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 28(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm1, (%esp)
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
+; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; X86-AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X86-AVX1-NEXT:    vpunpcklqdq {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
+; X86-AVX1-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%e{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    addl $308, %esp # imm = 0x134
+; X86-AVX1-NEXT:    popl %esi
+; X86-AVX1-NEXT:    retl
+;
+; X86-AVX2-LABEL: fneg_v16f16:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    pushl %esi
+; X86-AVX2-NEXT:    subl $372, %esp # imm = 0x174
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-AVX2-NEXT:    vmovdqa (%esi), %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovaps 16(%esi), %xmm1
+; X86-AVX2-NEXT:    vmovups %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX2-NEXT:    vmovups %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 4(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 20(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 8(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 24(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 12(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 28(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vpxor {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm1, (%esp)
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
+; X86-AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm1
+; X86-AVX2-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
+; X86-AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%e{{[sb]}}p) # 32-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; X86-AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%e{{[sb]}}p) # 32-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vzeroupper
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-AVX2-NEXT:    vinserti128 $1, {{[-0-9]+}}(%e{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vpunpckldq {{[-0-9]+}}(%e{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; X86-AVX2-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
+; X86-AVX2-NEXT:    vpunpcklqdq {{[-0-9]+}}(%e{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; X86-AVX2-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
+; X86-AVX2-NEXT:    addl $372, %esp # imm = 0x174
+; X86-AVX2-NEXT:    popl %esi
+; X86-AVX2-NEXT:    retl
+;
+; X86-AVX512VL-LABEL: fneg_v16f16:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VL-NEXT:    movzwl 28(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm0
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm0, %xmm1
+; X86-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm3
+; X86-AVX512VL-NEXT:    vmovdqa (%eax), %xmm1
+; X86-AVX512VL-NEXT:    vmovdqa 16(%eax), %xmm2
+; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm4, %ecx
+; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm4
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vmovd %xmm4, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm4
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; X86-AVX512VL-NEXT:    movzwl 12(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm4
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vmovd %xmm4, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm4
+; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm5, %ecx
+; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm5
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vmovd %xmm5, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm5
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; X86-AVX512VL-NEXT:    movzwl 24(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm4
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vmovd %xmm4, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm4
+; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm5, %ecx
+; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm5
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vmovd %xmm5, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm5
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; X86-AVX512VL-NEXT:    movzwl 8(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm5
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vmovd %xmm5, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm5
+; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm6, %ecx
+; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm6
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X86-AVX512VL-NEXT:    vmovd %xmm6, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm6
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm4, %ymm5, %ymm4
+; X86-AVX512VL-NEXT:    vpunpckldq {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[4],ymm3[4],ymm4[5],ymm3[5]
+; X86-AVX512VL-NEXT:    movzwl 20(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm4
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vmovd %xmm4, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm4
+; X86-AVX512VL-NEXT:    vpsrlq $48, %xmm2, %xmm5
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm5, %ecx
+; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm5
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vmovd %xmm5, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm5
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; X86-AVX512VL-NEXT:    movzwl 4(%eax), %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm5
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vmovd %xmm5, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X86-AVX512VL-NEXT:    vpsrlq $48, %xmm1, %xmm6
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm6, %eax
+; X86-AVX512VL-NEXT:    movzwl %ax, %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm6
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X86-AVX512VL-NEXT:    vmovd %xmm6, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm4, %ymm5, %ymm4
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm2, %eax
+; X86-AVX512VL-NEXT:    movzwl %ax, %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm5
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vmovd %xmm5, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X86-AVX512VL-NEXT:    vpsrld $16, %xmm2, %xmm2
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm2, %eax
+; X86-AVX512VL-NEXT:    movzwl %ax, %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm2
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm2, %xmm2
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm2, %xmm2
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; X86-AVX512VL-NEXT:    vmovd %xmm2, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %eax
+; X86-AVX512VL-NEXT:    movzwl %ax, %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm5
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vmovd %xmm5, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X86-AVX512VL-NEXT:    vpsrld $16, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %eax
+; X86-AVX512VL-NEXT:    movzwl %ax, %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm0
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; X86-AVX512VL-NEXT:    vmovd %xmm0, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX512VL-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[1],ymm4[1],ymm0[4],ymm4[4],ymm0[5],ymm4[5]
+; X86-AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
+; X86-AVX512VL-NEXT:    retl
+;
+; X86-AVX512FP16-LABEL: fneg_v16f16:
+; X86-AVX512FP16:       # %bb.0:
+; X86-AVX512FP16-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX512FP16-NEXT:    vpxor (%eax), %ymm0, %ymm0
+; X86-AVX512FP16-NEXT:    retl
+;
+; X86-AVX512VLDQ-LABEL: fneg_v16f16:
+; X86-AVX512VLDQ:       # %bb.0:
+; X86-AVX512VLDQ-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VLDQ-NEXT:    movzwl 28(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm0
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm0, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm3
+; X86-AVX512VLDQ-NEXT:    vmovdqa (%eax), %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovdqa 16(%eax), %xmm2
+; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm4, %ecx
+; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm4
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm4, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; X86-AVX512VLDQ-NEXT:    movzwl 12(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm4
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm4, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm5, %ecx
+; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm5
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm5, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; X86-AVX512VLDQ-NEXT:    movzwl 24(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm4
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm4, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm5, %ecx
+; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm5
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm5, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; X86-AVX512VLDQ-NEXT:    movzwl 8(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm5
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm5, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm6, %ecx
+; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm6
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm6, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm6
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm4, %ymm5, %ymm4
+; X86-AVX512VLDQ-NEXT:    vpunpckldq {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[4],ymm3[4],ymm4[5],ymm3[5]
+; X86-AVX512VLDQ-NEXT:    movzwl 20(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm4
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm4, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm2, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm5, %ecx
+; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm5
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm5, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; X86-AVX512VLDQ-NEXT:    movzwl 4(%eax), %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm5
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm5, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm1, %xmm6
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm6, %eax
+; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm6
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm6, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm4, %ymm5, %ymm4
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm2, %eax
+; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm5
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm5, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpsrld $16, %xmm2, %xmm2
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm2, %eax
+; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm2
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm2, %xmm2
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm2, %xmm2
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm2, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %eax
+; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm5
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm5, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpsrld $16, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %eax
+; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm0
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm0, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX512VLDQ-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[1],ymm4[1],ymm0[4],ymm4[4],ymm0[5],ymm4[5]
+; X86-AVX512VLDQ-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
+; X86-AVX512VLDQ-NEXT:    retl
+;
+; X64-SSE-LABEL: fneg_v16f16:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    subq $88, %rsp
+; X64-SSE-NEXT:    movdqa (%rdi), %xmm1
+; X64-SSE-NEXT:    movdqa %xmm1, (%rsp) # 16-byte Spill
+; X64-SSE-NEXT:    movaps 16(%rdi), %xmm0
+; X64-SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa %xmm1, %xmm0
+; X64-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrlq $48, %xmm0
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrld $16, %xmm0
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-SSE-NEXT:    punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
+; X64-SSE-NEXT:    punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm1 = xmm1[0],mem[0]
+; X64-SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd (%rsp), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    punpckldq (%rsp), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-SSE-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrlq $48, %xmm0
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrld $16, %xmm0
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-SSE-NEXT:    punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
+; X64-SSE-NEXT:    punpcklqdq (%rsp), %xmm1 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm1 = xmm1[0],mem[0]
+; X64-SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    addq $88, %rsp
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: fneg_v16f16:
+; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    pushq %rbx
+; X64-AVX1-NEXT:    subq $80, %rsp
+; X64-AVX1-NEXT:    movq %rdi, %rbx
+; X64-AVX1-NEXT:    vbroadcastss 28(%rdi), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovaps (%rbx), %xmm0
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa 16(%rbx), %xmm0
+; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 24(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 20(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX1-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 12(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 8(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 4(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX1-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
+; X64-AVX1-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    addq $80, %rsp
+; X64-AVX1-NEXT:    popq %rbx
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX2-LABEL: fneg_v16f16:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    pushq %rbx
+; X64-AVX2-NEXT:    subq $128, %rsp
+; X64-AVX2-NEXT:    movq %rdi, %rbx
+; X64-AVX2-NEXT:    vpinsrw $0, 28(%rdi), %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX2-NEXT:    vmovdqa %xmm1, (%rsp) # 16-byte Spill
+; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovaps (%rbx), %xmm0
+; X64-AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa 16(%rbx), %xmm0
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 12(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 24(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    vzeroupper
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 8(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; X64-AVX2-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
+; X64-AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 20(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    vzeroupper
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 4(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; X64-AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vzeroupper
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vxorps (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vxorps (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; X64-AVX2-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
+; X64-AVX2-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; X64-AVX2-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
+; X64-AVX2-NEXT:    addq $128, %rsp
+; X64-AVX2-NEXT:    popq %rbx
+; X64-AVX2-NEXT:    retq
+;
+; X64-AVX512VL-LABEL: fneg_v16f16:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    movzwl 28(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm0
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm0, %xmm1
+; X64-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X64-AVX512VL-NEXT:    vmovd %xmm1, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; X64-AVX512VL-NEXT:    vmovdqa (%rdi), %xmm1
+; X64-AVX512VL-NEXT:    vmovdqa 16(%rdi), %xmm2
+; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm4, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; X64-AVX512VL-NEXT:    movzwl 12(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm5, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm5
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vmovd %xmm5, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; X64-AVX512VL-NEXT:    movzwl 24(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm5, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm5
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vmovd %xmm5, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; X64-AVX512VL-NEXT:    movzwl 8(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm5
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vmovd %xmm5, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm6, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm6
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vmovd %xmm6, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm4, %ymm5, %ymm4
+; X64-AVX512VL-NEXT:    vpunpckldq {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[4],ymm3[4],ymm4[5],ymm3[5]
+; X64-AVX512VL-NEXT:    movzwl 20(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VL-NEXT:    vpsrlq $48, %xmm2, %xmm5
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm5, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm5
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vmovd %xmm5, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; X64-AVX512VL-NEXT:    movzwl 4(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm5
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vmovd %xmm5, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X64-AVX512VL-NEXT:    vpsrlq $48, %xmm1, %xmm6
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm6, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm6
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vmovd %xmm6, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm4, %ymm5, %ymm4
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm2, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm5
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vmovd %xmm5, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X64-AVX512VL-NEXT:    vpsrld $16, %xmm2, %xmm2
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm2, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm2
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm2, %xmm2
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm2, %xmm2
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; X64-AVX512VL-NEXT:    vmovd %xmm2, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm1, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm5
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X64-AVX512VL-NEXT:    vmovd %xmm5, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X64-AVX512VL-NEXT:    vpsrld $16, %xmm1, %xmm1
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm1, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm1
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm0
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; X64-AVX512VL-NEXT:    vmovd %xmm0, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX512VL-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[1],ymm4[1],ymm0[4],ymm4[4],ymm0[5],ymm4[5]
+; X64-AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
+; X64-AVX512VL-NEXT:    retq
+;
+; X64-AVX512FP16-LABEL: fneg_v16f16:
+; X64-AVX512FP16:       # %bb.0:
+; X64-AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX512FP16-NEXT:    vpxor (%rdi), %ymm0, %ymm0
+; X64-AVX512FP16-NEXT:    retq
+;
+; X64-AVX512VLDQ-LABEL: fneg_v16f16:
+; X64-AVX512VLDQ:       # %bb.0:
+; X64-AVX512VLDQ-NEXT:    movzwl 28(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm0
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm0, %xmm1
+; X64-AVX512VLDQ-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm1, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; X64-AVX512VLDQ-NEXT:    vmovdqa (%rdi), %xmm1
+; X64-AVX512VLDQ-NEXT:    vmovdqa 16(%rdi), %xmm2
+; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm4, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; X64-AVX512VLDQ-NEXT:    movzwl 12(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm5, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm5
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm5, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
+; X64-AVX512VLDQ-NEXT:    movzwl 24(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm5, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm5
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm5, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; X64-AVX512VLDQ-NEXT:    movzwl 8(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm5
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm5, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm6, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm6
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm6, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm4, %ymm5, %ymm4
+; X64-AVX512VLDQ-NEXT:    vpunpckldq {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[4],ymm3[4],ymm4[5],ymm3[5]
+; X64-AVX512VLDQ-NEXT:    movzwl 20(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm2, %xmm5
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm5, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm5
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm5, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; X64-AVX512VLDQ-NEXT:    movzwl 4(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm5
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm5, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X64-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm1, %xmm6
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm6, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm6
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm6, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm4, %ymm5, %ymm4
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm2, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm5
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm5, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X64-AVX512VLDQ-NEXT:    vpsrld $16, %xmm2, %xmm2
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm2, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm2
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm2, %xmm2
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm2, %xmm2
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm2, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm5
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm5, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X64-AVX512VLDQ-NEXT:    vpsrld $16, %xmm1, %xmm1
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm1
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm0
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm0, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX512VLDQ-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[1],ymm4[1],ymm0[4],ymm4[4],ymm0[5],ymm4[5]
+; X64-AVX512VLDQ-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
+; X64-AVX512VLDQ-NEXT:    retq
+  %v = load <16 x half>, ptr %p, align 16
+  %nnv = fsub <16 x half> <half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0>, %v
+  ret <16 x half> %nnv
+}
+
+;
+; 512-bit Vectors
+;
+
+define <8 x double> @fneg_v8f64(<8 x double> %p) nounwind {
+; X86-SSE-LABEL: fneg_v8f64:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    pushl %ebp
+; X86-SSE-NEXT:    movl %esp, %ebp
+; X86-SSE-NEXT:    andl $-16, %esp
+; X86-SSE-NEXT:    subl $16, %esp
+; X86-SSE-NEXT:    movaps {{.*#+}} xmm3 = [-0.0E+0,-0.0E+0]
+; X86-SSE-NEXT:    xorps %xmm3, %xmm0
+; X86-SSE-NEXT:    xorps %xmm3, %xmm1
+; X86-SSE-NEXT:    xorps %xmm3, %xmm2
+; X86-SSE-NEXT:    xorps 8(%ebp), %xmm3
+; X86-SSE-NEXT:    movl %ebp, %esp
+; X86-SSE-NEXT:    popl %ebp
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: fneg_v8f64:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX-NEXT:    vxorps %ymm2, %ymm0, %ymm0
+; X86-AVX-NEXT:    vxorps %ymm2, %ymm1, %ymm1
+; X86-AVX-NEXT:    retl
+;
+; X86-AVX512VL-LABEL: fneg_v8f64:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vpxorq {{\.?LCPI[0-9]+_[0-9]+}}{1to8}, %zmm0, %zmm0
+; X86-AVX512VL-NEXT:    retl
+;
+; X86-AVX512FP16-LABEL: fneg_v8f64:
+; X86-AVX512FP16:       # %bb.0:
+; X86-AVX512FP16-NEXT:    vxorpd {{\.?LCPI[0-9]+_[0-9]+}}{1to8}, %zmm0, %zmm0
+; X86-AVX512FP16-NEXT:    retl
+;
+; X86-AVX512VLDQ-LABEL: fneg_v8f64:
+; X86-AVX512VLDQ:       # %bb.0:
+; X86-AVX512VLDQ-NEXT:    vxorpd {{\.?LCPI[0-9]+_[0-9]+}}{1to8}, %zmm0, %zmm0
+; X86-AVX512VLDQ-NEXT:    retl
+;
+; X64-SSE-LABEL: fneg_v8f64:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movaps {{.*#+}} xmm4 = [-0.0E+0,-0.0E+0]
+; X64-SSE-NEXT:    xorps %xmm4, %xmm0
+; X64-SSE-NEXT:    xorps %xmm4, %xmm1
+; X64-SSE-NEXT:    xorps %xmm4, %xmm2
+; X64-SSE-NEXT:    xorps %xmm4, %xmm3
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: fneg_v8f64:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX-NEXT:    vxorps %ymm2, %ymm0, %ymm0
+; X64-AVX-NEXT:    vxorps %ymm2, %ymm1, %ymm1
+; X64-AVX-NEXT:    retq
+;
+; X64-AVX512VL-LABEL: fneg_v8f64:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vpxorq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
+; X64-AVX512VL-NEXT:    retq
+;
+; X64-AVX512FP16-LABEL: fneg_v8f64:
+; X64-AVX512FP16:       # %bb.0:
+; X64-AVX512FP16-NEXT:    vxorpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
+; X64-AVX512FP16-NEXT:    retq
+;
+; X64-AVX512VLDQ-LABEL: fneg_v8f64:
+; X64-AVX512VLDQ:       # %bb.0:
+; X64-AVX512VLDQ-NEXT:    vxorpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
+; X64-AVX512VLDQ-NEXT:    retq
+  %t = fsub <8 x double> <double -0.0, double -0.0, double -0.0, double -0.0, double -0.0, double -0.0, double -0.0, double -0.0>, %p
+  ret <8 x double> %t
+}
+
+define <16 x float> @fneg_v16f32(<16 x float> %p) nounwind {
+; X86-SSE-LABEL: fneg_v16f32:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    pushl %ebp
+; X86-SSE-NEXT:    movl %esp, %ebp
+; X86-SSE-NEXT:    andl $-16, %esp
+; X86-SSE-NEXT:    subl $16, %esp
+; X86-SSE-NEXT:    movaps {{.*#+}} xmm3 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-SSE-NEXT:    xorps %xmm3, %xmm0
+; X86-SSE-NEXT:    xorps %xmm3, %xmm1
+; X86-SSE-NEXT:    xorps %xmm3, %xmm2
+; X86-SSE-NEXT:    xorps 8(%ebp), %xmm3
+; X86-SSE-NEXT:    movl %ebp, %esp
+; X86-SSE-NEXT:    popl %ebp
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: fneg_v16f32:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vbroadcastss {{.*#+}} ymm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX-NEXT:    vxorps %ymm2, %ymm0, %ymm0
+; X86-AVX-NEXT:    vxorps %ymm2, %ymm1, %ymm1
+; X86-AVX-NEXT:    retl
+;
+; X86-AVX512VL-LABEL: fneg_v16f32:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    vpxord {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
+; X86-AVX512VL-NEXT:    retl
+;
+; X86-AVX512FP16-LABEL: fneg_v16f32:
+; X86-AVX512FP16:       # %bb.0:
+; X86-AVX512FP16-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
+; X86-AVX512FP16-NEXT:    retl
+;
+; X86-AVX512VLDQ-LABEL: fneg_v16f32:
+; X86-AVX512VLDQ:       # %bb.0:
+; X86-AVX512VLDQ-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
+; X86-AVX512VLDQ-NEXT:    retl
+;
+; X64-SSE-LABEL: fneg_v16f32:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movaps {{.*#+}} xmm4 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-SSE-NEXT:    xorps %xmm4, %xmm0
+; X64-SSE-NEXT:    xorps %xmm4, %xmm1
+; X64-SSE-NEXT:    xorps %xmm4, %xmm2
+; X64-SSE-NEXT:    xorps %xmm4, %xmm3
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: fneg_v16f32:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vbroadcastss {{.*#+}} ymm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX-NEXT:    vxorps %ymm2, %ymm0, %ymm0
+; X64-AVX-NEXT:    vxorps %ymm2, %ymm1, %ymm1
+; X64-AVX-NEXT:    retq
+;
+; X64-AVX512VL-LABEL: fneg_v16f32:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    vpxord {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; X64-AVX512VL-NEXT:    retq
+;
+; X64-AVX512FP16-LABEL: fneg_v16f32:
+; X64-AVX512FP16:       # %bb.0:
+; X64-AVX512FP16-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; X64-AVX512FP16-NEXT:    retq
+;
+; X64-AVX512VLDQ-LABEL: fneg_v16f32:
+; X64-AVX512VLDQ:       # %bb.0:
+; X64-AVX512VLDQ-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; X64-AVX512VLDQ-NEXT:    retq
+  %t = fsub <16 x float> <float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0>, %p
+  ret <16 x float> %t
+}
+
+define <32 x half> @fneg_v32f16(ptr %p) nounwind {
+; X86-SSE-LABEL: fneg_v32f16:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    subl $644, %esp # imm = 0x284
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movaps (%eax), %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movaps 16(%eax), %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movaps 32(%eax), %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqa 48(%eax), %xmm0
+; X86-SSE-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrlq $48, %xmm0
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrld $16, %xmm0
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrlq $48, %xmm0
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrld $16, %xmm0
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrlq $48, %xmm0
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrld $16, %xmm0
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrlq $48, %xmm0
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    psrld $16, %xmm0
+; X86-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X86-SSE-NEXT:    movw %ax, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __extendhfsf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm0, (%esp)
+; X86-SSE-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    movss %xmm1, (%esp)
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X86-SSE-NEXT:    movdqa %xmm0, %xmm3
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; X86-SSE-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X86-SSE-NEXT:    movdqa %xmm0, %xmm3
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; X86-SSE-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X86-SSE-NEXT:    movdqa %xmm0, %xmm3
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; X86-SSE-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; X86-SSE-NEXT:    movdqu %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X86-SSE-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X86-SSE-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-SSE-NEXT:    calll __truncsfhf2
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm3 # 16-byte Reload
+; X86-SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; X86-SSE-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; X86-SSE-NEXT:    movhps {{[-0-9]+}}(%e{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; X86-SSE-NEXT:    # xmm3 = xmm3[0,1],mem[0,1]
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-SSE-NEXT:    movups {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-SSE-NEXT:    addl $644, %esp # imm = 0x284
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX1-LABEL: fneg_v32f16:
+; X86-AVX1:       # %bb.0:
+; X86-AVX1-NEXT:    pushl %esi
+; X86-AVX1-NEXT:    subl $644, %esp # imm = 0x284
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-AVX1-NEXT:    vmovdqa 32(%esi), %xmm0
+; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 36(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 40(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 44(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqa 48(%esi), %xmm0
+; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 52(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 56(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 60(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqa (%esi), %xmm1
+; X86-AVX1-NEXT:    vmovdqu %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovaps 16(%esi), %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vpsrld $16, %xmm1, %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 4(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 8(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 12(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 20(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 24(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vbroadcastss 28(%esi), %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __extendhfsf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX1-NEXT:    vmovss %xmm1, (%esp)
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
+; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; X86-AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
+; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm3 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1],xmm3[2],mem[2],xmm3[3],mem[3]
+; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X86-AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; X86-AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X86-AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%e{{[sb]}}p) # 32-byte Spill
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
+; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; X86-AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX1-NEXT:    vzeroupper
+; X86-AVX1-NEXT:    calll __truncsfhf2
+; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X86-AVX1-NEXT:    vpunpcklqdq {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
+; X86-AVX1-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%e{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
+; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %ymm0 # 32-byte Reload
+; X86-AVX1-NEXT:    addl $644, %esp # imm = 0x284
+; X86-AVX1-NEXT:    popl %esi
+; X86-AVX1-NEXT:    retl
+;
+; X86-AVX2-LABEL: fneg_v32f16:
+; X86-AVX2:       # %bb.0:
+; X86-AVX2-NEXT:    pushl %esi
+; X86-AVX2-NEXT:    subl $708, %esp # imm = 0x2C4
+; X86-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-AVX2-NEXT:    vmovdqa 32(%esi), %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqa 48(%esi), %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX2-NEXT:    vmovups %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps %xmm1, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 36(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 52(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 40(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 56(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 44(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 60(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqa (%esi), %xmm1
+; X86-AVX2-NEXT:    vmovdqu %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovaps 16(%esi), %xmm0
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vpsrld $16, %xmm1, %xmm0
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 4(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 20(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 8(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 24(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 12(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX2-NEXT:    vpinsrw $0, 28(%esi), %xmm0, %xmm0
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpextrw $0, %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vxorps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __extendhfsf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX2-NEXT:    fstps {{[0-9]+}}(%esp)
+; X86-AVX2-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX2-NEXT:    vpxor {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX2-NEXT:    vmovss %xmm1, (%esp)
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
+; X86-AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm1
+; X86-AVX2-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
+; X86-AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm1
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm3 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1],xmm3[2],mem[2],xmm3[3],mem[3]
+; X86-AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
+; X86-AVX2-NEXT:    vpunpckldq {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[4],ymm1[4],ymm2[5],ymm1[5]
+; X86-AVX2-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; X86-AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%e{{[sb]}}p) # 32-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
+; X86-AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm1
+; X86-AVX2-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
+; X86-AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%e{{[sb]}}p) # 32-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; X86-AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; X86-AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%e{{[sb]}}p) # 32-byte Spill
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X86-AVX2-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX2-NEXT:    vzeroupper
+; X86-AVX2-NEXT:    calll __truncsfhf2
+; X86-AVX2-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
+; X86-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-AVX2-NEXT:    vinserti128 $1, {{[-0-9]+}}(%e{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X86-AVX2-NEXT:    vpunpckldq {{[-0-9]+}}(%e{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; X86-AVX2-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
+; X86-AVX2-NEXT:    vpunpcklqdq {{[-0-9]+}}(%e{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; X86-AVX2-NEXT:    # ymm1 = ymm0[0],mem[0],ymm0[2],mem[2]
+; X86-AVX2-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %ymm0 # 32-byte Reload
+; X86-AVX2-NEXT:    addl $708, %esp # imm = 0x2C4
+; X86-AVX2-NEXT:    popl %esi
+; X86-AVX2-NEXT:    retl
+;
+; X86-AVX512VL-LABEL: fneg_v32f16:
+; X86-AVX512VL:       # %bb.0:
+; X86-AVX512VL-NEXT:    subl $128, %esp
+; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VL-NEXT:    movzwl 60(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm0
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm0, %xmm1
+; X86-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
+; X86-AVX512VL-NEXT:    vmovdqa 48(%eax), %xmm3
+; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VL-NEXT:    vmovdqa %xmm3, %xmm4
+; X86-AVX512VL-NEXT:    vmovdqu %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm2, %ecx
+; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm2
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm2, %xmm2
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm2, %xmm2
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; X86-AVX512VL-NEXT:    vmovd %xmm2, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm2
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; X86-AVX512VL-NEXT:    movzwl 44(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm2
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm2, %xmm2
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm2, %xmm2
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; X86-AVX512VL-NEXT:    vmovd %xmm2, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm2
+; X86-AVX512VL-NEXT:    vmovdqa 32(%eax), %xmm3
+; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm5, %ecx
+; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm5
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X86-AVX512VL-NEXT:    vmovd %xmm5, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm5
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm5
+; X86-AVX512VL-NEXT:    movzwl 28(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm6
+; X86-AVX512VL-NEXT:    vmovdqa (%eax), %xmm1
+; X86-AVX512VL-NEXT:    vmovdqu %xmm1, (%esp) # 16-byte Spill
+; X86-AVX512VL-NEXT:    vmovdqa 16(%eax), %xmm2
+; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VL-NEXT:    vmovdqu %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm7, %ecx
+; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm7
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X86-AVX512VL-NEXT:    vmovd %xmm7, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm7
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; X86-AVX512VL-NEXT:    movzwl 12(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm7
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X86-AVX512VL-NEXT:    vmovd %xmm7, %ecx
+; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm7, %edx
+; X86-AVX512VL-NEXT:    movzwl %dx, %edx
+; X86-AVX512VL-NEXT:    vmovd %edx, %xmm7
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X86-AVX512VL-NEXT:    vmovd %xmm7, %edx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm7
+; X86-AVX512VL-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm1
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
+; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm6, %ymm1, %ymm1
+; X86-AVX512VL-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm1
+; X86-AVX512VL-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 64-byte Spill
+; X86-AVX512VL-NEXT:    movzwl 56(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
+; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm6, %ecx
+; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm6
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X86-AVX512VL-NEXT:    vmovd %xmm6, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm6
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; X86-AVX512VL-NEXT:    movzwl 40(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm3[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VL-NEXT:    vmovdqa %xmm3, %xmm4
+; X86-AVX512VL-NEXT:    vmovdqu %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %edx
+; X86-AVX512VL-NEXT:    movzwl %dx, %edx
+; X86-AVX512VL-NEXT:    vmovd %edx, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %edx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
+; X86-AVX512VL-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm7
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
+; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm6, %ymm1, %ymm6
+; X86-AVX512VL-NEXT:    movzwl 24(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %edx
+; X86-AVX512VL-NEXT:    movzwl %dx, %edx
+; X86-AVX512VL-NEXT:    vmovd %edx, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %edx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
+; X86-AVX512VL-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm7
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
+; X86-AVX512VL-NEXT:    movzwl 8(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VL-NEXT:    vmovdqu (%esp), %xmm2 # 16-byte Reload
+; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %edx
+; X86-AVX512VL-NEXT:    movzwl %dx, %edx
+; X86-AVX512VL-NEXT:    vmovd %edx, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %edx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
+; X86-AVX512VL-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm5
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm7, %ymm1, %ymm1
+; X86-AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm1
+; X86-AVX512VL-NEXT:    vpunpckldq {{[-0-9]+}}(%e{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
+; X86-AVX512VL-NEXT:    # zmm1 = zmm1[0],mem[0],zmm1[1],mem[1],zmm1[4],mem[4],zmm1[5],mem[5],zmm1[8],mem[8],zmm1[9],mem[9],zmm1[12],mem[12],zmm1[13],mem[13]
+; X86-AVX512VL-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 64-byte Spill
+; X86-AVX512VL-NEXT:    movzwl 52(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
+; X86-AVX512VL-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm3 # 16-byte Reload
+; X86-AVX512VL-NEXT:    vpsrlq $48, %xmm3, %xmm6
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm6, %ecx
+; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm6
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X86-AVX512VL-NEXT:    vmovd %xmm6, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm6
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; X86-AVX512VL-NEXT:    movzwl 36(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VL-NEXT:    vpsrlq $48, %xmm4, %xmm1
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %edx
+; X86-AVX512VL-NEXT:    movzwl %dx, %edx
+; X86-AVX512VL-NEXT:    vmovd %edx, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %edx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
+; X86-AVX512VL-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm7
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
+; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm6, %ymm1, %ymm6
+; X86-AVX512VL-NEXT:    movzwl 20(%eax), %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VL-NEXT:    vpsrlq $48, {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %edx
+; X86-AVX512VL-NEXT:    movzwl %dx, %edx
+; X86-AVX512VL-NEXT:    vmovd %edx, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %edx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
+; X86-AVX512VL-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm7
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
+; X86-AVX512VL-NEXT:    movzwl 4(%eax), %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %eax
+; X86-AVX512VL-NEXT:    vpsrlq $48, %xmm2, %xmm1
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %ecx
+; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
+; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm5
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm7, %ymm1, %ymm1
+; X86-AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm6
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm3, %eax
+; X86-AVX512VL-NEXT:    movzwl %ax, %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
+; X86-AVX512VL-NEXT:    vpsrld $16, %xmm3, %xmm4
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm4, %eax
+; X86-AVX512VL-NEXT:    movzwl %ax, %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm4
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X86-AVX512VL-NEXT:    vmovd %xmm4, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; X86-AVX512VL-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm2, %eax
+; X86-AVX512VL-NEXT:    movzwl %ax, %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
+; X86-AVX512VL-NEXT:    vpsrld $16, %xmm2, %xmm3
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm3, %eax
+; X86-AVX512VL-NEXT:    movzwl %ax, %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm3
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm3, %xmm3
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm3, %xmm3
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; X86-AVX512VL-NEXT:    vmovd %xmm3, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm4, %ymm1, %ymm3
+; X86-AVX512VL-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm2, %eax
+; X86-AVX512VL-NEXT:    movzwl %ax, %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
+; X86-AVX512VL-NEXT:    vpsrld $16, %xmm2, %xmm2
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm2, %eax
+; X86-AVX512VL-NEXT:    movzwl %ax, %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm2
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm2, %xmm2
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm2, %xmm2
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; X86-AVX512VL-NEXT:    vmovd %xmm2, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; X86-AVX512VL-NEXT:    vmovdqu (%esp), %xmm4 # 16-byte Reload
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm4, %eax
+; X86-AVX512VL-NEXT:    movzwl %ax, %eax
+; X86-AVX512VL-NEXT:    vmovd %eax, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vmovd %xmm1, %eax
+; X86-AVX512VL-NEXT:    vpsrld $16, %xmm4, %xmm1
+; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %ecx
+; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm0
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
+; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; X86-AVX512VL-NEXT:    vmovd %xmm0, %eax
+; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; X86-AVX512VL-NEXT:    vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm6[0],zmm0[1],zmm6[1],zmm0[4],zmm6[4],zmm0[5],zmm6[5],zmm0[8],zmm6[8],zmm0[9],zmm6[9],zmm0[12],zmm6[12],zmm0[13],zmm6[13]
+; X86-AVX512VL-NEXT:    vpunpcklqdq {{[-0-9]+}}(%e{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
+; X86-AVX512VL-NEXT:    # zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
+; X86-AVX512VL-NEXT:    addl $128, %esp
+; X86-AVX512VL-NEXT:    retl
+;
+; X86-AVX512FP16-LABEL: fneg_v32f16:
+; X86-AVX512FP16:       # %bb.0:
+; X86-AVX512FP16-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} zmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX512FP16-NEXT:    vpxorq (%eax), %zmm0, %zmm0
+; X86-AVX512FP16-NEXT:    retl
+;
+; X86-AVX512VLDQ-LABEL: fneg_v32f16:
+; X86-AVX512VLDQ:       # %bb.0:
+; X86-AVX512VLDQ-NEXT:    subl $128, %esp
+; X86-AVX512VLDQ-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512VLDQ-NEXT:    movzwl 60(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm0
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm0, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovdqa 48(%eax), %xmm3
+; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VLDQ-NEXT:    vmovdqa %xmm3, %xmm4
+; X86-AVX512VLDQ-NEXT:    vmovdqu %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm2, %ecx
+; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm2
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm2, %xmm2
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm2, %xmm2
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm2, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm2
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; X86-AVX512VLDQ-NEXT:    movzwl 44(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm2
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm2, %xmm2
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm2, %xmm2
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm2, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm2
+; X86-AVX512VLDQ-NEXT:    vmovdqa 32(%eax), %xmm3
+; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm5, %ecx
+; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm5
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm5, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm5
+; X86-AVX512VLDQ-NEXT:    movzwl 28(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm6
+; X86-AVX512VLDQ-NEXT:    vmovdqa (%eax), %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovdqu %xmm1, (%esp) # 16-byte Spill
+; X86-AVX512VLDQ-NEXT:    vmovdqa 16(%eax), %xmm2
+; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VLDQ-NEXT:    vmovdqu %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm7, %ecx
+; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm7
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm7, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm7
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; X86-AVX512VLDQ-NEXT:    movzwl 12(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm7
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm7, %ecx
+; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm7, %edx
+; X86-AVX512VLDQ-NEXT:    movzwl %dx, %edx
+; X86-AVX512VLDQ-NEXT:    vmovd %edx, %xmm7
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm7, %edx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm7
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
+; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm6, %ymm1, %ymm1
+; X86-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm1
+; X86-AVX512VLDQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 64-byte Spill
+; X86-AVX512VLDQ-NEXT:    movzwl 56(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm6, %ecx
+; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm6
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm6, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm6
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; X86-AVX512VLDQ-NEXT:    movzwl 40(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm3[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VLDQ-NEXT:    vmovdqa %xmm3, %xmm4
+; X86-AVX512VLDQ-NEXT:    vmovdqu %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %edx
+; X86-AVX512VLDQ-NEXT:    movzwl %dx, %edx
+; X86-AVX512VLDQ-NEXT:    vmovd %edx, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %edx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm7
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
+; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm6, %ymm1, %ymm6
+; X86-AVX512VLDQ-NEXT:    movzwl 24(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %edx
+; X86-AVX512VLDQ-NEXT:    movzwl %dx, %edx
+; X86-AVX512VLDQ-NEXT:    vmovd %edx, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %edx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm7
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
+; X86-AVX512VLDQ-NEXT:    movzwl 8(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VLDQ-NEXT:    vmovdqu (%esp), %xmm2 # 16-byte Reload
+; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %edx
+; X86-AVX512VLDQ-NEXT:    movzwl %dx, %edx
+; X86-AVX512VLDQ-NEXT:    vmovd %edx, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %edx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm7, %ymm1, %ymm1
+; X86-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm1
+; X86-AVX512VLDQ-NEXT:    vpunpckldq {{[-0-9]+}}(%e{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
+; X86-AVX512VLDQ-NEXT:    # zmm1 = zmm1[0],mem[0],zmm1[1],mem[1],zmm1[4],mem[4],zmm1[5],mem[5],zmm1[8],mem[8],zmm1[9],mem[9],zmm1[12],mem[12],zmm1[13],mem[13]
+; X86-AVX512VLDQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 64-byte Spill
+; X86-AVX512VLDQ-NEXT:    movzwl 52(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm3 # 16-byte Reload
+; X86-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm3, %xmm6
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm6, %ecx
+; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm6
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm6, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm6
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; X86-AVX512VLDQ-NEXT:    movzwl 36(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm4, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %edx
+; X86-AVX512VLDQ-NEXT:    movzwl %dx, %edx
+; X86-AVX512VLDQ-NEXT:    vmovd %edx, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %edx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm7
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
+; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm6, %ymm1, %ymm6
+; X86-AVX512VLDQ-NEXT:    movzwl 20(%eax), %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VLDQ-NEXT:    vpsrlq $48, {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %edx
+; X86-AVX512VLDQ-NEXT:    movzwl %dx, %edx
+; X86-AVX512VLDQ-NEXT:    vmovd %edx, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %edx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm7
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
+; X86-AVX512VLDQ-NEXT:    movzwl 4(%eax), %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %eax
+; X86-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm2, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %ecx
+; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm5
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
+; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm7, %ymm1, %ymm1
+; X86-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm6
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm3, %eax
+; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpsrld $16, %xmm3, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm4, %eax
+; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; X86-AVX512VLDQ-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm2, %eax
+; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpsrld $16, %xmm2, %xmm3
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm3, %eax
+; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm3
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm3, %xmm3
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm3, %xmm3
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm3, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm4, %ymm1, %ymm3
+; X86-AVX512VLDQ-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm2, %eax
+; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpsrld $16, %xmm2, %xmm2
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm2, %eax
+; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm2
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm2, %xmm2
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm2, %xmm2
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm2, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; X86-AVX512VLDQ-NEXT:    vmovdqu (%esp), %xmm4 # 16-byte Reload
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm4, %eax
+; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %eax
+; X86-AVX512VLDQ-NEXT:    vpsrld $16, %xmm4, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %ecx
+; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
+; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X86-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm0
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
+; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; X86-AVX512VLDQ-NEXT:    vmovd %xmm0, %eax
+; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; X86-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; X86-AVX512VLDQ-NEXT:    vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm6[0],zmm0[1],zmm6[1],zmm0[4],zmm6[4],zmm0[5],zmm6[5],zmm0[8],zmm6[8],zmm0[9],zmm6[9],zmm0[12],zmm6[12],zmm0[13],zmm6[13]
+; X86-AVX512VLDQ-NEXT:    vpunpcklqdq {{[-0-9]+}}(%e{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
+; X86-AVX512VLDQ-NEXT:    # zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
+; X86-AVX512VLDQ-NEXT:    addl $128, %esp
+; X86-AVX512VLDQ-NEXT:    retl
+;
+; X64-SSE-LABEL: fneg_v32f16:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    subq $120, %rsp
+; X64-SSE-NEXT:    movdqa (%rdi), %xmm1
+; X64-SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps 16(%rdi), %xmm0
+; X64-SSE-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; X64-SSE-NEXT:    movaps 32(%rdi), %xmm0
+; X64-SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps 48(%rdi), %xmm0
+; X64-SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa %xmm1, %xmm0
+; X64-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrlq $48, %xmm0
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrld $16, %xmm0
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-SSE-NEXT:    punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
+; X64-SSE-NEXT:    punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm1 = xmm1[0],mem[0]
+; X64-SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrlq $48, %xmm0
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrld $16, %xmm0
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-SSE-NEXT:    punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
+; X64-SSE-NEXT:    punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm1 = xmm1[0],mem[0]
+; X64-SSE-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd (%rsp), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    punpckldq (%rsp), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrlq $48, %xmm0
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd (%rsp), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrld $16, %xmm0
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa (%rsp), %xmm1 # 16-byte Reload
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-SSE-NEXT:    punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
+; X64-SSE-NEXT:    punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm1 = xmm1[0],mem[0]
+; X64-SSE-NEXT:    movdqa %xmm1, (%rsp) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1,1]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrlq $48, %xmm0
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; X64-SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    xorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    psrld $16, %xmm0
+; X64-SSE-NEXT:    callq __extendhfsf2@PLT
+; X64-SSE-NEXT:    pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT:    callq __truncsfhf2@PLT
+; X64-SSE-NEXT:    movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; X64-SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; X64-SSE-NEXT:    punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1]
+; X64-SSE-NEXT:    punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; X64-SSE-NEXT:    # xmm3 = xmm3[0],mem[0]
+; X64-SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-SSE-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-SSE-NEXT:    movaps (%rsp), %xmm2 # 16-byte Reload
+; X64-SSE-NEXT:    addq $120, %rsp
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: fneg_v32f16:
+; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    pushq %rbx
+; X64-AVX1-NEXT:    subq $128, %rsp
+; X64-AVX1-NEXT:    movq %rdi, %rbx
+; X64-AVX1-NEXT:    vbroadcastss 28(%rdi), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovaps (%rbx), %xmm0
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa 16(%rbx), %xmm1
+; X64-AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovaps 32(%rbx), %xmm0
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovaps 48(%rbx), %xmm0
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 24(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 20(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX1-NEXT:    vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 12(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 8(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 4(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX1-NEXT:    vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
+; X64-AVX1-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 60(%rbx), %xmm0
+; X64-AVX1-NEXT:    vzeroupper
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 56(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 52(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX1-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 44(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 40(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vbroadcastss 36(%rbx), %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX1-NEXT:    vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX1-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; X64-AVX1-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
+; X64-AVX1-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
+; X64-AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; X64-AVX1-NEXT:    addq $128, %rsp
+; X64-AVX1-NEXT:    popq %rbx
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX2-LABEL: fneg_v32f16:
+; X64-AVX2:       # %bb.0:
+; X64-AVX2-NEXT:    pushq %rbx
+; X64-AVX2-NEXT:    subq $192, %rsp
+; X64-AVX2-NEXT:    movq %rdi, %rbx
+; X64-AVX2-NEXT:    vpinsrw $0, 28(%rdi), %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX2-NEXT:    vmovdqa %xmm1, (%rsp) # 16-byte Spill
+; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovaps (%rbx), %xmm0
+; X64-AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa 16(%rbx), %xmm1
+; X64-AVX2-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovaps 32(%rbx), %xmm0
+; X64-AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovaps 48(%rbx), %xmm0
+; X64-AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 12(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 24(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    vzeroupper
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 8(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; X64-AVX2-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
+; X64-AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 20(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    vzeroupper
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 4(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; X64-AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vzeroupper
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vxorps (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vxorps (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; X64-AVX2-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
+; X64-AVX2-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; X64-AVX2-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
+; X64-AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 60(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    vzeroupper
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 44(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 56(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    vzeroupper
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 40(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; X64-AVX2-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
+; X64-AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 52(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    vzeroupper
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vpinsrw $0, 36(%rbx), %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrlq $48, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; X64-AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vzeroupper
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vxorps (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vxorps (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; X64-AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X64-AVX2-NEXT:    callq __extendhfsf2@PLT
+; X64-AVX2-NEXT:    vpxor (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    callq __truncsfhf2@PLT
+; X64-AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; X64-AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X64-AVX2-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; X64-AVX2-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; X64-AVX2-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
+; X64-AVX2-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; X64-AVX2-NEXT:    # ymm1 = ymm0[0],mem[0],ymm0[2],mem[2]
+; X64-AVX2-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; X64-AVX2-NEXT:    addq $192, %rsp
+; X64-AVX2-NEXT:    popq %rbx
+; X64-AVX2-NEXT:    retq
+;
+; X64-AVX512VL-LABEL: fneg_v32f16:
+; X64-AVX512VL:       # %bb.0:
+; X64-AVX512VL-NEXT:    movzwl 60(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm0
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm0, %xmm1
+; X64-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X64-AVX512VL-NEXT:    vmovd %xmm1, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X64-AVX512VL-NEXT:    vmovdqa (%rdi), %xmm1
+; X64-AVX512VL-NEXT:    vmovdqa 16(%rdi), %xmm2
+; X64-AVX512VL-NEXT:    vmovdqa 32(%rdi), %xmm3
+; X64-AVX512VL-NEXT:    vmovdqa 48(%rdi), %xmm4
+; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm4[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm6, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm6
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vmovd %xmm6, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; X64-AVX512VL-NEXT:    movzwl 44(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm6
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vmovd %xmm6, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
+; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm7, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm5, %ymm6, %ymm5
+; X64-AVX512VL-NEXT:    movzwl 28(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm6
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vmovd %xmm6, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
+; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm7, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; X64-AVX512VL-NEXT:    movzwl 12(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm8, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm8
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vmovd %xmm8, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm6, %ymm7, %ymm6
+; X64-AVX512VL-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm5
+; X64-AVX512VL-NEXT:    movzwl 56(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm6
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vmovd %xmm6, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
+; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm7, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; X64-AVX512VL-NEXT:    movzwl 40(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm3[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm8, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm8
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vmovd %xmm8, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm6, %ymm7, %ymm6
+; X64-AVX512VL-NEXT:    movzwl 24(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm8, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm8
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vmovd %xmm8, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; X64-AVX512VL-NEXT:    movzwl 8(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm8
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vmovd %xmm8, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
+; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm9 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm9, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm9
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm9, %xmm9
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm9, %xmm9
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm9, %xmm9
+; X64-AVX512VL-NEXT:    vmovd %xmm9, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm9
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm7, %ymm8, %ymm7
+; X64-AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm6
+; X64-AVX512VL-NEXT:    vpunpckldq {{.*#+}} zmm5 = zmm6[0],zmm5[0],zmm6[1],zmm5[1],zmm6[4],zmm5[4],zmm6[5],zmm5[5],zmm6[8],zmm5[8],zmm6[9],zmm5[9],zmm6[12],zmm5[12],zmm6[13],zmm5[13]
+; X64-AVX512VL-NEXT:    movzwl 52(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm6
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X64-AVX512VL-NEXT:    vmovd %xmm6, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
+; X64-AVX512VL-NEXT:    vpsrlq $48, %xmm4, %xmm7
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm7, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; X64-AVX512VL-NEXT:    movzwl 36(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VL-NEXT:    vpsrlq $48, %xmm3, %xmm8
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm8, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm8
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vmovd %xmm8, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm6, %ymm7, %ymm6
+; X64-AVX512VL-NEXT:    movzwl 20(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VL-NEXT:    vpsrlq $48, %xmm2, %xmm8
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm8, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm8
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vmovd %xmm8, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; X64-AVX512VL-NEXT:    movzwl 4(%rdi), %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm8
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
+; X64-AVX512VL-NEXT:    vmovd %xmm8, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
+; X64-AVX512VL-NEXT:    vpsrlq $48, %xmm1, %xmm9
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm9, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm9
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm9, %xmm9
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm9, %xmm9
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm9, %xmm9
+; X64-AVX512VL-NEXT:    vmovd %xmm9, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm9
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm7, %ymm8, %ymm7
+; X64-AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm6
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm4, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VL-NEXT:    vpsrld $16, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm4, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm3, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VL-NEXT:    vpsrld $16, %xmm3, %xmm3
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm3, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm3
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm3, %xmm3
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm3, %xmm3
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; X64-AVX512VL-NEXT:    vmovd %xmm3, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
+; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm4, %ymm3, %ymm3
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm2, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VL-NEXT:    vpsrld $16, %xmm2, %xmm2
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm2, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm2
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm2, %xmm2
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm2, %xmm2
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; X64-AVX512VL-NEXT:    vmovd %xmm2, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm1, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VL-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VL-NEXT:    vpsrld $16, %xmm1, %xmm1
+; X64-AVX512VL-NEXT:    vpextrw $0, %xmm1, %eax
+; X64-AVX512VL-NEXT:    movzwl %ax, %eax
+; X64-AVX512VL-NEXT:    vmovd %eax, %xmm1
+; X64-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X64-AVX512VL-NEXT:    vpxor %xmm0, %xmm1, %xmm0
+; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; X64-AVX512VL-NEXT:    vmovd %xmm0, %eax
+; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; X64-AVX512VL-NEXT:    vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm6[0],zmm0[1],zmm6[1],zmm0[4],zmm6[4],zmm0[5],zmm6[5],zmm0[8],zmm6[8],zmm0[9],zmm6[9],zmm0[12],zmm6[12],zmm0[13],zmm6[13]
+; X64-AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm5[0],zmm0[2],zmm5[2],zmm0[4],zmm5[4],zmm0[6],zmm5[6]
+; X64-AVX512VL-NEXT:    retq
+;
+; X64-AVX512FP16-LABEL: fneg_v32f16:
+; X64-AVX512FP16:       # %bb.0:
+; X64-AVX512FP16-NEXT:    vpbroadcastw {{.*#+}} zmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX512FP16-NEXT:    vpxorq (%rdi), %zmm0, %zmm0
+; X64-AVX512FP16-NEXT:    retq
+;
+; X64-AVX512VLDQ-LABEL: fneg_v32f16:
+; X64-AVX512VLDQ:       # %bb.0:
+; X64-AVX512VLDQ-NEXT:    movzwl 60(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm0
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm0, %xmm1
+; X64-AVX512VLDQ-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm1
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm1, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
+; X64-AVX512VLDQ-NEXT:    vmovdqa (%rdi), %xmm1
+; X64-AVX512VLDQ-NEXT:    vmovdqa 16(%rdi), %xmm2
+; X64-AVX512VLDQ-NEXT:    vmovdqa 32(%rdi), %xmm3
+; X64-AVX512VLDQ-NEXT:    vmovdqa 48(%rdi), %xmm4
+; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm4[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm6, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm6
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm6, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; X64-AVX512VLDQ-NEXT:    movzwl 44(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm6
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm6, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
+; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm7, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm5, %ymm6, %ymm5
+; X64-AVX512VLDQ-NEXT:    movzwl 28(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm6
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm6, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
+; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm7, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; X64-AVX512VLDQ-NEXT:    movzwl 12(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm8, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm8
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm8, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm6, %ymm7, %ymm6
+; X64-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm5
+; X64-AVX512VLDQ-NEXT:    movzwl 56(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm6
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm6, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
+; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm7, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; X64-AVX512VLDQ-NEXT:    movzwl 40(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm3[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm8, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm8
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm8, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm6, %ymm7, %ymm6
+; X64-AVX512VLDQ-NEXT:    movzwl 24(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm8, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm8
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm8, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; X64-AVX512VLDQ-NEXT:    movzwl 8(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm8
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm8, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
+; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm9 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm9, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm9
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm9, %xmm9
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm9, %xmm9
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm9, %xmm9
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm9, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm9
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm7, %ymm8, %ymm7
+; X64-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm6
+; X64-AVX512VLDQ-NEXT:    vpunpckldq {{.*#+}} zmm5 = zmm6[0],zmm5[0],zmm6[1],zmm5[1],zmm6[4],zmm5[4],zmm6[5],zmm5[5],zmm6[8],zmm5[8],zmm6[9],zmm5[9],zmm6[12],zmm5[12],zmm6[13],zmm5[13]
+; X64-AVX512VLDQ-NEXT:    movzwl 52(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm6
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm6, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
+; X64-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm4, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm7, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; X64-AVX512VLDQ-NEXT:    movzwl 36(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm3, %xmm8
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm8, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm8
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm8, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm6, %ymm7, %ymm6
+; X64-AVX512VLDQ-NEXT:    movzwl 20(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm2, %xmm8
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm8, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm8
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm8, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; X64-AVX512VLDQ-NEXT:    movzwl 4(%rdi), %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm8
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm8, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
+; X64-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm1, %xmm9
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm9, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm9
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm9, %xmm9
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm9, %xmm9
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm9, %xmm9
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm9, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm9
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm7, %ymm8, %ymm7
+; X64-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm6
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm4, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpsrld $16, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm4, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm3, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
+; X64-AVX512VLDQ-NEXT:    vpsrld $16, %xmm3, %xmm3
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm3, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm3
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm3, %xmm3
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm3, %xmm3
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm3, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
+; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm4, %ymm3, %ymm3
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm2, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpsrld $16, %xmm2, %xmm2
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm2, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm2
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm2, %xmm2
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm2, %xmm2
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm2, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
+; X64-AVX512VLDQ-NEXT:    vpsrld $16, %xmm1, %xmm1
+; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %eax
+; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
+; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm1
+; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
+; X64-AVX512VLDQ-NEXT:    vpxor %xmm0, %xmm1, %xmm0
+; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; X64-AVX512VLDQ-NEXT:    vmovd %xmm0, %eax
+; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; X64-AVX512VLDQ-NEXT:    vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm6[0],zmm0[1],zmm6[1],zmm0[4],zmm6[4],zmm0[5],zmm6[5],zmm0[8],zmm6[8],zmm0[9],zmm6[9],zmm0[12],zmm6[12],zmm0[13],zmm6[13]
+; X64-AVX512VLDQ-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm5[0],zmm0[2],zmm5[2],zmm0[4],zmm5[4],zmm0[6],zmm5[6]
+; X64-AVX512VLDQ-NEXT:    retq
+  %v = load <32 x half>, ptr %p, align 16
+  %nnv = fsub <32 x half> <half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0, half -0.0>, %v
+  ret <32 x half> %nnv
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; X64: {{.*}}
+; X64-AVX512: {{.*}}
+; X86: {{.*}}
+; X86-AVX512: {{.*}}