[clang-tools-extra] [llvm] [clang] [ISel] Add pattern matching for depositing subreg value (PR #75978)

David Li via cfe-commits cfe-commits@lists.llvm.org
Wed Dec 20 11:27:38 PST 2023


https://github.com/david-xl updated https://github.com/llvm/llvm-project/pull/75978

From 4e37d0645974b3555b79641db89f4dc40f30c78c Mon Sep 17 00:00:00 2001
From: David Li <davidxl@google.com>
Date: Mon, 27 Nov 2023 12:17:42 -0800
Subject: [PATCH 1/2] Enable custom lowering of fabs_v16f16 with AVX and
 fabs_v32f16 with AVX512F

---
 llvm/lib/Target/X86/X86ISelLowering.cpp |    4 +-
 llvm/test/CodeGen/X86/vec_fabs.ll       | 2305 +----------------------
 2 files changed, 26 insertions(+), 2283 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d0e51301945ecb..7cfd48e283b47a 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1596,8 +1596,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
       setOperationAction(ISD::STORE,              VT, Custom);
     }
     setF16Action(MVT::v16f16, Expand);
-    if (Subtarget.hasAVX2())
-      setOperationAction(ISD::FABS, MVT::v16f16, Custom);
+    setOperationAction(ISD::FABS, MVT::v16f16, Custom);
     setOperationAction(ISD::FADD, MVT::v16f16, Expand);
     setOperationAction(ISD::FSUB, MVT::v16f16, Expand);
     setOperationAction(ISD::FMUL, MVT::v16f16, Expand);
@@ -2054,6 +2053,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
       for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
         setOperationAction(ISD::CTPOP, VT, Legal);
     }
+    setOperationAction(ISD::FABS, MVT::v32f16, Custom);
   }
 
  // This block controls legalization of v32i1/v64i1 which are available with
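
(Context note, not part of the patch: a minimal IR sketch, with a hypothetical
function name, of what the hunks above enable. Since fabs only clears the sign
bit (bit 15 of each half lane), the FABS node can be custom-lowered to a single
bitwise AND with a 0x7FFF-per-lane mask, so plain AVX no longer scalarizes
v16f16 through __extendhfsf2/__truncsfhf2 libcalls, and AVX512F gets the same
treatment for v32f16. The removed CHECK lines in vec_fabs.ll below show exactly
that libcall expansion going away.)

; Hypothetical example, not taken from the test file:
define <16 x half> @fabs16(ptr %p) {
  %v = load <16 x half>, ptr %p
  %r = call <16 x half> @llvm.fabs.v16f16(<16 x half> %v)
  ret <16 x half> %r
}
declare <16 x half> @llvm.fabs.v16f16(<16 x half>)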
diff --git a/llvm/test/CodeGen/X86/vec_fabs.ll b/llvm/test/CodeGen/X86/vec_fabs.ll
index 7e915c9ee04076..ec02dfda30c850 100644
--- a/llvm/test/CodeGen/X86/vec_fabs.ll
+++ b/llvm/test/CodeGen/X86/vec_fabs.ll
@@ -282,235 +282,9 @@ declare <8 x float> @llvm.fabs.v8f32(<8 x float> %p)
 define <16 x half> @fabs_v16f16(ptr %p) {
 ; X86-AVX1-LABEL: fabs_v16f16:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    pushl %esi
-; X86-AVX1-NEXT:    .cfi_def_cfa_offset 8
-; X86-AVX1-NEXT:    subl $308, %esp # imm = 0x134
-; X86-AVX1-NEXT:    .cfi_def_cfa_offset 316
-; X86-AVX1-NEXT:    .cfi_offset %esi, -8
-; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-AVX1-NEXT:    vmovdqa (%esi), %xmm0
-; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovaps 16(%esi), %xmm1
-; X86-AVX1-NEXT:    vmovups %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 4(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 8(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 12(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 20(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 24(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 28(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm1, (%esp)
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
-; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
-; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X86-AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
-; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X86-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; X86-AVX1-NEXT:    vpunpcklqdq {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
-; X86-AVX1-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%e{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    addl $308, %esp # imm = 0x134
-; X86-AVX1-NEXT:    .cfi_def_cfa_offset 8
-; X86-AVX1-NEXT:    popl %esi
-; X86-AVX1-NEXT:    .cfi_def_cfa_offset 4
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX1-NEXT:    vmovaps (%eax), %ymm0
+; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: fabs_v16f16:
@@ -529,135 +303,8 @@ define <16 x half> @fabs_v16f16(ptr %p) {
 ;
 ; X64-AVX1-LABEL: fabs_v16f16:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    pushq %rbx
-; X64-AVX1-NEXT:    .cfi_def_cfa_offset 16
-; X64-AVX1-NEXT:    subq $80, %rsp
-; X64-AVX1-NEXT:    .cfi_def_cfa_offset 96
-; X64-AVX1-NEXT:    .cfi_offset %rbx, -16
-; X64-AVX1-NEXT:    movq %rdi, %rbx
-; X64-AVX1-NEXT:    vbroadcastss 28(%rdi), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovaps (%rbx), %xmm0
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa 16(%rbx), %xmm0
-; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vbroadcastss 24(%rbx), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vbroadcastss 20(%rbx), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; X64-AVX1-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vbroadcastss 12(%rbx), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vbroadcastss 8(%rbx), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vbroadcastss 4(%rbx), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; X64-AVX1-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
-; X64-AVX1-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    addq $80, %rsp
-; X64-AVX1-NEXT:    .cfi_def_cfa_offset 16
-; X64-AVX1-NEXT:    popq %rbx
-; X64-AVX1-NEXT:    .cfi_def_cfa_offset 8
+; X64-AVX1-NEXT:    vmovaps (%rdi), %ymm0
+; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: fabs_v16f16:
@@ -782,455 +429,10 @@ declare <16 x float> @llvm.fabs.v16f32(<16 x float> %p)
 define <32 x half> @fabs_v32f16(ptr %p) {
 ; X86-AVX1-LABEL: fabs_v32f16:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    pushl %esi
-; X86-AVX1-NEXT:    .cfi_def_cfa_offset 8
-; X86-AVX1-NEXT:    subl $644, %esp # imm = 0x284
-; X86-AVX1-NEXT:    .cfi_def_cfa_offset 652
-; X86-AVX1-NEXT:    .cfi_offset %esi, -8
-; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-AVX1-NEXT:    vmovdqa 32(%esi), %xmm0
-; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 36(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 40(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 44(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqa 48(%esi), %xmm0
-; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 52(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 56(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 60(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqa (%esi), %xmm1
-; X86-AVX1-NEXT:    vmovdqu %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovaps 16(%esi), %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vpsrld $16, %xmm1, %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 4(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 8(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 12(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 20(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 24(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vbroadcastss 28(%esi), %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vpextrw $0, %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __extendhfsf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX1-NEXT:    fstps {{[0-9]+}}(%esp)
-; X86-AVX1-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
-; X86-AVX1-NEXT:    vmovss %xmm1, (%esp)
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
-; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
-; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X86-AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
-; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm3 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1],xmm3[2],mem[2],xmm3[3],mem[3]
-; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; X86-AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; X86-AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; X86-AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%e{{[sb]}}p) # 32-byte Spill
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
-; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3]
-; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X86-AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
-; X86-AVX1-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; X86-AVX1-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX1-NEXT:    vzeroupper
-; X86-AVX1-NEXT:    calll __truncsfhf2
-; X86-AVX1-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
-; X86-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X86-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; X86-AVX1-NEXT:    vpunpcklqdq {{[-0-9]+}}(%e{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
-; X86-AVX1-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%e{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
-; X86-AVX1-NEXT:    vmovups {{[-0-9]+}}(%e{{[sb]}}p), %ymm0 # 32-byte Reload
-; X86-AVX1-NEXT:    addl $644, %esp # imm = 0x284
-; X86-AVX1-NEXT:    .cfi_def_cfa_offset 8
-; X86-AVX1-NEXT:    popl %esi
-; X86-AVX1-NEXT:    .cfi_def_cfa_offset 4
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX1-NEXT:    vbroadcastss {{.*#+}} ymm1 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
+; X86-AVX1-NEXT:    vandps (%eax), %ymm1, %ymm0
+; X86-AVX1-NEXT:    vandps 32(%eax), %ymm1, %ymm1
 ; X86-AVX1-NEXT:    retl
 ;
 ; X86-AVX2-LABEL: fabs_v32f16:
@@ -1243,322 +445,10 @@ define <32 x half> @fabs_v32f16(ptr %p) {
 ;
 ; X86-AVX512VL-LABEL: fabs_v32f16:
 ; X86-AVX512VL:       # %bb.0:
-; X86-AVX512VL-NEXT:    subl $128, %esp
-; X86-AVX512VL-NEXT:    .cfi_def_cfa_offset 132
 ; X86-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512VL-NEXT:    movzwl 60(%eax), %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm0
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm0, %xmm1
-; X86-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [NaN,NaN,NaN,NaN]
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
-; X86-AVX512VL-NEXT:    vmovdqa 48(%eax), %xmm3
-; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX512VL-NEXT:    vmovdqa %xmm3, %xmm4
-; X86-AVX512VL-NEXT:    vmovdqu %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm2, %ecx
-; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm2
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm2, %xmm2
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm2, %xmm2
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; X86-AVX512VL-NEXT:    vmovd %xmm2, %ecx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm2
-; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; X86-AVX512VL-NEXT:    movzwl 44(%eax), %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm2
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm2, %xmm2
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm2, %xmm2
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; X86-AVX512VL-NEXT:    vmovd %xmm2, %ecx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm2
-; X86-AVX512VL-NEXT:    vmovdqa 32(%eax), %xmm3
-; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm5, %ecx
-; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm5
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm5, %xmm5
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm5, %xmm5
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
-; X86-AVX512VL-NEXT:    vmovd %xmm5, %ecx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm5
-; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm5
-; X86-AVX512VL-NEXT:    movzwl 28(%eax), %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm6
-; X86-AVX512VL-NEXT:    vmovdqa (%eax), %xmm1
-; X86-AVX512VL-NEXT:    vmovdqu %xmm1, (%esp) # 16-byte Spill
-; X86-AVX512VL-NEXT:    vmovdqa 16(%eax), %xmm2
-; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX512VL-NEXT:    vmovdqu %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm7, %ecx
-; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm7
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X86-AVX512VL-NEXT:    vmovd %xmm7, %ecx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm7
-; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; X86-AVX512VL-NEXT:    movzwl 12(%eax), %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm7
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X86-AVX512VL-NEXT:    vmovd %xmm7, %ecx
-; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm7, %edx
-; X86-AVX512VL-NEXT:    movzwl %dx, %edx
-; X86-AVX512VL-NEXT:    vmovd %edx, %xmm7
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X86-AVX512VL-NEXT:    vmovd %xmm7, %edx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm7
-; X86-AVX512VL-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm1
-; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
-; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm6, %ymm1, %ymm1
-; X86-AVX512VL-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm1
-; X86-AVX512VL-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 64-byte Spill
-; X86-AVX512VL-NEXT:    movzwl 56(%eax), %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
-; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm6, %ecx
-; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm6
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm6, %xmm6
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
-; X86-AVX512VL-NEXT:    vmovd %xmm6, %ecx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm6
-; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; X86-AVX512VL-NEXT:    movzwl 40(%eax), %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm3[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX512VL-NEXT:    vmovdqa %xmm3, %xmm4
-; X86-AVX512VL-NEXT:    vmovdqu %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %edx
-; X86-AVX512VL-NEXT:    movzwl %dx, %edx
-; X86-AVX512VL-NEXT:    vmovd %edx, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %edx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
-; X86-AVX512VL-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm7
-; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
-; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm6, %ymm1, %ymm6
-; X86-AVX512VL-NEXT:    movzwl 24(%eax), %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %edx
-; X86-AVX512VL-NEXT:    movzwl %dx, %edx
-; X86-AVX512VL-NEXT:    vmovd %edx, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %edx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
-; X86-AVX512VL-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm7
-; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
-; X86-AVX512VL-NEXT:    movzwl 8(%eax), %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VL-NEXT:    vmovdqu (%esp), %xmm2 # 16-byte Reload
-; X86-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %edx
-; X86-AVX512VL-NEXT:    movzwl %dx, %edx
-; X86-AVX512VL-NEXT:    vmovd %edx, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %edx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
-; X86-AVX512VL-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm5
-; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm7, %ymm1, %ymm1
-; X86-AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm1
-; X86-AVX512VL-NEXT:    vpunpckldq {{[-0-9]+}}(%e{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
-; X86-AVX512VL-NEXT:    # zmm1 = zmm1[0],mem[0],zmm1[1],mem[1],zmm1[4],mem[4],zmm1[5],mem[5],zmm1[8],mem[8],zmm1[9],mem[9],zmm1[12],mem[12],zmm1[13],mem[13]
-; X86-AVX512VL-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 64-byte Spill
-; X86-AVX512VL-NEXT:    movzwl 52(%eax), %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
-; X86-AVX512VL-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm3 # 16-byte Reload
-; X86-AVX512VL-NEXT:    vpsrlq $48, %xmm3, %xmm6
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm6, %ecx
-; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm6
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm6, %xmm6
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
-; X86-AVX512VL-NEXT:    vmovd %xmm6, %ecx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm6
-; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; X86-AVX512VL-NEXT:    movzwl 36(%eax), %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VL-NEXT:    vpsrlq $48, %xmm4, %xmm1
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %edx
-; X86-AVX512VL-NEXT:    movzwl %dx, %edx
-; X86-AVX512VL-NEXT:    vmovd %edx, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %edx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
-; X86-AVX512VL-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm7
-; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
-; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm6, %ymm1, %ymm6
-; X86-AVX512VL-NEXT:    movzwl 20(%eax), %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VL-NEXT:    vpsrlq $48, {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %edx
-; X86-AVX512VL-NEXT:    movzwl %dx, %edx
-; X86-AVX512VL-NEXT:    vmovd %edx, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %edx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
-; X86-AVX512VL-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm7
-; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
-; X86-AVX512VL-NEXT:    movzwl 4(%eax), %eax
-; X86-AVX512VL-NEXT:    vmovd %eax, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %eax
-; X86-AVX512VL-NEXT:    vpsrlq $48, %xmm2, %xmm1
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %ecx
-; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
-; X86-AVX512VL-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm5
-; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm7, %ymm1, %ymm1
-; X86-AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm6
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm3, %eax
-; X86-AVX512VL-NEXT:    movzwl %ax, %eax
-; X86-AVX512VL-NEXT:    vmovd %eax, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %eax
-; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
-; X86-AVX512VL-NEXT:    vpsrld $16, %xmm3, %xmm4
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm4, %eax
-; X86-AVX512VL-NEXT:    movzwl %ax, %eax
-; X86-AVX512VL-NEXT:    vmovd %eax, %xmm4
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm4, %xmm4
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; X86-AVX512VL-NEXT:    vmovd %xmm4, %eax
-; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
-; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; X86-AVX512VL-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm2, %eax
-; X86-AVX512VL-NEXT:    movzwl %ax, %eax
-; X86-AVX512VL-NEXT:    vmovd %eax, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %eax
-; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
-; X86-AVX512VL-NEXT:    vpsrld $16, %xmm2, %xmm3
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm3, %eax
-; X86-AVX512VL-NEXT:    movzwl %ax, %eax
-; X86-AVX512VL-NEXT:    vmovd %eax, %xmm3
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm3, %xmm3
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm3, %xmm3
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
-; X86-AVX512VL-NEXT:    vmovd %xmm3, %eax
-; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
-; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm4, %ymm1, %ymm3
-; X86-AVX512VL-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm2, %eax
-; X86-AVX512VL-NEXT:    movzwl %ax, %eax
-; X86-AVX512VL-NEXT:    vmovd %eax, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %eax
-; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
-; X86-AVX512VL-NEXT:    vpsrld $16, %xmm2, %xmm2
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm2, %eax
-; X86-AVX512VL-NEXT:    movzwl %ax, %eax
-; X86-AVX512VL-NEXT:    vmovd %eax, %xmm2
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm2, %xmm2
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm2, %xmm2
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; X86-AVX512VL-NEXT:    vmovd %xmm2, %eax
-; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
-; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; X86-AVX512VL-NEXT:    vmovdqu (%esp), %xmm4 # 16-byte Reload
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm4, %eax
-; X86-AVX512VL-NEXT:    movzwl %ax, %eax
-; X86-AVX512VL-NEXT:    vmovd %eax, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vmovd %xmm1, %eax
-; X86-AVX512VL-NEXT:    vpsrld $16, %xmm4, %xmm1
-; X86-AVX512VL-NEXT:    vpextrw $0, %xmm1, %ecx
-; X86-AVX512VL-NEXT:    movzwl %cx, %ecx
-; X86-AVX512VL-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
-; X86-AVX512VL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; X86-AVX512VL-NEXT:    vmovd %xmm0, %eax
-; X86-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; X86-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X86-AVX512VL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; X86-AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; X86-AVX512VL-NEXT:    vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm6[0],zmm0[1],zmm6[1],zmm0[4],zmm6[4],zmm0[5],zmm6[5],zmm0[8],zmm6[8],zmm0[9],zmm6[9],zmm0[12],zmm6[12],zmm0[13],zmm6[13]
-; X86-AVX512VL-NEXT:    vpunpcklqdq {{[-0-9]+}}(%e{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
-; X86-AVX512VL-NEXT:    # zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
-; X86-AVX512VL-NEXT:    addl $128, %esp
-; X86-AVX512VL-NEXT:    .cfi_def_cfa_offset 4
+; X86-AVX512VL-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
+; X86-AVX512VL-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; X86-AVX512VL-NEXT:    vpandq (%eax), %zmm0, %zmm0
 ; X86-AVX512VL-NEXT:    retl
 ;
 ; X86-AVX512FP16-LABEL: fabs_v32f16:
@@ -1570,578 +460,17 @@ define <32 x half> @fabs_v32f16(ptr %p) {
 ;
 ; X86-AVX512VLDQ-LABEL: fabs_v32f16:
 ; X86-AVX512VLDQ:       # %bb.0:
-; X86-AVX512VLDQ-NEXT:    subl $128, %esp
-; X86-AVX512VLDQ-NEXT:    .cfi_def_cfa_offset 132
 ; X86-AVX512VLDQ-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX512VLDQ-NEXT:    movzwl 60(%eax), %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm0
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm0, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [NaN,NaN,NaN,NaN]
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovdqa 48(%eax), %xmm3
-; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX512VLDQ-NEXT:    vmovdqa %xmm3, %xmm4
-; X86-AVX512VLDQ-NEXT:    vmovdqu %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm2, %ecx
-; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm2
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm2, %xmm2
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm2, %xmm2
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm2, %ecx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm2
-; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; X86-AVX512VLDQ-NEXT:    movzwl 44(%eax), %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm2
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm2, %xmm2
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm2, %xmm2
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm2, %ecx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm2
-; X86-AVX512VLDQ-NEXT:    vmovdqa 32(%eax), %xmm3
-; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm5, %ecx
-; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm5
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm5, %xmm5
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm5, %xmm5
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm5, %ecx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm5
-; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm5
-; X86-AVX512VLDQ-NEXT:    movzwl 28(%eax), %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm6
-; X86-AVX512VLDQ-NEXT:    vmovdqa (%eax), %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovdqu %xmm1, (%esp) # 16-byte Spill
-; X86-AVX512VLDQ-NEXT:    vmovdqa 16(%eax), %xmm2
-; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX512VLDQ-NEXT:    vmovdqu %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm7, %ecx
-; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm7
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm7, %ecx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm7
-; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; X86-AVX512VLDQ-NEXT:    movzwl 12(%eax), %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm7
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm7, %ecx
-; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm7, %edx
-; X86-AVX512VLDQ-NEXT:    movzwl %dx, %edx
-; X86-AVX512VLDQ-NEXT:    vmovd %edx, %xmm7
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm7, %edx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm7
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
-; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm6, %ymm1, %ymm1
-; X86-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm1
-; X86-AVX512VLDQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 64-byte Spill
-; X86-AVX512VLDQ-NEXT:    movzwl 56(%eax), %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm6, %ecx
-; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm6
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm6, %xmm6
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm6, %ecx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm6
-; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; X86-AVX512VLDQ-NEXT:    movzwl 40(%eax), %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm3[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX512VLDQ-NEXT:    vmovdqa %xmm3, %xmm4
-; X86-AVX512VLDQ-NEXT:    vmovdqu %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %edx
-; X86-AVX512VLDQ-NEXT:    movzwl %dx, %edx
-; X86-AVX512VLDQ-NEXT:    vmovd %edx, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %edx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm7
-; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
-; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm6, %ymm1, %ymm6
-; X86-AVX512VLDQ-NEXT:    movzwl 24(%eax), %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %edx
-; X86-AVX512VLDQ-NEXT:    movzwl %dx, %edx
-; X86-AVX512VLDQ-NEXT:    vmovd %edx, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %edx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm7
-; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
-; X86-AVX512VLDQ-NEXT:    movzwl 8(%eax), %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VLDQ-NEXT:    vmovdqu (%esp), %xmm2 # 16-byte Reload
-; X86-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %edx
-; X86-AVX512VLDQ-NEXT:    movzwl %dx, %edx
-; X86-AVX512VLDQ-NEXT:    vmovd %edx, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %edx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm5
-; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm7, %ymm1, %ymm1
-; X86-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm1
-; X86-AVX512VLDQ-NEXT:    vpunpckldq {{[-0-9]+}}(%e{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
-; X86-AVX512VLDQ-NEXT:    # zmm1 = zmm1[0],mem[0],zmm1[1],mem[1],zmm1[4],mem[4],zmm1[5],mem[5],zmm1[8],mem[8],zmm1[9],mem[9],zmm1[12],mem[12],zmm1[13],mem[13]
-; X86-AVX512VLDQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 64-byte Spill
-; X86-AVX512VLDQ-NEXT:    movzwl 52(%eax), %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm3 # 16-byte Reload
-; X86-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm3, %xmm6
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm6, %ecx
-; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm6
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm6, %xmm6
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm6, %ecx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm6
-; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; X86-AVX512VLDQ-NEXT:    movzwl 36(%eax), %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm4, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %edx
-; X86-AVX512VLDQ-NEXT:    movzwl %dx, %edx
-; X86-AVX512VLDQ-NEXT:    vmovd %edx, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %edx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm7
-; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
-; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm6, %ymm1, %ymm6
-; X86-AVX512VLDQ-NEXT:    movzwl 20(%eax), %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VLDQ-NEXT:    vpsrlq $48, {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %edx
-; X86-AVX512VLDQ-NEXT:    movzwl %dx, %edx
-; X86-AVX512VLDQ-NEXT:    vmovd %edx, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %edx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %edx, %xmm0, %xmm7
-; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
-; X86-AVX512VLDQ-NEXT:    movzwl 4(%eax), %eax
-; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %eax
-; X86-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm2, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %ecx
-; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %ecx
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm5
-; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm7, %ymm1, %ymm1
-; X86-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm6
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm3, %eax
-; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %eax
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpsrld $16, %xmm3, %xmm4
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm4, %eax
-; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm4, %xmm4
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
-; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; X86-AVX512VLDQ-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm2, %eax
-; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %eax
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpsrld $16, %xmm2, %xmm3
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm3, %eax
-; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm3
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm3, %xmm3
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm3, %xmm3
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm3, %eax
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
-; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm4, %ymm1, %ymm3
-; X86-AVX512VLDQ-NEXT:    vmovdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 16-byte Reload
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm2, %eax
-; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %eax
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpsrld $16, %xmm2, %xmm2
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm2, %eax
-; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm2
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm2, %xmm2
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm2, %xmm2
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm2, %eax
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
-; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; X86-AVX512VLDQ-NEXT:    vmovdqu (%esp), %xmm4 # 16-byte Reload
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm4, %eax
-; X86-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X86-AVX512VLDQ-NEXT:    vmovd %eax, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm1, %eax
-; X86-AVX512VLDQ-NEXT:    vpsrld $16, %xmm4, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %ecx
-; X86-AVX512VLDQ-NEXT:    movzwl %cx, %ecx
-; X86-AVX512VLDQ-NEXT:    vmovd %ecx, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X86-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
-; X86-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; X86-AVX512VLDQ-NEXT:    vmovd %xmm0, %eax
-; X86-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; X86-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X86-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; X86-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; X86-AVX512VLDQ-NEXT:    vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm6[0],zmm0[1],zmm6[1],zmm0[4],zmm6[4],zmm0[5],zmm6[5],zmm0[8],zmm6[8],zmm0[9],zmm6[9],zmm0[12],zmm6[12],zmm0[13],zmm6[13]
-; X86-AVX512VLDQ-NEXT:    vpunpcklqdq {{[-0-9]+}}(%e{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
-; X86-AVX512VLDQ-NEXT:    # zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
-; X86-AVX512VLDQ-NEXT:    addl $128, %esp
-; X86-AVX512VLDQ-NEXT:    .cfi_def_cfa_offset 4
+; X86-AVX512VLDQ-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
+; X86-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; X86-AVX512VLDQ-NEXT:    vpandq (%eax), %zmm0, %zmm0
 ; X86-AVX512VLDQ-NEXT:    retl
 ;
 ; X64-AVX1-LABEL: fabs_v32f16:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    pushq %rbx
-; X64-AVX1-NEXT:    .cfi_def_cfa_offset 16
-; X64-AVX1-NEXT:    subq $128, %rsp
-; X64-AVX1-NEXT:    .cfi_def_cfa_offset 144
-; X64-AVX1-NEXT:    .cfi_offset %rbx, -16
-; X64-AVX1-NEXT:    movq %rdi, %rbx
-; X64-AVX1-NEXT:    vbroadcastss 28(%rdi), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovaps (%rbx), %xmm0
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa 16(%rbx), %xmm1
-; X64-AVX1-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovaps 32(%rbx), %xmm0
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovaps 48(%rbx), %xmm0
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vbroadcastss 24(%rbx), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vbroadcastss 20(%rbx), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; X64-AVX1-NEXT:    vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vbroadcastss 12(%rbx), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vbroadcastss 8(%rbx), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vbroadcastss 4(%rbx), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; X64-AVX1-NEXT:    vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
-; X64-AVX1-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; X64-AVX1-NEXT:    vbroadcastss 60(%rbx), %xmm0
-; X64-AVX1-NEXT:    vzeroupper
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vbroadcastss 56(%rbx), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vbroadcastss 52(%rbx), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; X64-AVX1-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vbroadcastss 44(%rbx), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vbroadcastss 40(%rbx), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vbroadcastss 36(%rbx), %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; X64-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __extendhfsf2@PLT
-; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX1-NEXT:    callq __truncsfhf2@PLT
-; X64-AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; X64-AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; X64-AVX1-NEXT:    vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; X64-AVX1-NEXT:    vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    # xmm0 = xmm0[0],mem[0]
-; X64-AVX1-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
-; X64-AVX1-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; X64-AVX1-NEXT:    addq $128, %rsp
-; X64-AVX1-NEXT:    .cfi_def_cfa_offset 16
-; X64-AVX1-NEXT:    popq %rbx
-; X64-AVX1-NEXT:    .cfi_def_cfa_offset 8
+; X64-AVX1-NEXT:    vbroadcastss {{.*#+}} ymm1 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
+; X64-AVX1-NEXT:    vandps (%rdi), %ymm1, %ymm0
+; X64-AVX1-NEXT:    vandps 32(%rdi), %ymm1, %ymm1
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: fabs_v32f16:
@@ -2153,302 +482,9 @@ define <32 x half> @fabs_v32f16(ptr %p) {
 ;
 ; X64-AVX512VL-LABEL: fabs_v32f16:
 ; X64-AVX512VL:       # %bb.0:
-; X64-AVX512VL-NEXT:    movzwl 60(%rdi), %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm0
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm0, %xmm1
-; X64-AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [NaN,NaN,NaN,NaN]
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X64-AVX512VL-NEXT:    vmovd %xmm1, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
-; X64-AVX512VL-NEXT:    vmovdqa (%rdi), %xmm1
-; X64-AVX512VL-NEXT:    vmovdqa 16(%rdi), %xmm2
-; X64-AVX512VL-NEXT:    vmovdqa 32(%rdi), %xmm3
-; X64-AVX512VL-NEXT:    vmovdqa 48(%rdi), %xmm4
-; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm4[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm6, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm6
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm6, %xmm6
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
-; X64-AVX512VL-NEXT:    vmovd %xmm6, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
-; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
-; X64-AVX512VL-NEXT:    movzwl 44(%rdi), %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm6
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm6, %xmm6
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
-; X64-AVX512VL-NEXT:    vmovd %xmm6, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
-; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm7, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm5, %ymm6, %ymm5
-; X64-AVX512VL-NEXT:    movzwl 28(%rdi), %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm6
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm6, %xmm6
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
-; X64-AVX512VL-NEXT:    vmovd %xmm6, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
-; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm7, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; X64-AVX512VL-NEXT:    movzwl 12(%rdi), %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm8, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm8
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vmovd %xmm8, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
-; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm6, %ymm7, %ymm6
-; X64-AVX512VL-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm5
-; X64-AVX512VL-NEXT:    movzwl 56(%rdi), %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm6
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm6, %xmm6
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
-; X64-AVX512VL-NEXT:    vmovd %xmm6, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
-; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm7, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; X64-AVX512VL-NEXT:    movzwl 40(%rdi), %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm3[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm8, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm8
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vmovd %xmm8, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
-; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm6, %ymm7, %ymm6
-; X64-AVX512VL-NEXT:    movzwl 24(%rdi), %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm8, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm8
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vmovd %xmm8, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
-; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; X64-AVX512VL-NEXT:    movzwl 8(%rdi), %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm8
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vmovd %xmm8, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
-; X64-AVX512VL-NEXT:    vpsrldq {{.*#+}} xmm9 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm9, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm9
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm9, %xmm9
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm9, %xmm9
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm9, %xmm9
-; X64-AVX512VL-NEXT:    vmovd %xmm9, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm9
-; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
-; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm7, %ymm8, %ymm7
-; X64-AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; X64-AVX512VL-NEXT:    vpunpckldq {{.*#+}} zmm5 = zmm6[0],zmm5[0],zmm6[1],zmm5[1],zmm6[4],zmm5[4],zmm6[5],zmm5[5],zmm6[8],zmm5[8],zmm6[9],zmm5[9],zmm6[12],zmm5[12],zmm6[13],zmm5[13]
-; X64-AVX512VL-NEXT:    movzwl 52(%rdi), %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm6
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm6, %xmm6
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
-; X64-AVX512VL-NEXT:    vmovd %xmm6, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
-; X64-AVX512VL-NEXT:    vpsrlq $48, %xmm4, %xmm7
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm7, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; X64-AVX512VL-NEXT:    movzwl 36(%rdi), %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VL-NEXT:    vpsrlq $48, %xmm3, %xmm8
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm8, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm8
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vmovd %xmm8, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
-; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm6, %ymm7, %ymm6
-; X64-AVX512VL-NEXT:    movzwl 20(%rdi), %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VL-NEXT:    vpsrlq $48, %xmm2, %xmm8
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm8, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm8
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vmovd %xmm8, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
-; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; X64-AVX512VL-NEXT:    movzwl 4(%rdi), %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm8
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
-; X64-AVX512VL-NEXT:    vmovd %xmm8, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
-; X64-AVX512VL-NEXT:    vpsrlq $48, %xmm1, %xmm9
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm9, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm9
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm9, %xmm9
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm9, %xmm9
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm9, %xmm9
-; X64-AVX512VL-NEXT:    vmovd %xmm9, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm9
-; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
-; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm7, %ymm8, %ymm7
-; X64-AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm4, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VL-NEXT:    vpsrld $16, %xmm4, %xmm4
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm4, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm4
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm4, %xmm4
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; X64-AVX512VL-NEXT:    vmovd %xmm4, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
-; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm3, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VL-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VL-NEXT:    vpsrld $16, %xmm3, %xmm3
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm3, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm3
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm3, %xmm3
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm3, %xmm3
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
-; X64-AVX512VL-NEXT:    vmovd %xmm3, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
-; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
-; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm4, %ymm3, %ymm3
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm2, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm4
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm4, %xmm4
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; X64-AVX512VL-NEXT:    vmovd %xmm4, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
-; X64-AVX512VL-NEXT:    vpsrld $16, %xmm2, %xmm2
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm2, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm2
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm2, %xmm2
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm2, %xmm2
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; X64-AVX512VL-NEXT:    vmovd %xmm2, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
-; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm1, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm4
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm4, %xmm4
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; X64-AVX512VL-NEXT:    vmovd %xmm4, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
-; X64-AVX512VL-NEXT:    vpsrld $16, %xmm1, %xmm1
-; X64-AVX512VL-NEXT:    vpextrw $0, %xmm1, %eax
-; X64-AVX512VL-NEXT:    movzwl %ax, %eax
-; X64-AVX512VL-NEXT:    vmovd %eax, %xmm1
-; X64-AVX512VL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X64-AVX512VL-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; X64-AVX512VL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; X64-AVX512VL-NEXT:    vmovd %xmm0, %eax
-; X64-AVX512VL-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; X64-AVX512VL-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; X64-AVX512VL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; X64-AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; X64-AVX512VL-NEXT:    vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm6[0],zmm0[1],zmm6[1],zmm0[4],zmm6[4],zmm0[5],zmm6[5],zmm0[8],zmm6[8],zmm0[9],zmm6[9],zmm0[12],zmm6[12],zmm0[13],zmm6[13]
-; X64-AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm5[0],zmm0[2],zmm5[2],zmm0[4],zmm5[4],zmm0[6],zmm5[6]
+; X64-AVX512VL-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
+; X64-AVX512VL-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512VL-NEXT:    vpandq (%rdi), %zmm0, %zmm0
 ; X64-AVX512VL-NEXT:    retq
 ;
 ; X64-AVX512FP16-LABEL: fabs_v32f16:
@@ -2459,302 +495,9 @@ define <32 x half> @fabs_v32f16(ptr %p) {
 ;
 ; X64-AVX512VLDQ-LABEL: fabs_v32f16:
 ; X64-AVX512VLDQ:       # %bb.0:
-; X64-AVX512VLDQ-NEXT:    movzwl 60(%rdi), %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm0
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm0, %xmm1
-; X64-AVX512VLDQ-NEXT:    vpbroadcastd {{.*#+}} xmm0 = [NaN,NaN,NaN,NaN]
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm1
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm1, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm5
-; X64-AVX512VLDQ-NEXT:    vmovdqa (%rdi), %xmm1
-; X64-AVX512VLDQ-NEXT:    vmovdqa 16(%rdi), %xmm2
-; X64-AVX512VLDQ-NEXT:    vmovdqa 32(%rdi), %xmm3
-; X64-AVX512VLDQ-NEXT:    vmovdqa 48(%rdi), %xmm4
-; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm4[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm6, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm6
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm6, %xmm6
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm6, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
-; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
-; X64-AVX512VLDQ-NEXT:    movzwl 44(%rdi), %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm6
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm6, %xmm6
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm6, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
-; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm7, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm5, %ymm6, %ymm5
-; X64-AVX512VLDQ-NEXT:    movzwl 28(%rdi), %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm6
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm6, %xmm6
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm6, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
-; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm7, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; X64-AVX512VLDQ-NEXT:    movzwl 12(%rdi), %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm8, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm8
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm8, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
-; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm6, %ymm7, %ymm6
-; X64-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm5
-; X64-AVX512VLDQ-NEXT:    movzwl 56(%rdi), %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm6
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm6, %xmm6
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm6, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
-; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm7, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; X64-AVX512VLDQ-NEXT:    movzwl 40(%rdi), %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm3[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm8, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm8
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm8, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
-; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm6, %ymm7, %ymm6
-; X64-AVX512VLDQ-NEXT:    movzwl 24(%rdi), %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm8, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm8
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm8, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
-; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; X64-AVX512VLDQ-NEXT:    movzwl 8(%rdi), %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm8
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm8, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
-; X64-AVX512VLDQ-NEXT:    vpsrldq {{.*#+}} xmm9 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm9, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm9
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm9, %xmm9
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm9, %xmm9
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm9, %xmm9
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm9, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm9
-; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
-; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm7, %ymm8, %ymm7
-; X64-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; X64-AVX512VLDQ-NEXT:    vpunpckldq {{.*#+}} zmm5 = zmm6[0],zmm5[0],zmm6[1],zmm5[1],zmm6[4],zmm5[4],zmm6[5],zmm5[5],zmm6[8],zmm5[8],zmm6[9],zmm5[9],zmm6[12],zmm5[12],zmm6[13],zmm5[13]
-; X64-AVX512VLDQ-NEXT:    movzwl 52(%rdi), %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm6
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm6, %xmm6
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm6, %xmm6
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm6, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm6
-; X64-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm4, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm7, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; X64-AVX512VLDQ-NEXT:    movzwl 36(%rdi), %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm3, %xmm8
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm8, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm8
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm8, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
-; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm6, %ymm7, %ymm6
-; X64-AVX512VLDQ-NEXT:    movzwl 20(%rdi), %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm2, %xmm8
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm8, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm8
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm8, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
-; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; X64-AVX512VLDQ-NEXT:    movzwl 4(%rdi), %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm8
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm8, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm8
-; X64-AVX512VLDQ-NEXT:    vpsrlq $48, %xmm1, %xmm9
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm9, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm9
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm9, %xmm9
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm9, %xmm9
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm9, %xmm9
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm9, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm9
-; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
-; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm7, %ymm8, %ymm7
-; X64-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm4, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpsrld $16, %xmm4, %xmm4
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm4, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm4, %xmm4
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
-; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm3, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm7, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm7
-; X64-AVX512VLDQ-NEXT:    vpsrld $16, %xmm3, %xmm3
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm3, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm3
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm3, %xmm3
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm3, %xmm3
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm3, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm3
-; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
-; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm4, %ymm3, %ymm3
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm2, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm4, %xmm4
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
-; X64-AVX512VLDQ-NEXT:    vpsrld $16, %xmm2, %xmm2
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm2, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm2
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm2, %xmm2
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm2, %xmm2
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm2, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
-; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm4
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm4, %xmm4
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm4, %xmm4
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm4, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm4
-; X64-AVX512VLDQ-NEXT:    vpsrld $16, %xmm1, %xmm1
-; X64-AVX512VLDQ-NEXT:    vpextrw $0, %xmm1, %eax
-; X64-AVX512VLDQ-NEXT:    movzwl %ax, %eax
-; X64-AVX512VLDQ-NEXT:    vmovd %eax, %xmm1
-; X64-AVX512VLDQ-NEXT:    vcvtph2ps %xmm1, %xmm1
-; X64-AVX512VLDQ-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; X64-AVX512VLDQ-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; X64-AVX512VLDQ-NEXT:    vmovd %xmm0, %eax
-; X64-AVX512VLDQ-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; X64-AVX512VLDQ-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; X64-AVX512VLDQ-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; X64-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; X64-AVX512VLDQ-NEXT:    vpunpckldq {{.*#+}} zmm0 = zmm0[0],zmm6[0],zmm0[1],zmm6[1],zmm0[4],zmm6[4],zmm0[5],zmm6[5],zmm0[8],zmm6[8],zmm0[9],zmm6[9],zmm0[12],zmm6[12],zmm0[13],zmm6[13]
-; X64-AVX512VLDQ-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm5[0],zmm0[2],zmm5[2],zmm0[4],zmm5[4],zmm0[6],zmm5[6]
+; X64-AVX512VLDQ-NEXT:    vpbroadcastw {{.*#+}} ymm0 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
+; X64-AVX512VLDQ-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512VLDQ-NEXT:    vpandq (%rdi), %zmm0, %zmm0
 ; X64-AVX512VLDQ-NEXT:    retq
   %v = load <32 x half>, ptr %p, align 64
   %nnv = call <32 x half> @llvm.fabs.v32f16(<32 x half> %v)

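For context on the first patch: fabs on an IEEE-754 half is just a
sign-bit clear, so fabs on a whole <32 x half> reduces to one 512-bit
AND with a broadcast 0x7fff mask (the test comments print that mask as
NaN because 0x7fff reinterpreted as a half is a NaN encoding). A
minimal scalar sketch of the same bit trick, using a hypothetical
helper name that is not part of the patch:

#include <cstdint>

// Clear bit 15 (the sign bit) of a raw half-precision encoding; the
// vectorized form of this AND is the single vpandq in the new codegen
// above.
uint16_t fabs_half_bits(uint16_t h) {
  return h & 0x7fffu;
}
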
>From f66fe021d3b3956d4b8ef16dafec6c50b87ce5f8 Mon Sep 17 00:00:00 2001
From: David Li <davidxl at google.com>
Date: Tue, 19 Dec 2023 14:07:45 -0800
Subject: [PATCH 2/2] ISel improvement for subreg insertion pattern

---
 llvm/lib/Target/X86/X86InstrCompiler.td | 17 +++++
 llvm/test/CodeGen/X86/insert.ll         | 93 +++++++++++++++++++++++++
 2 files changed, 110 insertions(+)
 create mode 100644 llvm/test/CodeGen/X86/insert.ll

diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 457833f8cc3313..c77c77ee4a3eeb 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -1515,6 +1515,23 @@ def : Pat<(X86add_flag_nocf GR32:$src1, 128),
 def : Pat<(X86add_flag_nocf GR64:$src1, 128),
           (SUB64ri32 GR64:$src1, -128)>;
 
+// Depositing a loaded value into an 8/16-bit subreg:
+def : Pat<(or (and GR64:$dst, -256),
+              (i64 (zextloadi8 addr:$src))),
+          (INSERT_SUBREG (i64 (COPY $dst)), (MOV8rm i8mem:$src), sub_8bit)>;
+
+def : Pat<(or (and GR32:$dst, -256),
+              (i32 (zextloadi8 addr:$src))),
+          (INSERT_SUBREG (i32 (COPY $dst)), (MOV8rm i8mem:$src), sub_8bit)>;
+
+def : Pat<(or (and GR64:$dst, -65536),
+              (i64 (zextloadi16 addr:$src))),
+          (INSERT_SUBREG (i64 (COPY $dst)), (MOV16rm i16mem:$src), sub_16bit)>;
+
+def : Pat<(or (and GR32:$dst, -65536),
+              (i32 (zextloadi16 addr:$src))),
+          (INSERT_SUBREG (i32 (COPY $dst)), (MOV16rm i16mem:$src), sub_16bit)>;
+
 // The same trick applies for 32-bit immediate fields in 64-bit
 // instructions.
 def : Pat<(add GR64:$src1, 0x0000000080000000),
diff --git a/llvm/test/CodeGen/X86/insert.ll b/llvm/test/CodeGen/X86/insert.ll
new file mode 100644
index 00000000000000..381de2ecaa1646
--- /dev/null
+++ b/llvm/test/CodeGen/X86/insert.ll
@@ -0,0 +1,93 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=i386-unknown-unknown | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64
+
+define i64 @sub8(i64 noundef %res, ptr %byte) {
+; X86-LABEL: sub8:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movb (%ecx), %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: sub8:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movb (%rsi), %al
+; X64-NEXT:    retq
+entry:
+  %and = and i64 %res, -256
+  %d = load i8, ptr %byte, align 1
+  %conv2 = zext i8 %d to i64
+  %or = or i64 %and, %conv2
+  ret i64 %or
+}
+
+define i64 @sub16(i64 noundef %res, ptr %byte) {
+; X86-LABEL: sub16:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    shll $16, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzwl (%eax), %eax
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: sub16:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movw (%rsi), %ax
+; X64-NEXT:    retq
+entry:
+  %and = and i64 %res, -65536
+  %d = load i16, ptr %byte, align 1
+  %conv2 = zext i16 %d to i64
+  %or = or i64 %and, %conv2
+  ret i64 %or
+}
+
+define i32 @sub8_32(i32 noundef %res, ptr %byte) {
+; X86-LABEL: sub8_32:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movb (%ecx), %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: sub8_32:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    movb (%rsi), %al
+; X64-NEXT:    retq
+entry:
+  %and = and i32 %res, -256
+  %d = load i8, ptr %byte, align 1
+  %conv2 = zext i8 %d to i32
+  %or = or i32 %and, %conv2
+  ret i32 %or
+}
+
+define i32 @sub16_32(i32 noundef %res, ptr %byte) {
+; X86-LABEL: sub16_32:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    shll $16, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzwl (%eax), %eax
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: sub16_32:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    movw (%rsi), %ax
+; X64-NEXT:    retq
+entry:
+  %and = and i32 %res, -65536
+  %d = load i16, ptr %byte, align 1
+  %conv2 = zext i16 %d to i32
+  %or = or i32 %and, %conv2
+  ret i32 %or
+}
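
For reference, a hedged C++ sketch of the source-level idiom the new
patterns and tests target (function names are illustrative, not taken
from the patch): depositing a loaded 8- or 16-bit value into the low
bits of a wider integer. With the patterns above, this selects to a
plain subregister move (e.g. "movq %rdi, %rax; movb (%rsi), %al" on
x86-64, as the X64 check lines show) instead of an and/movz/or
sequence.

#include <cstdint>

// Matches (or (and $dst, -256), (zextloadi8 addr)) after ISel.
uint64_t deposit_low_byte(uint64_t res, const uint8_t *p) {
  return (res & ~UINT64_C(0xff)) | *p;
}

// Matches the 16-bit variant, (or (and $dst, -65536), (zextloadi16 addr)).
uint64_t deposit_low_word(uint64_t res, const uint16_t *p) {
  return (res & ~UINT64_C(0xffff)) | *p;
}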


