[llvm] 798305d - [X86] Custom lower ISD::FP16_TO_FP and ISD::FP_TO_FP16 on f16c targets instead of using isel patterns.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 10 22:21:52 PST 2020


Author: Craig Topper
Date: 2020-02-10T22:01:48-08:00
New Revision: 798305d29b780584a2805bc1002b410b3d703197

URL: https://github.com/llvm/llvm-project/commit/798305d29b780584a2805bc1002b410b3d703197
DIFF: https://github.com/llvm/llvm-project/commit/798305d29b780584a2805bc1002b410b3d703197.diff

LOG: [X86] Custom lower ISD::FP16_TO_FP and ISD::FP_TO_FP16 on f16c targets instead of using isel patterns.

We need to use vector instructions for these operations. Previously
we handled this with isel patterns that used extra instructions
and register copies to perform the conversions.

Now we use custom lowering to emit the conversions. This allows
them to be pattern matched and optimized on their own. For
example, we can now emit vpextrw to store the result if it's going
directly to memory.
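
A minimal sketch of the IR pattern that benefits (modeled on the
cvt16.ll test in the diff below; the function name here is only
illustrative):

    declare i16 @llvm.convert.to.fp16.f32(float)

    define void @trunc_store(float %src, i16* %dest) {
      %h = call i16 @llvm.convert.to.fp16.f32(float %src)
      store i16 %h, i16* %dest
      ret void
    }

With this change the FP_TO_FP16 result stays in an XMM register, so
the store can select to vcvtps2ph followed by a vpextrw store rather
than a vmovd to a GPR plus a movw, as the cvt16.ll diff shows.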

I've forced the upper elements of the input to VCVTPH2PS to zero to
keep the generated code similar to before. Zeroes will be needed for
strictfp. I've added a DAG combine for (fp16_to_fp (fp_to_fp16 X))
that avoids the extra instructions otherwise left between the two
conversions, keeping us closer to the previous codegen.
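
For reference, a minimal IR example that produces that node pair,
assuming the usual promotion of half to float on x86 (the function
name is illustrative, not taken from the test suite):

    define float @round_through_half(float %x) {
      %h = fptrunc float %x to half
      %f = fpext half %h to float
      ret float %f
    }

The fptrunc/fpext pair becomes (fp16_to_fp (fp_to_fp16 %x)), which the
new combine emits as back-to-back CVTPS2PH/CVTPH2PS on one vector
register instead of extracting the i16 and reinserting it, similar to
what the test_sitofp_fadd_i32 diff in half.ll shows.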

This is a step towards strictfp support for f16 conversions.

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/lib/Target/X86/X86InstrAVX512.td
    llvm/lib/Target/X86/X86InstrSSE.td
    llvm/test/CodeGen/X86/avx512-insert-extract.ll
    llvm/test/CodeGen/X86/avx512-vec-cmp.ll
    llvm/test/CodeGen/X86/cvt16.ll
    llvm/test/CodeGen/X86/half.ll
    llvm/test/CodeGen/X86/pr31088.ll
    llvm/test/CodeGen/X86/vector-half-conversions.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index faed5963d620..ae54a7e460c7 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -373,7 +373,10 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   // Special handling for half-precision floating point conversions.
   // If we don't have F16C support, then lower half float conversions
   // into library calls.
-  if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
+  if (!Subtarget.useSoftFloat() && Subtarget.hasF16C()) {
+    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Custom);
+    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);
+  } else {
     setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
     setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
   }
@@ -2043,6 +2046,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   setTargetDAGCombine(ISD::XOR);
   setTargetDAGCombine(ISD::MSCATTER);
   setTargetDAGCombine(ISD::MGATHER);
+  setTargetDAGCombine(ISD::FP16_TO_FP);
 
   computeRegisterProperties(Subtarget.getRegisterInfo());
 
@@ -20540,6 +20544,32 @@ SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
   return Tmp.first;
 }
 
+static SDValue LowerFP16_TO_FP(SDValue Op, SelectionDAG &DAG) {
+  assert(Op.getOperand(0).getValueType() == MVT::i16 &&
+         Op.getValueType() == MVT::f32 && "Unexpected VT!");
+
+  SDLoc dl(Op);
+  SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16,
+                            DAG.getConstant(0, dl, MVT::v8i16),
+                            Op.getOperand(0), DAG.getIntPtrConstant(0, dl));
+  Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
+  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
+                     DAG.getIntPtrConstant(0, dl));
+}
+
+static SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) {
+  assert(Op.getOperand(0).getValueType() == MVT::f32 &&
+         Op.getValueType() == MVT::i16 && "Unexpected VT!");
+
+  SDLoc dl(Op);
+  SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32,
+                            Op.getOperand(0));
+  Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
+                    DAG.getTargetConstant(4, dl, MVT::i32));
+  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Res,
+                     DAG.getIntPtrConstant(0, dl));
+}
+
 /// Depending on uarch and/or optimizing for size, we might prefer to use a
 /// vector operation in place of the typical scalar operation.
 static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
@@ -28783,6 +28813,8 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   case ISD::STRICT_FP_EXTEND:   return LowerFP_EXTEND(Op, DAG);
   case ISD::FP_ROUND:
   case ISD::STRICT_FP_ROUND:    return LowerFP_ROUND(Op, DAG);
+  case ISD::FP16_TO_FP:         return LowerFP16_TO_FP(Op, DAG);
+  case ISD::FP_TO_FP16:         return LowerFP_TO_FP16(Op, DAG);
   case ISD::LOAD:               return LowerLoad(Op, Subtarget, DAG);
   case ISD::STORE:              return LowerStore(Op, Subtarget, DAG);
   case ISD::FADD:
@@ -46510,6 +46542,31 @@ static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
+// Optimize (fp16_to_fp (fp_to_fp16 X)) to VCVTPS2PH followed by VCVTPH2PS.
+// Done as a combine because the lowering for fp16_to_fp and fp_to_fp16 produce
+// extra instructions between the conversion due to going to scalar and back.
+static SDValue combineFP16_TO_FP(SDNode *N, SelectionDAG &DAG,
+                                 const X86Subtarget &Subtarget) {
+  if (Subtarget.useSoftFloat() || !Subtarget.hasF16C())
+    return SDValue();
+
+  if (N->getOperand(0).getOpcode() != ISD::FP_TO_FP16)
+    return SDValue();
+
+  if (N->getValueType(0) != MVT::f32 ||
+      N->getOperand(0).getOperand(0).getValueType() != MVT::f32)
+    return SDValue();
+
+  SDLoc dl(N);
+  SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32,
+                            N->getOperand(0).getOperand(0));
+  Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
+                    DAG.getTargetConstant(4, dl, MVT::i32));
+  Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
+  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
+                     DAG.getIntPtrConstant(0, dl));
+}
+
 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -46661,6 +46718,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   case X86ISD::PMULUDQ:     return combinePMULDQ(N, DAG, DCI, Subtarget);
   case X86ISD::KSHIFTL:
   case X86ISD::KSHIFTR:     return combineKSHIFT(N, DAG, DCI);
+  case ISD::FP16_TO_FP:     return combineFP16_TO_FP(N, DAG, Subtarget);
   }
 
   return SDValue();

diff  --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 5d764a9c984e..9ce695730046 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -8686,27 +8686,6 @@ let Predicates = [HasAVX512] in {
             (VCVTPS2PHZmr addr:$dst, VR512:$src1, timm:$src2)>;
 }
 
-// Patterns for matching conversions from float to half-float and vice versa.
-let Predicates = [HasVLX] in {
-  // Use MXCSR.RC for rounding instead of explicitly specifying the default
-  // rounding mode (Nearest-Even, encoded as 0). Both are equivalent in the
-  // configurations we support (the default). However, falling back to MXCSR is
-  // more consistent with other instructions, which are always controlled by it.
-  // It's encoded as 0b100.
-  def : Pat<(fp_to_f16 FR32X:$src),
-            (i16 (EXTRACT_SUBREG (VMOVPDI2DIZrr (v8i16 (VCVTPS2PHZ128rr
-              (v4f32 (COPY_TO_REGCLASS FR32X:$src, VR128X)), 4))), sub_16bit))>;
-
-  def : Pat<(f16_to_fp GR16:$src),
-            (f32 (COPY_TO_REGCLASS (v4f32 (VCVTPH2PSZ128rr
-              (v8i16 (COPY_TO_REGCLASS (MOVZX32rr16 GR16:$src), VR128X)))), FR32X)) >;
-
-  def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32X:$src))),
-            (f32 (COPY_TO_REGCLASS (v4f32 (VCVTPH2PSZ128rr
-              (v8i16 (VCVTPS2PHZ128rr
-               (v4f32 (COPY_TO_REGCLASS FR32X:$src, VR128X)), 4)))), FR32X)) >;
-}
-
 //  Unordered/Ordered scalar fp compare with Sae and set EFLAGS
 multiclass avx512_ord_cmp_sae<bits<8> opc, X86VectorVTInfo _,
                             string OpcodeStr, Domain d,

diff  --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 45544d1795f3..b519e4a42e9d 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -7387,26 +7387,6 @@ let Predicates = [HasF16C, NoVLX] in {
             (VCVTPS2PHYmr addr:$dst, VR256:$src1, timm:$src2)>;
 }
 
-// Patterns for  matching conversions from float to half-float and vice versa.
-let Predicates = [HasF16C, NoVLX] in {
-  // Use MXCSR.RC for rounding instead of explicitly specifying the default
-  // rounding mode (Nearest-Even, encoded as 0). Both are equivalent in the
-  // configurations we support (the default). However, falling back to MXCSR is
-  // more consistent with other instructions, which are always controlled by it.
-  // It's encoded as 0b100.
-  def : Pat<(fp_to_f16 FR32:$src),
-            (i16 (EXTRACT_SUBREG (VMOVPDI2DIrr (v8i16 (VCVTPS2PHrr
-              (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 4))), sub_16bit))>;
-
-  def : Pat<(f16_to_fp GR16:$src),
-            (f32 (COPY_TO_REGCLASS (v4f32 (VCVTPH2PSrr
-              (v4i32 (COPY_TO_REGCLASS (MOVZX32rr16 GR16:$src), VR128)))), FR32)) >;
-
-  def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32:$src))),
-            (f32 (COPY_TO_REGCLASS (v4f32 (VCVTPH2PSrr
-             (v8i16 (VCVTPS2PHrr (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 4)))), FR32)) >;
-}
-
 //===----------------------------------------------------------------------===//
 // AVX2 Instructions
 //===----------------------------------------------------------------------===//

diff  --git a/llvm/test/CodeGen/X86/avx512-insert-extract.ll b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
index 51ea7fd13d31..5b7c7ba713a8 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -2264,15 +2264,15 @@ define i128 @test_insertelement_variable_v128i1(<128 x i8> %a, i8 %b, i32 %index
 define void @test_concat_v2i1(<2 x half>* %arg, <2 x half>* %arg1, <2 x half>* %arg2) {
 ; KNL-LABEL: test_concat_v2i1:
 ; KNL:       ## %bb.0:
-; KNL-NEXT:    movzwl 2(%rdi), %eax
-; KNL-NEXT:    vmovd %eax, %xmm0
+; KNL-NEXT:    movzwl (%rdi), %eax
+; KNL-NEXT:    movzwl 2(%rdi), %ecx
+; KNL-NEXT:    vmovd %ecx, %xmm0
 ; KNL-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; KNL-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; KNL-NEXT:    vucomiss %xmm1, %xmm0
-; KNL-NEXT:    setb %al
-; KNL-NEXT:    kmovw %eax, %k0
+; KNL-NEXT:    setb %cl
+; KNL-NEXT:    kmovw %ecx, %k0
 ; KNL-NEXT:    kshiftlw $1, %k0, %k0
-; KNL-NEXT:    movzwl (%rdi), %eax
 ; KNL-NEXT:    vmovd %eax, %xmm2
 ; KNL-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; KNL-NEXT:    vucomiss %xmm1, %xmm2
@@ -2312,15 +2312,15 @@ define void @test_concat_v2i1(<2 x half>* %arg, <2 x half>* %arg1, <2 x half>* %
 ;
 ; SKX-LABEL: test_concat_v2i1:
 ; SKX:       ## %bb.0:
-; SKX-NEXT:    movzwl 2(%rdi), %eax
-; SKX-NEXT:    vmovd %eax, %xmm0
+; SKX-NEXT:    movzwl (%rdi), %eax
+; SKX-NEXT:    movzwl 2(%rdi), %ecx
+; SKX-NEXT:    vmovd %ecx, %xmm0
 ; SKX-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; SKX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SKX-NEXT:    vucomiss %xmm1, %xmm0
-; SKX-NEXT:    setb %al
-; SKX-NEXT:    kmovd %eax, %k0
+; SKX-NEXT:    setb %cl
+; SKX-NEXT:    kmovd %ecx, %k0
 ; SKX-NEXT:    kshiftlb $1, %k0, %k0
-; SKX-NEXT:    movzwl (%rdi), %eax
 ; SKX-NEXT:    vmovd %eax, %xmm2
 ; SKX-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; SKX-NEXT:    vucomiss %xmm1, %xmm2

diff  --git a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
index 1fcdfa53b245..843e37e6c455 100644
--- a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -1432,7 +1432,6 @@ define void @half_vec_compare(<2 x half>* %x, <2 x i8>* %y) {
 ; KNL:       ## %bb.0: ## %entry
 ; KNL-NEXT:    movzwl (%rdi), %eax ## encoding: [0x0f,0xb7,0x07]
 ; KNL-NEXT:    movzwl 2(%rdi), %ecx ## encoding: [0x0f,0xb7,0x4f,0x02]
-; KNL-NEXT:    movzwl %cx, %ecx ## encoding: [0x0f,0xb7,0xc9]
 ; KNL-NEXT:    vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
 ; KNL-NEXT:    vcvtph2ps %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x13,0xc0]
 ; KNL-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
@@ -1442,7 +1441,6 @@ define void @half_vec_compare(<2 x half>* %x, <2 x i8>* %y) {
 ; KNL-NEXT:    orb %cl, %dl ## encoding: [0x08,0xca]
 ; KNL-NEXT:    kmovw %edx, %k0 ## encoding: [0xc5,0xf8,0x92,0xc2]
 ; KNL-NEXT:    kshiftlw $1, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x01]
-; KNL-NEXT:    movzwl %ax, %eax ## encoding: [0x0f,0xb7,0xc0]
 ; KNL-NEXT:    vmovd %eax, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
 ; KNL-NEXT:    vcvtph2ps %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x13,0xc0]
 ; KNL-NEXT:    vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
@@ -1465,7 +1463,6 @@ define void @half_vec_compare(<2 x half>* %x, <2 x i8>* %y) {
 ; AVX512BW:       ## %bb.0: ## %entry
 ; AVX512BW-NEXT:    movzwl (%rdi), %eax ## encoding: [0x0f,0xb7,0x07]
 ; AVX512BW-NEXT:    movzwl 2(%rdi), %ecx ## encoding: [0x0f,0xb7,0x4f,0x02]
-; AVX512BW-NEXT:    movzwl %cx, %ecx ## encoding: [0x0f,0xb7,0xc9]
 ; AVX512BW-NEXT:    vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
 ; AVX512BW-NEXT:    vcvtph2ps %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x13,0xc0]
 ; AVX512BW-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
@@ -1475,7 +1472,6 @@ define void @half_vec_compare(<2 x half>* %x, <2 x i8>* %y) {
 ; AVX512BW-NEXT:    orb %cl, %dl ## encoding: [0x08,0xca]
 ; AVX512BW-NEXT:    kmovd %edx, %k0 ## encoding: [0xc5,0xfb,0x92,0xc2]
 ; AVX512BW-NEXT:    kshiftlw $1, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x01]
-; AVX512BW-NEXT:    movzwl %ax, %eax ## encoding: [0x0f,0xb7,0xc0]
 ; AVX512BW-NEXT:    vmovd %eax, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
 ; AVX512BW-NEXT:    vcvtph2ps %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x13,0xc0]
 ; AVX512BW-NEXT:    vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]
@@ -1497,7 +1493,6 @@ define void @half_vec_compare(<2 x half>* %x, <2 x i8>* %y) {
 ; SKX:       ## %bb.0: ## %entry
 ; SKX-NEXT:    movzwl (%rdi), %eax ## encoding: [0x0f,0xb7,0x07]
 ; SKX-NEXT:    movzwl 2(%rdi), %ecx ## encoding: [0x0f,0xb7,0x4f,0x02]
-; SKX-NEXT:    movzwl %cx, %ecx ## encoding: [0x0f,0xb7,0xc9]
 ; SKX-NEXT:    vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
 ; SKX-NEXT:    vcvtph2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0xc0]
 ; SKX-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
@@ -1507,7 +1502,6 @@ define void @half_vec_compare(<2 x half>* %x, <2 x i8>* %y) {
 ; SKX-NEXT:    orb %cl, %dl ## encoding: [0x08,0xca]
 ; SKX-NEXT:    kmovd %edx, %k0 ## encoding: [0xc5,0xfb,0x92,0xc2]
 ; SKX-NEXT:    kshiftlb $1, %k0, %k0 ## encoding: [0xc4,0xe3,0x79,0x32,0xc0,0x01]
-; SKX-NEXT:    movzwl %ax, %eax ## encoding: [0x0f,0xb7,0xc0]
 ; SKX-NEXT:    vmovd %eax, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
 ; SKX-NEXT:    vcvtph2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x13,0xc0]
 ; SKX-NEXT:    vucomiss %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc1]

diff  --git a/llvm/test/CodeGen/X86/cvt16.ll b/llvm/test/CodeGen/X86/cvt16.ll
index c9fe7a003280..711f8fad8fa5 100644
--- a/llvm/test/CodeGen/X86/cvt16.ll
+++ b/llvm/test/CodeGen/X86/cvt16.ll
@@ -37,8 +37,7 @@ define void @test1(float %src, i16* %dest) {
 ; F16C-LABEL: test1:
 ; F16C:       # %bb.0:
 ; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; F16C-NEXT:    vmovd %xmm0, %eax
-; F16C-NEXT:    movw %ax, (%rdi)
+; F16C-NEXT:    vpextrw $0, %xmm0, (%rdi)
 ; F16C-NEXT:    retq
 ;
 ; SOFTFLOAT-LABEL: test1:

diff  --git a/llvm/test/CodeGen/X86/half.ll b/llvm/test/CodeGen/X86/half.ll
index 1cf5726c9cdf..780d1d7153ea 100644
--- a/llvm/test/CodeGen/X86/half.ll
+++ b/llvm/test/CodeGen/X86/half.ll
@@ -143,8 +143,7 @@ define void @test_trunc32(float %in, half* %addr) #0 {
 ; BWON-F16C-LABEL: test_trunc32:
 ; BWON-F16C:       # %bb.0:
 ; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; BWON-F16C-NEXT:    vmovd %xmm0, %eax
-; BWON-F16C-NEXT:    movw %ax, (%rdi)
+; BWON-F16C-NEXT:    vpextrw $0, %xmm0, (%rdi)
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_trunc32:
@@ -240,8 +239,7 @@ define void @test_sitofp_i64(i64 %a, half* %p) #0 {
 ; BWON-F16C:       # %bb.0:
 ; BWON-F16C-NEXT:    vcvtsi2ss %rdi, %xmm0, %xmm0
 ; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; BWON-F16C-NEXT:    vmovd %xmm0, %eax
-; BWON-F16C-NEXT:    movw %ax, (%rsi)
+; BWON-F16C-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_sitofp_i64:
@@ -353,8 +351,7 @@ define void @test_uitofp_i64(i64 %a, half* %p) #0 {
 ; BWON-F16C-NEXT:    vaddss %xmm0, %xmm0, %xmm0
 ; BWON-F16C-NEXT:  .LBB10_3:
 ; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; BWON-F16C-NEXT:    vmovd %xmm0, %eax
-; BWON-F16C-NEXT:    movw %ax, (%rsi)
+; BWON-F16C-NEXT:    vpextrw $0, %xmm0, (%rsi)
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_uitofp_i64:
@@ -423,21 +420,19 @@ define <4 x float> @test_extend32_vec4(<4 x half>* %p) #0 {
 ; BWON-F16C-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm0
 ; BWON-F16C-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm1
 ; BWON-F16C-NEXT:    vpextrw $1, %xmm1, %eax
-; BWON-F16C-NEXT:    movzwl %ax, %eax
 ; BWON-F16C-NEXT:    vmovd %eax, %xmm2
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; BWON-F16C-NEXT:    vmovd %xmm1, %eax
 ; BWON-F16C-NEXT:    movzwl %ax, %eax
 ; BWON-F16C-NEXT:    vmovd %eax, %xmm1
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm1, %xmm1
-; BWON-F16C-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; BWON-F16C-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; BWON-F16C-NEXT:    vmovd %xmm0, %eax
 ; BWON-F16C-NEXT:    movzwl %ax, %eax
 ; BWON-F16C-NEXT:    vmovd %eax, %xmm2
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; BWON-F16C-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; BWON-F16C-NEXT:    vpextrw $1, %xmm0, %eax
-; BWON-F16C-NEXT:    movzwl %ax, %eax
 ; BWON-F16C-NEXT:    vmovd %eax, %xmm0
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; BWON-F16C-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
@@ -530,20 +525,20 @@ define <4 x double> @test_extend64_vec4(<4 x half>* %p) #0 {
 ;
 ; BWON-F16C-LABEL: test_extend64_vec4:
 ; BWON-F16C:       # %bb.0:
-; BWON-F16C-NEXT:    movzwl 6(%rdi), %eax
-; BWON-F16C-NEXT:    vmovd %eax, %xmm0
+; BWON-F16C-NEXT:    movzwl (%rdi), %eax
+; BWON-F16C-NEXT:    movzwl 2(%rdi), %ecx
+; BWON-F16C-NEXT:    movzwl 4(%rdi), %edx
+; BWON-F16C-NEXT:    movzwl 6(%rdi), %esi
+; BWON-F16C-NEXT:    vmovd %esi, %xmm0
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; BWON-F16C-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; BWON-F16C-NEXT:    movzwl 4(%rdi), %eax
-; BWON-F16C-NEXT:    vmovd %eax, %xmm1
+; BWON-F16C-NEXT:    vmovd %edx, %xmm1
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; BWON-F16C-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; BWON-F16C-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; BWON-F16C-NEXT:    movzwl 2(%rdi), %eax
-; BWON-F16C-NEXT:    vmovd %eax, %xmm1
+; BWON-F16C-NEXT:    vmovd %ecx, %xmm1
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; BWON-F16C-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; BWON-F16C-NEXT:    movzwl (%rdi), %eax
 ; BWON-F16C-NEXT:    vmovd %eax, %xmm2
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; BWON-F16C-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
@@ -663,19 +658,15 @@ define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) #0 {
 ; BWON-F16C:       # %bb.0:
 ; BWON-F16C-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; BWON-F16C-NEXT:    vmovd %xmm1, %eax
-; BWON-F16C-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; BWON-F16C-NEXT:    vmovd %xmm1, %ecx
-; BWON-F16C-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; BWON-F16C-NEXT:    vmovd %xmm1, %edx
+; BWON-F16C-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; BWON-F16C-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
+; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
 ; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; BWON-F16C-NEXT:    vmovd %xmm0, %esi
-; BWON-F16C-NEXT:    movw %si, (%rdi)
-; BWON-F16C-NEXT:    movw %dx, 6(%rdi)
-; BWON-F16C-NEXT:    movw %cx, 4(%rdi)
-; BWON-F16C-NEXT:    movw %ax, 2(%rdi)
+; BWON-F16C-NEXT:    vpextrw $0, %xmm0, (%rdi)
+; BWON-F16C-NEXT:    vpextrw $0, %xmm3, 6(%rdi)
+; BWON-F16C-NEXT:    vpextrw $0, %xmm2, 4(%rdi)
+; BWON-F16C-NEXT:    vpextrw $0, %xmm1, 2(%rdi)
 ; BWON-F16C-NEXT:    retq
 ;
 ; CHECK-I686-LABEL: test_trunc32_vec4:
@@ -926,10 +917,10 @@ define float @test_sitofp_fadd_i32(i32 %a, half* %b) #0 {
 ;
 ; BWON-F16C-LABEL: test_sitofp_fadd_i32:
 ; BWON-F16C:       # %bb.0:
+; BWON-F16C-NEXT:    movzwl (%rsi), %eax
 ; BWON-F16C-NEXT:    vcvtsi2ss %edi, %xmm0, %xmm0
 ; BWON-F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
-; BWON-F16C-NEXT:    movzwl (%rsi), %eax
 ; BWON-F16C-NEXT:    vmovd %eax, %xmm1
 ; BWON-F16C-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; BWON-F16C-NEXT:    vaddss %xmm0, %xmm1, %xmm0

diff  --git a/llvm/test/CodeGen/X86/pr31088.ll b/llvm/test/CodeGen/X86/pr31088.ll
index 92040e5732d0..f5c1b3bc2351 100644
--- a/llvm/test/CodeGen/X86/pr31088.ll
+++ b/llvm/test/CodeGen/X86/pr31088.ll
@@ -155,8 +155,7 @@ define <2 x half> @ir_fadd_v2f16(<2 x half> %arg0, <2 x half> %arg1) nounwind {
 ; F16C-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; F16C-NEXT:    vaddss %xmm0, %xmm1, %xmm0
 ; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; F16C-NEXT:    vmovd %xmm0, %eax
-; F16C-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; F16C-NEXT:    vpextrw $0, %xmm0, -{{[0-9]+}}(%rsp)
 ; F16C-NEXT:    movzwl %dx, %eax
 ; F16C-NEXT:    vmovd %eax, %xmm0
 ; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
@@ -165,8 +164,7 @@ define <2 x half> @ir_fadd_v2f16(<2 x half> %arg0, <2 x half> %arg1) nounwind {
 ; F16C-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; F16C-NEXT:    vaddss %xmm0, %xmm1, %xmm0
 ; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; F16C-NEXT:    vmovd %xmm0, %eax
-; F16C-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; F16C-NEXT:    vpextrw $0, %xmm0, -{{[0-9]+}}(%rsp)
 ; F16C-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm0
 ; F16C-NEXT:    vmovd %xmm0, %eax
 ; F16C-NEXT:    vpextrw $1, %xmm0, %edx

diff  --git a/llvm/test/CodeGen/X86/vector-half-conversions.ll b/llvm/test/CodeGen/X86/vector-half-conversions.ll
index 24740b6b6f94..0861070c8fde 100644
--- a/llvm/test/CodeGen/X86/vector-half-conversions.ll
+++ b/llvm/test/CodeGen/X86/vector-half-conversions.ll
@@ -27,21 +27,19 @@ define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) nounwind {
 ; ALL-NEXT:    vmovq %xmm0, %rax
 ; ALL-NEXT:    movq %rax, %rcx
 ; ALL-NEXT:    shrq $32, %rcx
-; ALL-NEXT:    movzwl %ax, %edx
-; ALL-NEXT:    vmovd %edx, %xmm0
-; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; ALL-NEXT:    movl %eax, %edx
 ; ALL-NEXT:    shrl $16, %edx
-; ALL-NEXT:    movzwl %dx, %edx
+; ALL-NEXT:    vmovd %edx, %xmm0
+; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT:    movzwl %ax, %edx
 ; ALL-NEXT:    vmovd %edx, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; ALL-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; ALL-NEXT:    movzwl %cx, %ecx
 ; ALL-NEXT:    vmovd %ecx, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
 ; ALL-NEXT:    shrq $48, %rax
-; ALL-NEXT:    movzwl %ax, %eax
 ; ALL-NEXT:    vmovd %eax, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
@@ -57,21 +55,19 @@ define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind {
 ; ALL-NEXT:    vmovq %xmm0, %rax
 ; ALL-NEXT:    movq %rax, %rcx
 ; ALL-NEXT:    shrq $32, %rcx
-; ALL-NEXT:    movzwl %ax, %edx
-; ALL-NEXT:    vmovd %edx, %xmm0
-; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; ALL-NEXT:    movl %eax, %edx
 ; ALL-NEXT:    shrl $16, %edx
-; ALL-NEXT:    movzwl %dx, %edx
+; ALL-NEXT:    vmovd %edx, %xmm0
+; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT:    movzwl %ax, %edx
 ; ALL-NEXT:    vmovd %edx, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; ALL-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; ALL-NEXT:    movzwl %cx, %ecx
 ; ALL-NEXT:    vmovd %ecx, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
 ; ALL-NEXT:    shrq $48, %rax
-; ALL-NEXT:    movzwl %ax, %eax
 ; ALL-NEXT:    vmovd %eax, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
@@ -91,39 +87,35 @@ define <8 x float> @cvt_8i16_to_8f32(<8 x i16> %a0) nounwind {
 ; ALL-NEXT:    vpextrq $1, %xmm0, %rdx
 ; ALL-NEXT:    movq %rdx, %rsi
 ; ALL-NEXT:    shrq $32, %rsi
-; ALL-NEXT:    movzwl %dx, %edi
-; ALL-NEXT:    vmovd %edi, %xmm0
-; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; ALL-NEXT:    movl %edx, %edi
 ; ALL-NEXT:    shrl $16, %edi
-; ALL-NEXT:    movzwl %di, %edi
+; ALL-NEXT:    vmovd %edi, %xmm0
+; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT:    movzwl %dx, %edi
 ; ALL-NEXT:    vmovd %edi, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
 ; ALL-NEXT:    movzwl %si, %esi
 ; ALL-NEXT:    vmovd %esi, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
 ; ALL-NEXT:    shrq $48, %rdx
-; ALL-NEXT:    movzwl %dx, %edx
 ; ALL-NEXT:    vmovd %edx, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
-; ALL-NEXT:    movzwl %ax, %edx
-; ALL-NEXT:    vmovd %edx, %xmm1
-; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    movl %eax, %edx
 ; ALL-NEXT:    shrl $16, %edx
-; ALL-NEXT:    movzwl %dx, %edx
+; ALL-NEXT:    vmovd %edx, %xmm1
+; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
+; ALL-NEXT:    movzwl %ax, %edx
 ; ALL-NEXT:    vmovd %edx, %xmm2
 ; ALL-NEXT:    vcvtph2ps %xmm2, %xmm2
-; ALL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; ALL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; ALL-NEXT:    movzwl %cx, %ecx
 ; ALL-NEXT:    vmovd %ecx, %xmm2
 ; ALL-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; ALL-NEXT:    shrq $48, %rax
-; ALL-NEXT:    movzwl %ax, %eax
 ; ALL-NEXT:    vmovd %eax, %xmm2
 ; ALL-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
@@ -150,77 +142,69 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
 ; AVX1-NEXT:    vpextrq $1, %xmm0, %rsi
 ; AVX1-NEXT:    movq %rsi, %rax
 ; AVX1-NEXT:    shrq $32, %rax
-; AVX1-NEXT:    movzwl %si, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm0
-; AVX1-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; AVX1-NEXT:    movl %esi, %ecx
 ; AVX1-NEXT:    shrl $16, %ecx
-; AVX1-NEXT:    movzwl %cx, %ecx
+; AVX1-NEXT:    vmovd %ecx, %xmm0
+; AVX1-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX1-NEXT:    movzwl %si, %ecx
 ; AVX1-NEXT:    vmovd %ecx, %xmm1
 ; AVX1-NEXT:    vcvtph2ps %xmm1, %xmm1
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
 ; AVX1-NEXT:    movzwl %ax, %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm1
 ; AVX1-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
 ; AVX1-NEXT:    shrq $48, %rsi
-; AVX1-NEXT:    movzwl %si, %eax
-; AVX1-NEXT:    vmovd %eax, %xmm1
+; AVX1-NEXT:    vmovd %esi, %xmm1
 ; AVX1-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
-; AVX1-NEXT:    movzwl %di, %eax
-; AVX1-NEXT:    vmovd %eax, %xmm1
-; AVX1-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX1-NEXT:    movl %edi, %eax
 ; AVX1-NEXT:    shrl $16, %eax
-; AVX1-NEXT:    movzwl %ax, %eax
+; AVX1-NEXT:    vmovd %eax, %xmm1
+; AVX1-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX1-NEXT:    movzwl %di, %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm2
 ; AVX1-NEXT:    vcvtph2ps %xmm2, %xmm2
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX1-NEXT:    movzwl %r11w, %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm2
 ; AVX1-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX1-NEXT:    shrq $48, %rdi
-; AVX1-NEXT:    movzwl %di, %eax
-; AVX1-NEXT:    vmovd %eax, %xmm2
+; AVX1-NEXT:    vmovd %edi, %xmm2
 ; AVX1-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT:    movzwl %dx, %eax
-; AVX1-NEXT:    vmovd %eax, %xmm1
-; AVX1-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX1-NEXT:    movl %edx, %eax
 ; AVX1-NEXT:    shrl $16, %eax
-; AVX1-NEXT:    movzwl %ax, %eax
+; AVX1-NEXT:    vmovd %eax, %xmm1
+; AVX1-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX1-NEXT:    movzwl %dx, %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm2
 ; AVX1-NEXT:    vcvtph2ps %xmm2, %xmm2
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX1-NEXT:    movzwl %r9w, %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm2
 ; AVX1-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX1-NEXT:    shrq $48, %rdx
-; AVX1-NEXT:    movzwl %dx, %eax
-; AVX1-NEXT:    vmovd %eax, %xmm2
+; AVX1-NEXT:    vmovd %edx, %xmm2
 ; AVX1-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
-; AVX1-NEXT:    movzwl %r10w, %eax
-; AVX1-NEXT:    vmovd %eax, %xmm2
-; AVX1-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX1-NEXT:    movl %r10d, %eax
 ; AVX1-NEXT:    shrl $16, %eax
-; AVX1-NEXT:    movzwl %ax, %eax
+; AVX1-NEXT:    vmovd %eax, %xmm2
+; AVX1-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX1-NEXT:    movzwl %r10w, %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm3
 ; AVX1-NEXT:    vcvtph2ps %xmm3, %xmm3
-; AVX1-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
 ; AVX1-NEXT:    movzwl %r8w, %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm3
 ; AVX1-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
 ; AVX1-NEXT:    shrq $48, %r10
-; AVX1-NEXT:    movzwl %r10w, %eax
-; AVX1-NEXT:    vmovd %eax, %xmm3
+; AVX1-NEXT:    vmovd %r10d, %xmm3
 ; AVX1-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
@@ -241,77 +225,69 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
 ; AVX2-NEXT:    vpextrq $1, %xmm0, %rsi
 ; AVX2-NEXT:    movq %rsi, %rax
 ; AVX2-NEXT:    shrq $32, %rax
-; AVX2-NEXT:    movzwl %si, %ecx
-; AVX2-NEXT:    vmovd %ecx, %xmm0
-; AVX2-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; AVX2-NEXT:    movl %esi, %ecx
 ; AVX2-NEXT:    shrl $16, %ecx
-; AVX2-NEXT:    movzwl %cx, %ecx
+; AVX2-NEXT:    vmovd %ecx, %xmm0
+; AVX2-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX2-NEXT:    movzwl %si, %ecx
 ; AVX2-NEXT:    vmovd %ecx, %xmm1
 ; AVX2-NEXT:    vcvtph2ps %xmm1, %xmm1
-; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
 ; AVX2-NEXT:    movzwl %ax, %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm1
 ; AVX2-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
 ; AVX2-NEXT:    shrq $48, %rsi
-; AVX2-NEXT:    movzwl %si, %eax
-; AVX2-NEXT:    vmovd %eax, %xmm1
+; AVX2-NEXT:    vmovd %esi, %xmm1
 ; AVX2-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
-; AVX2-NEXT:    movzwl %di, %eax
-; AVX2-NEXT:    vmovd %eax, %xmm1
-; AVX2-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX2-NEXT:    movl %edi, %eax
 ; AVX2-NEXT:    shrl $16, %eax
-; AVX2-NEXT:    movzwl %ax, %eax
+; AVX2-NEXT:    vmovd %eax, %xmm1
+; AVX2-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX2-NEXT:    movzwl %di, %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm2
 ; AVX2-NEXT:    vcvtph2ps %xmm2, %xmm2
-; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX2-NEXT:    movzwl %r11w, %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm2
 ; AVX2-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX2-NEXT:    shrq $48, %rdi
-; AVX2-NEXT:    movzwl %di, %eax
-; AVX2-NEXT:    vmovd %eax, %xmm2
+; AVX2-NEXT:    vmovd %edi, %xmm2
 ; AVX2-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
 ; AVX2-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT:    movzwl %dx, %eax
-; AVX2-NEXT:    vmovd %eax, %xmm1
-; AVX2-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX2-NEXT:    movl %edx, %eax
 ; AVX2-NEXT:    shrl $16, %eax
-; AVX2-NEXT:    movzwl %ax, %eax
+; AVX2-NEXT:    vmovd %eax, %xmm1
+; AVX2-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX2-NEXT:    movzwl %dx, %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm2
 ; AVX2-NEXT:    vcvtph2ps %xmm2, %xmm2
-; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX2-NEXT:    movzwl %r9w, %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm2
 ; AVX2-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX2-NEXT:    shrq $48, %rdx
-; AVX2-NEXT:    movzwl %dx, %eax
-; AVX2-NEXT:    vmovd %eax, %xmm2
+; AVX2-NEXT:    vmovd %edx, %xmm2
 ; AVX2-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
-; AVX2-NEXT:    movzwl %r10w, %eax
-; AVX2-NEXT:    vmovd %eax, %xmm2
-; AVX2-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX2-NEXT:    movl %r10d, %eax
 ; AVX2-NEXT:    shrl $16, %eax
-; AVX2-NEXT:    movzwl %ax, %eax
+; AVX2-NEXT:    vmovd %eax, %xmm2
+; AVX2-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX2-NEXT:    movzwl %r10w, %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm3
 ; AVX2-NEXT:    vcvtph2ps %xmm3, %xmm3
-; AVX2-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
 ; AVX2-NEXT:    movzwl %r8w, %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm3
 ; AVX2-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
 ; AVX2-NEXT:    shrq $48, %r10
-; AVX2-NEXT:    movzwl %r10w, %eax
-; AVX2-NEXT:    vmovd %eax, %xmm3
+; AVX2-NEXT:    vmovd %r10d, %xmm3
 ; AVX2-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
@@ -332,77 +308,69 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
 ; AVX512-NEXT:    vpextrq $1, %xmm0, %rsi
 ; AVX512-NEXT:    movq %rsi, %rax
 ; AVX512-NEXT:    shrq $32, %rax
-; AVX512-NEXT:    movzwl %si, %ecx
-; AVX512-NEXT:    vmovd %ecx, %xmm0
-; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; AVX512-NEXT:    movl %esi, %ecx
 ; AVX512-NEXT:    shrl $16, %ecx
-; AVX512-NEXT:    movzwl %cx, %ecx
+; AVX512-NEXT:    vmovd %ecx, %xmm0
+; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT:    movzwl %si, %ecx
 ; AVX512-NEXT:    vmovd %ecx, %xmm1
 ; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
 ; AVX512-NEXT:    movzwl %ax, %eax
 ; AVX512-NEXT:    vmovd %eax, %xmm1
 ; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
 ; AVX512-NEXT:    shrq $48, %rsi
-; AVX512-NEXT:    movzwl %si, %eax
-; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vmovd %esi, %xmm1
 ; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
-; AVX512-NEXT:    movzwl %di, %eax
-; AVX512-NEXT:    vmovd %eax, %xmm1
-; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX512-NEXT:    movl %edi, %eax
 ; AVX512-NEXT:    shrl $16, %eax
-; AVX512-NEXT:    movzwl %ax, %eax
+; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX512-NEXT:    movzwl %di, %eax
 ; AVX512-NEXT:    vmovd %eax, %xmm2
 ; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX512-NEXT:    movzwl %r11w, %eax
 ; AVX512-NEXT:    vmovd %eax, %xmm2
 ; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX512-NEXT:    shrq $48, %rdi
-; AVX512-NEXT:    movzwl %di, %eax
-; AVX512-NEXT:    vmovd %eax, %xmm2
+; AVX512-NEXT:    vmovd %edi, %xmm2
 ; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
 ; AVX512-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX512-NEXT:    movzwl %dx, %eax
-; AVX512-NEXT:    vmovd %eax, %xmm1
-; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX512-NEXT:    movl %edx, %eax
 ; AVX512-NEXT:    shrl $16, %eax
-; AVX512-NEXT:    movzwl %ax, %eax
+; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX512-NEXT:    movzwl %dx, %eax
 ; AVX512-NEXT:    vmovd %eax, %xmm2
 ; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX512-NEXT:    movzwl %r9w, %eax
 ; AVX512-NEXT:    vmovd %eax, %xmm2
 ; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX512-NEXT:    shrq $48, %rdx
-; AVX512-NEXT:    movzwl %dx, %eax
-; AVX512-NEXT:    vmovd %eax, %xmm2
+; AVX512-NEXT:    vmovd %edx, %xmm2
 ; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
-; AVX512-NEXT:    movzwl %r10w, %eax
-; AVX512-NEXT:    vmovd %eax, %xmm2
-; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512-NEXT:    movl %r10d, %eax
 ; AVX512-NEXT:    shrl $16, %eax
-; AVX512-NEXT:    movzwl %ax, %eax
+; AVX512-NEXT:    vmovd %eax, %xmm2
+; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT:    movzwl %r10w, %eax
 ; AVX512-NEXT:    vmovd %eax, %xmm3
 ; AVX512-NEXT:    vcvtph2ps %xmm3, %xmm3
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
 ; AVX512-NEXT:    movzwl %r8w, %eax
 ; AVX512-NEXT:    vmovd %eax, %xmm3
 ; AVX512-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX512-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
 ; AVX512-NEXT:    shrq $48, %r10
-; AVX512-NEXT:    movzwl %r10w, %eax
-; AVX512-NEXT:    vmovd %eax, %xmm3
+; AVX512-NEXT:    vmovd %r10d, %xmm3
 ; AVX512-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX512-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
 ; AVX512-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
@@ -440,21 +408,19 @@ define <4 x float> @load_cvt_4i16_to_4f32(<4 x i16>* %a0) nounwind {
 ; ALL-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm0
 ; ALL-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm1
 ; ALL-NEXT:    vpextrw $1, %xmm1, %eax
-; ALL-NEXT:    movzwl %ax, %eax
 ; ALL-NEXT:    vmovd %eax, %xmm2
 ; ALL-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; ALL-NEXT:    vmovd %xmm1, %eax
 ; ALL-NEXT:    movzwl %ax, %eax
 ; ALL-NEXT:    vmovd %eax, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; ALL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; ALL-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; ALL-NEXT:    vmovd %xmm0, %eax
 ; ALL-NEXT:    movzwl %ax, %eax
 ; ALL-NEXT:    vmovd %eax, %xmm2
 ; ALL-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; ALL-NEXT:    vpextrw $1, %xmm0, %eax
-; ALL-NEXT:    movzwl %ax, %eax
 ; ALL-NEXT:    vmovd %eax, %xmm0
 ; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
@@ -471,21 +437,19 @@ define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind {
 ; ALL-NEXT:    movq (%rdi), %rax
 ; ALL-NEXT:    movq %rax, %rcx
 ; ALL-NEXT:    shrq $32, %rcx
-; ALL-NEXT:    movzwl %ax, %edx
-; ALL-NEXT:    vmovd %edx, %xmm0
-; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; ALL-NEXT:    movl %eax, %edx
 ; ALL-NEXT:    shrl $16, %edx
-; ALL-NEXT:    movzwl %dx, %edx
+; ALL-NEXT:    vmovd %edx, %xmm0
+; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT:    movzwl %ax, %edx
 ; ALL-NEXT:    vmovd %edx, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
-; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; ALL-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; ALL-NEXT:    movzwl %cx, %ecx
 ; ALL-NEXT:    vmovd %ecx, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
 ; ALL-NEXT:    shrq $48, %rax
-; ALL-NEXT:    movzwl %ax, %eax
 ; ALL-NEXT:    vmovd %eax, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
@@ -513,7 +477,6 @@ define <8 x float> @load_cvt_8i16_to_8f32(<8 x i16>* %a0) nounwind {
 ; ALL-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm2
 ; ALL-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm3
 ; ALL-NEXT:    vpextrw $1, %xmm3, %eax
-; ALL-NEXT:    movzwl %ax, %eax
 ; ALL-NEXT:    vmovd %eax, %xmm4
 ; ALL-NEXT:    vcvtph2ps %xmm4, %xmm4
 ; ALL-NEXT:    vmovd %xmm3, %eax
@@ -527,12 +490,10 @@ define <8 x float> @load_cvt_8i16_to_8f32(<8 x i16>* %a0) nounwind {
 ; ALL-NEXT:    vcvtph2ps %xmm4, %xmm4
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
 ; ALL-NEXT:    vpextrw $1, %xmm2, %eax
-; ALL-NEXT:    movzwl %ax, %eax
 ; ALL-NEXT:    vmovd %eax, %xmm2
 ; ALL-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[0]
 ; ALL-NEXT:    vpextrw $1, %xmm1, %eax
-; ALL-NEXT:    movzwl %ax, %eax
 ; ALL-NEXT:    vmovd %eax, %xmm3
 ; ALL-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; ALL-NEXT:    vmovd %xmm1, %eax
@@ -546,7 +507,6 @@ define <8 x float> @load_cvt_8i16_to_8f32(<8 x i16>* %a0) nounwind {
 ; ALL-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
 ; ALL-NEXT:    vpextrw $1, %xmm0, %eax
-; ALL-NEXT:    movzwl %ax, %eax
 ; ALL-NEXT:    vmovd %eax, %xmm0
 ; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; ALL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
@@ -587,7 +547,6 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX1-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm6
 ; AVX1-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm7
 ; AVX1-NEXT:    vpextrw $1, %xmm7, %eax
-; AVX1-NEXT:    movzwl %ax, %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm1
 ; AVX1-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX1-NEXT:    vmovd %xmm7, %eax
@@ -601,12 +560,10 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX1-NEXT:    vcvtph2ps %xmm7, %xmm7
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm7[0],xmm1[3]
 ; AVX1-NEXT:    vpextrw $1, %xmm6, %eax
-; AVX1-NEXT:    movzwl %ax, %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm6
 ; AVX1-NEXT:    vcvtph2ps %xmm6, %xmm6
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm6[0]
 ; AVX1-NEXT:    vpextrw $1, %xmm5, %eax
-; AVX1-NEXT:    movzwl %ax, %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm6
 ; AVX1-NEXT:    vcvtph2ps %xmm6, %xmm6
 ; AVX1-NEXT:    vmovd %xmm5, %eax
@@ -620,13 +577,11 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX1-NEXT:    vcvtph2ps %xmm6, %xmm6
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm6[0],xmm5[3]
 ; AVX1-NEXT:    vpextrw $1, %xmm0, %eax
-; AVX1-NEXT:    movzwl %ax, %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm0
 ; AVX1-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm5[0,1,2],xmm0[0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    vpextrw $1, %xmm4, %eax
-; AVX1-NEXT:    movzwl %ax, %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm1
 ; AVX1-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX1-NEXT:    vmovd %xmm4, %eax
@@ -640,12 +595,10 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX1-NEXT:    vcvtph2ps %xmm4, %xmm4
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0],xmm1[3]
 ; AVX1-NEXT:    vpextrw $1, %xmm3, %eax
-; AVX1-NEXT:    movzwl %ax, %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm3
 ; AVX1-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0]
 ; AVX1-NEXT:    vpextrw $1, %xmm2, %eax
-; AVX1-NEXT:    movzwl %ax, %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm3
 ; AVX1-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX1-NEXT:    vmovd %xmm2, %eax
@@ -659,7 +612,6 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX1-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
 ; AVX1-NEXT:    vpextrw $1, %xmm8, %eax
-; AVX1-NEXT:    movzwl %ax, %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm3
 ; AVX1-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
@@ -695,7 +647,6 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX2-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm6
 ; AVX2-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm7
 ; AVX2-NEXT:    vpextrw $1, %xmm7, %eax
-; AVX2-NEXT:    movzwl %ax, %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm1
 ; AVX2-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX2-NEXT:    vmovd %xmm7, %eax
@@ -709,12 +660,10 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX2-NEXT:    vcvtph2ps %xmm7, %xmm7
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm7[0],xmm1[3]
 ; AVX2-NEXT:    vpextrw $1, %xmm6, %eax
-; AVX2-NEXT:    movzwl %ax, %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm6
 ; AVX2-NEXT:    vcvtph2ps %xmm6, %xmm6
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm6[0]
 ; AVX2-NEXT:    vpextrw $1, %xmm5, %eax
-; AVX2-NEXT:    movzwl %ax, %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm6
 ; AVX2-NEXT:    vcvtph2ps %xmm6, %xmm6
 ; AVX2-NEXT:    vmovd %xmm5, %eax
@@ -728,13 +677,11 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX2-NEXT:    vcvtph2ps %xmm6, %xmm6
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm6[0],xmm5[3]
 ; AVX2-NEXT:    vpextrw $1, %xmm0, %eax
-; AVX2-NEXT:    movzwl %ax, %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm0
 ; AVX2-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm5[0,1,2],xmm0[0]
 ; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpextrw $1, %xmm4, %eax
-; AVX2-NEXT:    movzwl %ax, %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm1
 ; AVX2-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX2-NEXT:    vmovd %xmm4, %eax
@@ -748,12 +695,10 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX2-NEXT:    vcvtph2ps %xmm4, %xmm4
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0],xmm1[3]
 ; AVX2-NEXT:    vpextrw $1, %xmm3, %eax
-; AVX2-NEXT:    movzwl %ax, %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm3
 ; AVX2-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0]
 ; AVX2-NEXT:    vpextrw $1, %xmm2, %eax
-; AVX2-NEXT:    movzwl %ax, %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm3
 ; AVX2-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX2-NEXT:    vmovd %xmm2, %eax
@@ -767,7 +712,6 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX2-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
 ; AVX2-NEXT:    vpextrw $1, %xmm8, %eax
-; AVX2-NEXT:    movzwl %ax, %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm3
 ; AVX2-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
@@ -803,7 +747,6 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX512F-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm6
 ; AVX512F-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm7
 ; AVX512F-NEXT:    vpextrw $1, %xmm7, %eax
-; AVX512F-NEXT:    movzwl %ax, %eax
 ; AVX512F-NEXT:    vmovd %eax, %xmm0
 ; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; AVX512F-NEXT:    vmovd %xmm7, %eax
@@ -817,12 +760,10 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX512F-NEXT:    vcvtph2ps %xmm7, %xmm7
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm7[0],xmm0[3]
 ; AVX512F-NEXT:    vpextrw $1, %xmm6, %eax
-; AVX512F-NEXT:    movzwl %ax, %eax
 ; AVX512F-NEXT:    vmovd %eax, %xmm6
 ; AVX512F-NEXT:    vcvtph2ps %xmm6, %xmm6
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm6[0]
 ; AVX512F-NEXT:    vpextrw $1, %xmm5, %eax
-; AVX512F-NEXT:    movzwl %ax, %eax
 ; AVX512F-NEXT:    vmovd %eax, %xmm6
 ; AVX512F-NEXT:    vcvtph2ps %xmm6, %xmm6
 ; AVX512F-NEXT:    vmovd %xmm5, %eax
@@ -836,13 +777,11 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX512F-NEXT:    vcvtph2ps %xmm6, %xmm6
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm6[0],xmm5[3]
 ; AVX512F-NEXT:    vpextrw $1, %xmm4, %eax
-; AVX512F-NEXT:    movzwl %ax, %eax
 ; AVX512F-NEXT:    vmovd %eax, %xmm4
 ; AVX512F-NEXT:    vcvtph2ps %xmm4, %xmm4
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
 ; AVX512F-NEXT:    vinsertf128 $1, %xmm0, %ymm4, %ymm4
 ; AVX512F-NEXT:    vpextrw $1, %xmm3, %eax
-; AVX512F-NEXT:    movzwl %ax, %eax
 ; AVX512F-NEXT:    vmovd %eax, %xmm0
 ; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; AVX512F-NEXT:    vmovd %xmm3, %eax
@@ -856,12 +795,10 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX512F-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
 ; AVX512F-NEXT:    vpextrw $1, %xmm2, %eax
-; AVX512F-NEXT:    movzwl %ax, %eax
 ; AVX512F-NEXT:    vmovd %eax, %xmm2
 ; AVX512F-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[0]
 ; AVX512F-NEXT:    vpextrw $1, %xmm1, %eax
-; AVX512F-NEXT:    movzwl %ax, %eax
 ; AVX512F-NEXT:    vmovd %eax, %xmm2
 ; AVX512F-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512F-NEXT:    vmovd %xmm1, %eax
@@ -875,7 +812,6 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX512F-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX512F-NEXT:    vpextrw $1, %xmm8, %eax
-; AVX512F-NEXT:    movzwl %ax, %eax
 ; AVX512F-NEXT:    vmovd %eax, %xmm2
 ; AVX512F-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512F-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
@@ -912,7 +848,6 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX512VL-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm6
 ; AVX512VL-NEXT:    vmovdqa -{{[0-9]+}}(%rsp), %xmm7
 ; AVX512VL-NEXT:    vpextrw $1, %xmm7, %eax
-; AVX512VL-NEXT:    movzwl %ax, %eax
 ; AVX512VL-NEXT:    vmovd %eax, %xmm0
 ; AVX512VL-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; AVX512VL-NEXT:    vmovd %xmm7, %eax
@@ -926,12 +861,10 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX512VL-NEXT:    vcvtph2ps %xmm7, %xmm7
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm7[0],xmm0[3]
 ; AVX512VL-NEXT:    vpextrw $1, %xmm6, %eax
-; AVX512VL-NEXT:    movzwl %ax, %eax
 ; AVX512VL-NEXT:    vmovd %eax, %xmm6
 ; AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm6[0]
 ; AVX512VL-NEXT:    vpextrw $1, %xmm5, %eax
-; AVX512VL-NEXT:    movzwl %ax, %eax
 ; AVX512VL-NEXT:    vmovd %eax, %xmm6
 ; AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
 ; AVX512VL-NEXT:    vmovd %xmm5, %eax
@@ -945,13 +878,11 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX512VL-NEXT:    vcvtph2ps %xmm6, %xmm6
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm6[0],xmm5[3]
 ; AVX512VL-NEXT:    vpextrw $1, %xmm4, %eax
-; AVX512VL-NEXT:    movzwl %ax, %eax
 ; AVX512VL-NEXT:    vmovd %eax, %xmm4
 ; AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm0, %ymm4, %ymm0
 ; AVX512VL-NEXT:    vpextrw $1, %xmm3, %eax
-; AVX512VL-NEXT:    movzwl %ax, %eax
 ; AVX512VL-NEXT:    vmovd %eax, %xmm4
 ; AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
 ; AVX512VL-NEXT:    vmovd %xmm3, %eax
@@ -965,12 +896,10 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX512VL-NEXT:    vcvtph2ps %xmm4, %xmm4
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
 ; AVX512VL-NEXT:    vpextrw $1, %xmm2, %eax
-; AVX512VL-NEXT:    movzwl %ax, %eax
 ; AVX512VL-NEXT:    vmovd %eax, %xmm2
 ; AVX512VL-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[0]
 ; AVX512VL-NEXT:    vpextrw $1, %xmm1, %eax
-; AVX512VL-NEXT:    movzwl %ax, %eax
 ; AVX512VL-NEXT:    vmovd %eax, %xmm3
 ; AVX512VL-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX512VL-NEXT:    vmovd %xmm1, %eax
@@ -984,7 +913,6 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX512VL-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
 ; AVX512VL-NEXT:    vpextrw $1, %xmm8, %eax
-; AVX512VL-NEXT:    movzwl %ax, %eax
 ; AVX512VL-NEXT:    vmovd %eax, %xmm3
 ; AVX512VL-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX512VL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0]
@@ -1021,14 +949,13 @@ define <2 x double> @cvt_2i16_to_2f64(<2 x i16> %a0) nounwind {
 ; ALL-NEXT:    vmovd %xmm0, %eax
 ; ALL-NEXT:    movzwl %ax, %ecx
 ; ALL-NEXT:    shrl $16, %eax
-; ALL-NEXT:    vmovd %ecx, %xmm0
+; ALL-NEXT:    vmovd %eax, %xmm0
 ; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; ALL-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; ALL-NEXT:    movzwl %ax, %eax
-; ALL-NEXT:    vmovd %eax, %xmm1
+; ALL-NEXT:    vmovd %ecx, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; ALL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; ALL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; ALL-NEXT:    retq
   %1 = bitcast <2 x i16> %a0 to <2 x half>
   %2 = fpext <2 x half> %1 to <2 x double>
@@ -1046,7 +973,6 @@ define <4 x double> @cvt_4i16_to_4f64(<4 x i16> %a0) nounwind {
 ; ALL-NEXT:    shrl $16, %eax
 ; ALL-NEXT:    shrq $32, %rcx
 ; ALL-NEXT:    shrq $48, %rdx
-; ALL-NEXT:    movzwl %dx, %edx
 ; ALL-NEXT:    vmovd %edx, %xmm0
 ; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; ALL-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
@@ -1055,14 +981,13 @@ define <4 x double> @cvt_4i16_to_4f64(<4 x i16> %a0) nounwind {
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; ALL-NEXT:    vmovd %esi, %xmm1
+; ALL-NEXT:    vmovd %eax, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; ALL-NEXT:    movzwl %ax, %eax
-; ALL-NEXT:    vmovd %eax, %xmm2
+; ALL-NEXT:    vmovd %esi, %xmm2
 ; ALL-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; ALL-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
-; ALL-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; ALL-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
 ; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %1 = bitcast <4 x i16> %a0 to <4 x half>
@@ -1076,14 +1001,13 @@ define <2 x double> @cvt_8i16_to_2f64(<8 x i16> %a0) nounwind {
 ; ALL-NEXT:    vmovd %xmm0, %eax
 ; ALL-NEXT:    movzwl %ax, %ecx
 ; ALL-NEXT:    shrl $16, %eax
-; ALL-NEXT:    vmovd %ecx, %xmm0
+; ALL-NEXT:    vmovd %eax, %xmm0
 ; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; ALL-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; ALL-NEXT:    movzwl %ax, %eax
-; ALL-NEXT:    vmovd %eax, %xmm1
+; ALL-NEXT:    vmovd %ecx, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; ALL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; ALL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; ALL-NEXT:    retq
   %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
   %2 = bitcast <2 x i16> %1 to <2 x half>
@@ -1102,7 +1026,6 @@ define <4 x double> @cvt_8i16_to_4f64(<8 x i16> %a0) nounwind {
 ; ALL-NEXT:    shrl $16, %eax
 ; ALL-NEXT:    shrq $32, %rcx
 ; ALL-NEXT:    shrq $48, %rdx
-; ALL-NEXT:    movzwl %dx, %edx
 ; ALL-NEXT:    vmovd %edx, %xmm0
 ; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; ALL-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
@@ -1111,14 +1034,13 @@ define <4 x double> @cvt_8i16_to_4f64(<8 x i16> %a0) nounwind {
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; ALL-NEXT:    vmovd %esi, %xmm1
+; ALL-NEXT:    vmovd %eax, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; ALL-NEXT:    movzwl %ax, %eax
-; ALL-NEXT:    vmovd %eax, %xmm2
+; ALL-NEXT:    vmovd %esi, %xmm2
 ; ALL-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; ALL-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
-; ALL-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; ALL-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
 ; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1146,7 +1068,6 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
 ; AVX1-NEXT:    shrl $16, %edi
 ; AVX1-NEXT:    shrq $32, %rsi
 ; AVX1-NEXT:    shrq $48, %rax
-; AVX1-NEXT:    movzwl %ax, %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm0
 ; AVX1-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; AVX1-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
@@ -1155,17 +1076,15 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
 ; AVX1-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX1-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT:    vmovd %ecx, %xmm1
+; AVX1-NEXT:    vmovd %edi, %xmm1
 ; AVX1-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX1-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    movzwl %di, %eax
-; AVX1-NEXT:    vmovd %eax, %xmm2
+; AVX1-NEXT:    vmovd %ecx, %xmm2
 ; AVX1-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX1-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT:    movzwl %r10w, %eax
-; AVX1-NEXT:    vmovd %eax, %xmm1
+; AVX1-NEXT:    vmovd %r10d, %xmm1
 ; AVX1-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX1-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    movzwl %r9w, %eax
@@ -1173,14 +1092,13 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
 ; AVX1-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX1-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX1-NEXT:    vmovd %r8d, %xmm2
+; AVX1-NEXT:    vmovd %edx, %xmm2
 ; AVX1-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX1-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    movzwl %dx, %eax
-; AVX1-NEXT:    vmovd %eax, %xmm3
+; AVX1-NEXT:    vmovd %r8d, %xmm3
 ; AVX1-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX1-NEXT:    vcvtss2sd %xmm3, %xmm3, %xmm3
-; AVX1-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX1-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX1-NEXT:    retq
 ;
@@ -1202,7 +1120,6 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
 ; AVX2-NEXT:    shrl $16, %edi
 ; AVX2-NEXT:    shrq $32, %rsi
 ; AVX2-NEXT:    shrq $48, %rax
-; AVX2-NEXT:    movzwl %ax, %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm0
 ; AVX2-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; AVX2-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
@@ -1211,17 +1128,15 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
 ; AVX2-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX2-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT:    vmovd %ecx, %xmm1
+; AVX2-NEXT:    vmovd %edi, %xmm1
 ; AVX2-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX2-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    movzwl %di, %eax
-; AVX2-NEXT:    vmovd %eax, %xmm2
+; AVX2-NEXT:    vmovd %ecx, %xmm2
 ; AVX2-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX2-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
 ; AVX2-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT:    movzwl %r10w, %eax
-; AVX2-NEXT:    vmovd %eax, %xmm1
+; AVX2-NEXT:    vmovd %r10d, %xmm1
 ; AVX2-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX2-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    movzwl %r9w, %eax
@@ -1229,14 +1144,13 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
 ; AVX2-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX2-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT:    vmovd %r8d, %xmm2
+; AVX2-NEXT:    vmovd %edx, %xmm2
 ; AVX2-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX2-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    movzwl %dx, %eax
-; AVX2-NEXT:    vmovd %eax, %xmm3
+; AVX2-NEXT:    vmovd %r8d, %xmm3
 ; AVX2-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX2-NEXT:    vcvtss2sd %xmm3, %xmm3, %xmm3
-; AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
 ; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX2-NEXT:    retq
 ;
@@ -1258,7 +1172,6 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
 ; AVX512-NEXT:    shrl $16, %edi
 ; AVX512-NEXT:    shrq $32, %rsi
 ; AVX512-NEXT:    shrq $48, %rax
-; AVX512-NEXT:    movzwl %ax, %eax
 ; AVX512-NEXT:    vmovd %eax, %xmm0
 ; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; AVX512-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
@@ -1267,17 +1180,15 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
 ; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX512-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT:    vmovd %ecx, %xmm1
+; AVX512-NEXT:    vmovd %edi, %xmm1
 ; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX512-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX512-NEXT:    movzwl %di, %eax
-; AVX512-NEXT:    vmovd %eax, %xmm2
+; AVX512-NEXT:    vmovd %ecx, %xmm2
 ; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
-; AVX512-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX512-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
 ; AVX512-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX512-NEXT:    movzwl %r10w, %eax
-; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vmovd %r10d, %xmm1
 ; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX512-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; AVX512-NEXT:    movzwl %r9w, %eax
@@ -1285,14 +1196,13 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
 ; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX512-NEXT:    vmovd %r8d, %xmm2
+; AVX512-NEXT:    vmovd %edx, %xmm2
 ; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
-; AVX512-NEXT:    movzwl %dx, %eax
-; AVX512-NEXT:    vmovd %eax, %xmm3
+; AVX512-NEXT:    vmovd %r8d, %xmm3
 ; AVX512-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX512-NEXT:    vcvtss2sd %xmm3, %xmm3, %xmm3
-; AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
 ; AVX512-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX512-NEXT:    vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512-NEXT:    retq
@@ -1322,11 +1232,11 @@ define double @load_cvt_i16_to_f64(i16* %a0) nounwind {
 define <2 x double> @load_cvt_2i16_to_2f64(<2 x i16>* %a0) nounwind {
 ; ALL-LABEL: load_cvt_2i16_to_2f64:
 ; ALL:       # %bb.0:
-; ALL-NEXT:    movzwl 2(%rdi), %eax
-; ALL-NEXT:    vmovd %eax, %xmm0
+; ALL-NEXT:    movzwl (%rdi), %eax
+; ALL-NEXT:    movzwl 2(%rdi), %ecx
+; ALL-NEXT:    vmovd %ecx, %xmm0
 ; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; ALL-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; ALL-NEXT:    movzwl (%rdi), %eax
 ; ALL-NEXT:    vmovd %eax, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
@@ -1341,20 +1251,20 @@ define <2 x double> @load_cvt_2i16_to_2f64(<2 x i16>* %a0) nounwind {
 define <4 x double> @load_cvt_4i16_to_4f64(<4 x i16>* %a0) nounwind {
 ; ALL-LABEL: load_cvt_4i16_to_4f64:
 ; ALL:       # %bb.0:
-; ALL-NEXT:    movzwl 6(%rdi), %eax
-; ALL-NEXT:    vmovd %eax, %xmm0
+; ALL-NEXT:    movzwl (%rdi), %eax
+; ALL-NEXT:    movzwl 2(%rdi), %ecx
+; ALL-NEXT:    movzwl 4(%rdi), %edx
+; ALL-NEXT:    movzwl 6(%rdi), %esi
+; ALL-NEXT:    vmovd %esi, %xmm0
 ; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; ALL-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; ALL-NEXT:    movzwl 4(%rdi), %eax
-; ALL-NEXT:    vmovd %eax, %xmm1
+; ALL-NEXT:    vmovd %edx, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; ALL-NEXT:    movzwl 2(%rdi), %eax
-; ALL-NEXT:    vmovd %eax, %xmm1
+; ALL-NEXT:    vmovd %ecx, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; ALL-NEXT:    movzwl (%rdi), %eax
 ; ALL-NEXT:    vmovd %eax, %xmm2
 ; ALL-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; ALL-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
@@ -1378,7 +1288,6 @@ define <4 x double> @load_cvt_8i16_to_4f64(<8 x i16>* %a0) nounwind {
 ; ALL-NEXT:    shrl $16, %eax
 ; ALL-NEXT:    shrq $32, %rcx
 ; ALL-NEXT:    shrq $48, %rdx
-; ALL-NEXT:    movzwl %dx, %edx
 ; ALL-NEXT:    vmovd %edx, %xmm0
 ; ALL-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; ALL-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
@@ -1387,14 +1296,13 @@ define <4 x double> @load_cvt_8i16_to_4f64(<8 x i16>* %a0) nounwind {
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; ALL-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; ALL-NEXT:    vmovd %esi, %xmm1
+; ALL-NEXT:    vmovd %eax, %xmm1
 ; ALL-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; ALL-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; ALL-NEXT:    movzwl %ax, %eax
-; ALL-NEXT:    vmovd %eax, %xmm2
+; ALL-NEXT:    vmovd %esi, %xmm2
 ; ALL-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; ALL-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
-; ALL-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; ALL-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
 ; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; ALL-NEXT:    retq
   %1 = load <8 x i16>, <8 x i16>* %a0
@@ -1407,40 +1315,40 @@ define <4 x double> @load_cvt_8i16_to_4f64(<8 x i16>* %a0) nounwind {
 define <8 x double> @load_cvt_8i16_to_8f64(<8 x i16>* %a0) nounwind {
 ; AVX1-LABEL: load_cvt_8i16_to_8f64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    movzwl 6(%rdi), %eax
-; AVX1-NEXT:    vmovd %eax, %xmm0
+; AVX1-NEXT:    movzwl 8(%rdi), %r8d
+; AVX1-NEXT:    movzwl 10(%rdi), %r9d
+; AVX1-NEXT:    movzwl 12(%rdi), %r10d
+; AVX1-NEXT:    movzwl 14(%rdi), %esi
+; AVX1-NEXT:    movzwl (%rdi), %eax
+; AVX1-NEXT:    movzwl 2(%rdi), %ecx
+; AVX1-NEXT:    movzwl 4(%rdi), %edx
+; AVX1-NEXT:    movzwl 6(%rdi), %edi
+; AVX1-NEXT:    vmovd %edi, %xmm0
 ; AVX1-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; AVX1-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX1-NEXT:    movzwl 4(%rdi), %eax
-; AVX1-NEXT:    vmovd %eax, %xmm1
+; AVX1-NEXT:    vmovd %edx, %xmm1
 ; AVX1-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX1-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT:    movzwl 2(%rdi), %eax
-; AVX1-NEXT:    vmovd %eax, %xmm1
+; AVX1-NEXT:    vmovd %ecx, %xmm1
 ; AVX1-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX1-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    movzwl (%rdi), %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm2
 ; AVX1-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX1-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT:    movzwl 14(%rdi), %eax
-; AVX1-NEXT:    vmovd %eax, %xmm1
+; AVX1-NEXT:    vmovd %esi, %xmm1
 ; AVX1-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX1-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    movzwl 12(%rdi), %eax
-; AVX1-NEXT:    vmovd %eax, %xmm2
+; AVX1-NEXT:    vmovd %r10d, %xmm2
 ; AVX1-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX1-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX1-NEXT:    movzwl 10(%rdi), %eax
-; AVX1-NEXT:    vmovd %eax, %xmm2
+; AVX1-NEXT:    vmovd %r9d, %xmm2
 ; AVX1-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX1-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    movzwl 8(%rdi), %eax
-; AVX1-NEXT:    vmovd %eax, %xmm3
+; AVX1-NEXT:    vmovd %r8d, %xmm3
 ; AVX1-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX1-NEXT:    vcvtss2sd %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
@@ -1449,40 +1357,40 @@ define <8 x double> @load_cvt_8i16_to_8f64(<8 x i16>* %a0) nounwind {
 ;
 ; AVX2-LABEL: load_cvt_8i16_to_8f64:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    movzwl 6(%rdi), %eax
-; AVX2-NEXT:    vmovd %eax, %xmm0
+; AVX2-NEXT:    movzwl 8(%rdi), %r8d
+; AVX2-NEXT:    movzwl 10(%rdi), %r9d
+; AVX2-NEXT:    movzwl 12(%rdi), %r10d
+; AVX2-NEXT:    movzwl 14(%rdi), %esi
+; AVX2-NEXT:    movzwl (%rdi), %eax
+; AVX2-NEXT:    movzwl 2(%rdi), %ecx
+; AVX2-NEXT:    movzwl 4(%rdi), %edx
+; AVX2-NEXT:    movzwl 6(%rdi), %edi
+; AVX2-NEXT:    vmovd %edi, %xmm0
 ; AVX2-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; AVX2-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX2-NEXT:    movzwl 4(%rdi), %eax
-; AVX2-NEXT:    vmovd %eax, %xmm1
+; AVX2-NEXT:    vmovd %edx, %xmm1
 ; AVX2-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX2-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT:    movzwl 2(%rdi), %eax
-; AVX2-NEXT:    vmovd %eax, %xmm1
+; AVX2-NEXT:    vmovd %ecx, %xmm1
 ; AVX2-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX2-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    movzwl (%rdi), %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm2
 ; AVX2-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX2-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
 ; AVX2-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT:    movzwl 14(%rdi), %eax
-; AVX2-NEXT:    vmovd %eax, %xmm1
+; AVX2-NEXT:    vmovd %esi, %xmm1
 ; AVX2-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX2-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    movzwl 12(%rdi), %eax
-; AVX2-NEXT:    vmovd %eax, %xmm2
+; AVX2-NEXT:    vmovd %r10d, %xmm2
 ; AVX2-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX2-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT:    movzwl 10(%rdi), %eax
-; AVX2-NEXT:    vmovd %eax, %xmm2
+; AVX2-NEXT:    vmovd %r9d, %xmm2
 ; AVX2-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX2-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    movzwl 8(%rdi), %eax
-; AVX2-NEXT:    vmovd %eax, %xmm3
+; AVX2-NEXT:    vmovd %r8d, %xmm3
 ; AVX2-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX2-NEXT:    vcvtss2sd %xmm3, %xmm3, %xmm3
 ; AVX2-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
@@ -1491,40 +1399,40 @@ define <8 x double> @load_cvt_8i16_to_8f64(<8 x i16>* %a0) nounwind {
 ;
 ; AVX512-LABEL: load_cvt_8i16_to_8f64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    movzwl 14(%rdi), %eax
-; AVX512-NEXT:    vmovd %eax, %xmm0
+; AVX512-NEXT:    movzwl (%rdi), %r8d
+; AVX512-NEXT:    movzwl 2(%rdi), %r9d
+; AVX512-NEXT:    movzwl 4(%rdi), %r10d
+; AVX512-NEXT:    movzwl 6(%rdi), %esi
+; AVX512-NEXT:    movzwl 8(%rdi), %eax
+; AVX512-NEXT:    movzwl 10(%rdi), %ecx
+; AVX512-NEXT:    movzwl 12(%rdi), %edx
+; AVX512-NEXT:    movzwl 14(%rdi), %edi
+; AVX512-NEXT:    vmovd %edi, %xmm0
 ; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
 ; AVX512-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512-NEXT:    movzwl 12(%rdi), %eax
-; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vmovd %edx, %xmm1
 ; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX512-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
 ; AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT:    movzwl 10(%rdi), %eax
-; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vmovd %ecx, %xmm1
 ; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX512-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX512-NEXT:    movzwl 8(%rdi), %eax
 ; AVX512-NEXT:    vmovd %eax, %xmm2
 ; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
 ; AVX512-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX512-NEXT:    movzwl 6(%rdi), %eax
-; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vmovd %esi, %xmm1
 ; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
 ; AVX512-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX512-NEXT:    movzwl 4(%rdi), %eax
-; AVX512-NEXT:    vmovd %eax, %xmm2
+; AVX512-NEXT:    vmovd %r10d, %xmm2
 ; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX512-NEXT:    movzwl 2(%rdi), %eax
-; AVX512-NEXT:    vmovd %eax, %xmm2
+; AVX512-NEXT:    vmovd %r9d, %xmm2
 ; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512-NEXT:    vcvtss2sd %xmm2, %xmm2, %xmm2
-; AVX512-NEXT:    movzwl (%rdi), %eax
-; AVX512-NEXT:    vmovd %eax, %xmm3
+; AVX512-NEXT:    vmovd %r8d, %xmm3
 ; AVX512-NEXT:    vcvtph2ps %xmm3, %xmm3
 ; AVX512-NEXT:    vcvtss2sd %xmm3, %xmm3, %xmm3
 ; AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
@@ -1557,20 +1465,16 @@ define <4 x i16> @cvt_4f32_to_4i16(<4 x float> %a0) nounwind {
 ; ALL-LABEL: cvt_4f32_to_4i16:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm1, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm1, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm1, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; ALL-NEXT:    vmovd %xmm0, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm0, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vmovaps -{{[0-9]+}}(%rsp), %xmm0
 ; ALL-NEXT:    retq
   %1 = fptrunc <4 x float> %a0 to <4 x half>
@@ -1582,20 +1486,16 @@ define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) nounwind {
 ; ALL-LABEL: cvt_4f32_to_8i16_undef:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm1, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm1, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm1, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; ALL-NEXT:    vmovd %xmm0, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm0, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vmovaps -{{[0-9]+}}(%rsp), %xmm0
 ; ALL-NEXT:    retq
   %1 = fptrunc <4 x float> %a0 to <4 x half>
@@ -1608,20 +1508,16 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind {
 ; ALL-LABEL: cvt_4f32_to_8i16_zero:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm1, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm1, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm1, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; ALL-NEXT:    vmovd %xmm0, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm0, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT:    retq
   %1 = fptrunc <4 x float> %a0 to <4 x half>
@@ -1633,42 +1529,38 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind {
 define <8 x i16> @cvt_8f32_to_8i16(<8 x float> %a0) nounwind {
 ; ALL-LABEL: cvt_8f32_to_8i16:
 ; ALL:       # %bb.0:
+; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm1
+; ALL-NEXT:    vpextrw $0, %xmm1, %eax
 ; ALL-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    shll $16, %eax
-; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm1
 ; ALL-NEXT:    vmovd %xmm1, %ecx
-; ALL-NEXT:    movzwl %cx, %ecx
+; ALL-NEXT:    shll $16, %ecx
 ; ALL-NEXT:    orl %eax, %ecx
-; ALL-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %edx
-; ALL-NEXT:    shll $16, %edx
 ; ALL-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT:    vpextrw $0, %xmm1, %edx
+; ALL-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
 ; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    movzwl %ax, %eax
+; ALL-NEXT:    shll $16, %eax
 ; ALL-NEXT:    orl %edx, %eax
 ; ALL-NEXT:    shlq $32, %rax
 ; ALL-NEXT:    orq %rcx, %rax
 ; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm1
+; ALL-NEXT:    vpextrw $0, %xmm1, %ecx
 ; ALL-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %ecx
-; ALL-NEXT:    shll $16, %ecx
-; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm1
 ; ALL-NEXT:    vmovd %xmm1, %edx
-; ALL-NEXT:    movzwl %dx, %edx
+; ALL-NEXT:    shll $16, %edx
 ; ALL-NEXT:    orl %ecx, %edx
-; ALL-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; ALL-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %ecx
-; ALL-NEXT:    shll $16, %ecx
-; ALL-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; ALL-NEXT:    vpextrw $0, %xmm1, %ecx
+; ALL-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
 ; ALL-NEXT:    vmovd %xmm0, %esi
-; ALL-NEXT:    movzwl %si, %esi
+; ALL-NEXT:    shll $16, %esi
 ; ALL-NEXT:    orl %ecx, %esi
 ; ALL-NEXT:    shlq $32, %rsi
 ; ALL-NEXT:    orq %rdx, %rsi
@@ -1685,204 +1577,222 @@ define <8 x i16> @cvt_8f32_to_8i16(<8 x float> %a0) nounwind {
 define <16 x i16> @cvt_16f32_to_16i16(<16 x float> %a0) nounwind {
 ; AVX1-LABEL: cvt_16f32_to_16i16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vcvtps2ph $4, %xmm1, %xmm2
-; AVX1-NEXT:    vmovd %xmm2, %eax
-; AVX1-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT:    pushq %rbp
+; AVX1-NEXT:    pushq %r15
+; AVX1-NEXT:    pushq %r14
+; AVX1-NEXT:    pushq %r12
+; AVX1-NEXT:    pushq %rbx
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm3 = xmm2[3,1,2,3]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX1-NEXT:    vmovd %xmm3, %r8d
+; AVX1-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX1-NEXT:    vmovd %xmm3, %r9d
+; AVX1-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX1-NEXT:    vmovd %xmm3, %r10d
 ; AVX1-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX1-NEXT:    vmovd %eax, %xmm3
-; AVX1-NEXT:    vmovd %xmm2, %eax
-; AVX1-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX1-NEXT:    vmovd %xmm2, %r11d
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
 ; AVX1-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX1-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
-; AVX1-NEXT:    vmovd %xmm2, %eax
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; AVX1-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX1-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
-; AVX1-NEXT:    vmovd %xmm1, %eax
-; AVX1-NEXT:    vcvtps2ph $4, %xmm2, %xmm1
-; AVX1-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
-; AVX1-NEXT:    vmovd %xmm1, %eax
-; AVX1-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
-; AVX1-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX1-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
-; AVX1-NEXT:    vmovd %xmm1, %eax
-; AVX1-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
-; AVX1-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX1-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
-; AVX1-NEXT:    vmovd %xmm1, %eax
-; AVX1-NEXT:    vcvtps2ph $4, %xmm0, %xmm1
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX1-NEXT:    vmovd %xmm2, %r14d
+; AVX1-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; AVX1-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX1-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
-; AVX1-NEXT:    vmovd %xmm2, %eax
-; AVX1-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm2
-; AVX1-NEXT:    vmovd %xmm1, %eax
-; AVX1-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX1-NEXT:    vmovd %eax, %xmm3
-; AVX1-NEXT:    vmovd %xmm1, %eax
-; AVX1-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX1-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX1-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
-; AVX1-NEXT:    vmovd %xmm1, %eax
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX1-NEXT:    vmovd %xmm2, %r15d
+; AVX1-NEXT:    vcvtps2ph $4, %xmm0, %xmm2
+; AVX1-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; AVX1-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
-; AVX1-NEXT:    vmovd %xmm0, %eax
-; AVX1-NEXT:    vcvtps2ph $4, %xmm1, %xmm0
-; AVX1-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
-; AVX1-NEXT:    vmovd %xmm0, %eax
-; AVX1-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vmovd %xmm0, %r12d
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm0
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX1-NEXT:    vmovd %xmm3, %edx
+; AVX1-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX1-NEXT:    vmovd %xmm3, %esi
+; AVX1-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX1-NEXT:    vmovd %xmm3, %ebx
 ; AVX1-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
-; AVX1-NEXT:    vmovd %xmm0, %eax
+; AVX1-NEXT:    vmovd %xmm0, %ebp
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm1[3,1,2,3]
 ; AVX1-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX1-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX1-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX1-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
-; AVX1-NEXT:    vmovd %xmm1, %eax
-; AVX1-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm1
+; AVX1-NEXT:    vmovd %xmm0, %edi
+; AVX1-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm1[1,0]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
-; AVX1-NEXT:    vpinsrw $7, %eax, %xmm1, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vcvtps2ph $4, %xmm1, %xmm0
+; AVX1-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; AVX1-NEXT:    vmovd %xmm1, %ecx
+; AVX1-NEXT:    vpinsrw $1, %ecx, %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $3, %edi, %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $4, %ebp, %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $5, %ebx, %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $6, %esi, %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $7, %edx, %xmm0, %xmm0
+; AVX1-NEXT:    vpinsrw $1, %r12d, %xmm2, %xmm1
+; AVX1-NEXT:    vpinsrw $2, %r15d, %xmm1, %xmm1
+; AVX1-NEXT:    vpinsrw $3, %r14d, %xmm1, %xmm1
+; AVX1-NEXT:    vpinsrw $4, %r11d, %xmm1, %xmm1
+; AVX1-NEXT:    vpinsrw $5, %r10d, %xmm1, %xmm1
+; AVX1-NEXT:    vpinsrw $6, %r9d, %xmm1, %xmm1
+; AVX1-NEXT:    vpinsrw $7, %r8d, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    popq %rbx
+; AVX1-NEXT:    popq %r12
+; AVX1-NEXT:    popq %r14
+; AVX1-NEXT:    popq %r15
+; AVX1-NEXT:    popq %rbp
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: cvt_16f32_to_16i16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vcvtps2ph $4, %xmm1, %xmm2
-; AVX2-NEXT:    vmovd %xmm2, %eax
-; AVX2-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX2-NEXT:    pushq %rbp
+; AVX2-NEXT:    pushq %r15
+; AVX2-NEXT:    pushq %r14
+; AVX2-NEXT:    pushq %r12
+; AVX2-NEXT:    pushq %rbx
+; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX2-NEXT:    vpermilps {{.*#+}} xmm3 = xmm2[3,1,2,3]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX2-NEXT:    vmovd %xmm3, %r8d
+; AVX2-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX2-NEXT:    vmovd %xmm3, %r9d
+; AVX2-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX2-NEXT:    vmovd %xmm3, %r10d
 ; AVX2-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX2-NEXT:    vmovd %eax, %xmm3
-; AVX2-NEXT:    vmovd %xmm2, %eax
-; AVX2-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX2-NEXT:    vmovd %xmm2, %r11d
+; AVX2-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
 ; AVX2-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX2-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
-; AVX2-NEXT:    vmovd %xmm2, %eax
-; AVX2-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX2-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; AVX2-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX2-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
-; AVX2-NEXT:    vmovd %xmm1, %eax
-; AVX2-NEXT:    vcvtps2ph $4, %xmm2, %xmm1
-; AVX2-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
-; AVX2-NEXT:    vmovd %xmm1, %eax
-; AVX2-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
-; AVX2-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX2-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
-; AVX2-NEXT:    vmovd %xmm1, %eax
-; AVX2-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
-; AVX2-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX2-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
-; AVX2-NEXT:    vmovd %xmm1, %eax
-; AVX2-NEXT:    vcvtps2ph $4, %xmm0, %xmm1
-; AVX2-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX2-NEXT:    vmovd %xmm2, %r14d
+; AVX2-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; AVX2-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX2-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
-; AVX2-NEXT:    vmovd %xmm2, %eax
-; AVX2-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm2
-; AVX2-NEXT:    vmovd %xmm1, %eax
-; AVX2-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX2-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX2-NEXT:    vmovd %eax, %xmm3
-; AVX2-NEXT:    vmovd %xmm1, %eax
-; AVX2-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX2-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX2-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
-; AVX2-NEXT:    vmovd %xmm1, %eax
-; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX2-NEXT:    vmovd %xmm2, %r15d
+; AVX2-NEXT:    vcvtps2ph $4, %xmm0, %xmm2
+; AVX2-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; AVX2-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
-; AVX2-NEXT:    vmovd %xmm0, %eax
-; AVX2-NEXT:    vcvtps2ph $4, %xmm1, %xmm0
-; AVX2-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
-; AVX2-NEXT:    vmovd %xmm0, %eax
-; AVX2-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX2-NEXT:    vmovd %xmm0, %r12d
+; AVX2-NEXT:    vextractf128 $1, %ymm1, %xmm0
+; AVX2-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX2-NEXT:    vmovd %xmm3, %edx
+; AVX2-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX2-NEXT:    vmovd %xmm3, %esi
+; AVX2-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX2-NEXT:    vmovd %xmm3, %ebx
 ; AVX2-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
-; AVX2-NEXT:    vmovd %xmm0, %eax
+; AVX2-NEXT:    vmovd %xmm0, %ebp
 ; AVX2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm1[3,1,2,3]
 ; AVX2-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX2-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX2-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX2-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
-; AVX2-NEXT:    vmovd %xmm1, %eax
-; AVX2-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm1
+; AVX2-NEXT:    vmovd %xmm0, %edi
+; AVX2-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm1[1,0]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovd %xmm0, %eax
-; AVX2-NEXT:    vpinsrw $7, %eax, %xmm1, %xmm0
-; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT:    vcvtps2ph $4, %xmm1, %xmm0
+; AVX2-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; AVX2-NEXT:    vmovd %xmm1, %ecx
+; AVX2-NEXT:    vpinsrw $1, %ecx, %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $3, %edi, %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $4, %ebp, %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $5, %ebx, %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $6, %esi, %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $7, %edx, %xmm0, %xmm0
+; AVX2-NEXT:    vpinsrw $1, %r12d, %xmm2, %xmm1
+; AVX2-NEXT:    vpinsrw $2, %r15d, %xmm1, %xmm1
+; AVX2-NEXT:    vpinsrw $3, %r14d, %xmm1, %xmm1
+; AVX2-NEXT:    vpinsrw $4, %r11d, %xmm1, %xmm1
+; AVX2-NEXT:    vpinsrw $5, %r10d, %xmm1, %xmm1
+; AVX2-NEXT:    vpinsrw $6, %r9d, %xmm1, %xmm1
+; AVX2-NEXT:    vpinsrw $7, %r8d, %xmm1, %xmm1
+; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT:    popq %rbx
+; AVX2-NEXT:    popq %r12
+; AVX2-NEXT:    popq %r14
+; AVX2-NEXT:    popq %r15
+; AVX2-NEXT:    popq %rbp
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: cvt_16f32_to_16i16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT:    vcvtps2ph $4, %xmm1, %xmm2
-; AVX512-NEXT:    vmovd %xmm2, %eax
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX512-NEXT:    pushq %rbp
+; AVX512-NEXT:    pushq %r15
+; AVX512-NEXT:    pushq %r14
+; AVX512-NEXT:    pushq %r12
+; AVX512-NEXT:    pushq %rbx
+; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
 ; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX512-NEXT:    vmovd %eax, %xmm3
-; AVX512-NEXT:    vmovd %xmm2, %eax
+; AVX512-NEXT:    vmovd %xmm2, %r8d
 ; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
 ; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX512-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vmovd %xmm2, %eax
-; AVX512-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX512-NEXT:    vmovd %xmm2, %r9d
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT:    vmovd %xmm2, %r10d
 ; AVX512-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX512-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vmovd %xmm1, %eax
-; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm1
-; AVX512-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vmovd %xmm1, %eax
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; AVX512-NEXT:    vmovd %xmm1, %r11d
+; AVX512-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
 ; AVX512-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX512-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vmovd %xmm1, %eax
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
+; AVX512-NEXT:    vmovd %xmm1, %r14d
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; AVX512-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX512-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vmovd %xmm1, %eax
+; AVX512-NEXT:    vmovd %xmm1, %r15d
 ; AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm1
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT:    vmovd %xmm2, %r12d
+; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX512-NEXT:    vpermilps {{.*#+}} xmm3 = xmm2[3,1,2,3]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX512-NEXT:    vmovd %xmm3, %edx
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX512-NEXT:    vmovd %xmm3, %esi
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX512-NEXT:    vmovd %xmm3, %ebx
+; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT:    vmovd %xmm2, %ebp
+; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT:    vmovd %xmm2, %edi
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX512-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm3
 ; AVX512-NEXT:    vmovd %xmm2, %eax
-; AVX512-NEXT:    vpinsrw $7, %eax, %xmm3, %xmm2
-; AVX512-NEXT:    vmovd %xmm1, %eax
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX512-NEXT:    vmovd %eax, %xmm3
-; AVX512-NEXT:    vmovd %xmm1, %eax
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX512-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX512-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vmovd %xmm1, %eax
-; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vmovd %xmm0, %eax
-; AVX512-NEXT:    vcvtps2ph $4, %xmm1, %xmm0
-; AVX512-NEXT:    vpinsrw $3, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vmovd %xmm0, %eax
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512-NEXT:    vpinsrw $4, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vmovd %xmm0, %eax
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm1[1,0]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm2
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vmovd %xmm0, %eax
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm1[3,1,2,3]
-; AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512-NEXT:    vpinsrw $6, %eax, %xmm3, %xmm1
-; AVX512-NEXT:    vmovd %xmm0, %eax
-; AVX512-NEXT:    vpinsrw $7, %eax, %xmm1, %xmm0
-; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX512-NEXT:    vmovd %xmm0, %ecx
+; AVX512-NEXT:    vpinsrw $1, %ecx, %xmm2, %xmm0
+; AVX512-NEXT:    vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX512-NEXT:    vpinsrw $3, %edi, %xmm0, %xmm0
+; AVX512-NEXT:    vpinsrw $4, %ebp, %xmm0, %xmm0
+; AVX512-NEXT:    vpinsrw $5, %ebx, %xmm0, %xmm0
+; AVX512-NEXT:    vpinsrw $6, %esi, %xmm0, %xmm0
+; AVX512-NEXT:    vpinsrw $7, %edx, %xmm0, %xmm0
+; AVX512-NEXT:    vpinsrw $1, %r12d, %xmm1, %xmm1
+; AVX512-NEXT:    vpinsrw $2, %r15d, %xmm1, %xmm1
+; AVX512-NEXT:    vpinsrw $3, %r14d, %xmm1, %xmm1
+; AVX512-NEXT:    vpinsrw $4, %r11d, %xmm1, %xmm1
+; AVX512-NEXT:    vpinsrw $5, %r10d, %xmm1, %xmm1
+; AVX512-NEXT:    vpinsrw $6, %r9d, %xmm1, %xmm1
+; AVX512-NEXT:    vpinsrw $7, %r8d, %xmm1, %xmm1
+; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT:    popq %rbx
+; AVX512-NEXT:    popq %r12
+; AVX512-NEXT:    popq %r14
+; AVX512-NEXT:    popq %r15
+; AVX512-NEXT:    popq %rbp
 ; AVX512-NEXT:    retq
   %1 = fptrunc <16 x float> %a0 to <16 x half>
   %2 = bitcast <16 x half> %1 to <16 x i16>
@@ -1897,8 +1807,7 @@ define void @store_cvt_f32_to_i16(float %a0, i16* %a1) nounwind {
 ; ALL-LABEL: store_cvt_f32_to_i16:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; ALL-NEXT:    vmovd %xmm0, %eax
-; ALL-NEXT:    movw %ax, (%rdi)
+; ALL-NEXT:    vpextrw $0, %xmm0, (%rdi)
 ; ALL-NEXT:    retq
   %1 = fptrunc float %a0 to half
   %2 = bitcast half %1 to i16
@@ -1911,19 +1820,15 @@ define void @store_cvt_4f32_to_4i16(<4 x float> %a0, <4 x i16>* %a1) nounwind {
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %ecx
-; ALL-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %edx
+; ALL-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; ALL-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; ALL-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
+; ALL-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
 ; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; ALL-NEXT:    vmovd %xmm0, %esi
-; ALL-NEXT:    movw %si, (%rdi)
-; ALL-NEXT:    movw %dx, 6(%rdi)
-; ALL-NEXT:    movw %cx, 4(%rdi)
-; ALL-NEXT:    movw %ax, 2(%rdi)
+; ALL-NEXT:    vpextrw $0, %xmm0, (%rdi)
+; ALL-NEXT:    vpextrw $0, %xmm3, 6(%rdi)
+; ALL-NEXT:    vpextrw $0, %xmm2, 4(%rdi)
+; ALL-NEXT:    vpextrw $0, %xmm1, 2(%rdi)
 ; ALL-NEXT:    retq
   %1 = fptrunc <4 x float> %a0 to <4 x half>
   %2 = bitcast <4 x half> %1 to <4 x i16>
@@ -1935,20 +1840,16 @@ define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, <8 x i16>* %a1) nounw
 ; ALL-LABEL: store_cvt_4f32_to_8i16_undef:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm1, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm1, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm1, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; ALL-NEXT:    vmovd %xmm0, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm0, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vmovaps -{{[0-9]+}}(%rsp), %xmm0
 ; ALL-NEXT:    vmovaps %xmm0, (%rdi)
 ; ALL-NEXT:    retq
@@ -1963,20 +1864,16 @@ define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) nounwi
 ; ALL-LABEL: store_cvt_4f32_to_8i16_zero:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm1, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm1, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm1, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; ALL-NEXT:    vmovd %xmm0, %eax
-; ALL-NEXT:    movw %ax, -{{[0-9]+}}(%rsp)
+; ALL-NEXT:    vpextrw $0, %xmm0, -{{[0-9]+}}(%rsp)
 ; ALL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; ALL-NEXT:    vmovaps %xmm0, (%rdi)
 ; ALL-NEXT:    retq
@@ -1992,35 +1889,27 @@ define void @store_cvt_8f32_to_8i16(<8 x float> %a0, <8 x i16>* %a1) nounwind {
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %r8d
-; ALL-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %r9d
-; ALL-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; ALL-NEXT:    vmovd %xmm1, %r10d
-; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; ALL-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; ALL-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
 ; ALL-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; ALL-NEXT:    vmovd %xmm2, %r11d
-; ALL-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
-; ALL-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; ALL-NEXT:    vmovd %xmm2, %eax
-; ALL-NEXT:    vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; ALL-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; ALL-NEXT:    vmovd %xmm2, %ecx
+; ALL-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
+; ALL-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; ALL-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; ALL-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; ALL-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm4[1,0]
+; ALL-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; ALL-NEXT:    vpermilps {{.*#+}} xmm7 = xmm4[3,1,2,3]
+; ALL-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
 ; ALL-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; ALL-NEXT:    vmovd %xmm0, %edx
-; ALL-NEXT:    vcvtps2ph $4, %xmm1, %xmm0
-; ALL-NEXT:    vmovd %xmm0, %esi
-; ALL-NEXT:    movw %si, 8(%rdi)
-; ALL-NEXT:    movw %dx, (%rdi)
-; ALL-NEXT:    movw %cx, 14(%rdi)
-; ALL-NEXT:    movw %ax, 12(%rdi)
-; ALL-NEXT:    movw %r11w, 10(%rdi)
-; ALL-NEXT:    movw %r10w, 6(%rdi)
-; ALL-NEXT:    movw %r9w, 4(%rdi)
-; ALL-NEXT:    movw %r8w, 2(%rdi)
+; ALL-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; ALL-NEXT:    vpextrw $0, %xmm4, 8(%rdi)
+; ALL-NEXT:    vpextrw $0, %xmm0, (%rdi)
+; ALL-NEXT:    vpextrw $0, %xmm7, 14(%rdi)
+; ALL-NEXT:    vpextrw $0, %xmm6, 12(%rdi)
+; ALL-NEXT:    vpextrw $0, %xmm5, 10(%rdi)
+; ALL-NEXT:    vpextrw $0, %xmm3, 6(%rdi)
+; ALL-NEXT:    vpextrw $0, %xmm2, 4(%rdi)
+; ALL-NEXT:    vpextrw $0, %xmm1, 2(%rdi)
 ; ALL-NEXT:    vzeroupper
 ; ALL-NEXT:    retq
   %1 = fptrunc <8 x float> %a0 to <8 x half>
@@ -2032,203 +1921,155 @@ define void @store_cvt_8f32_to_8i16(<8 x float> %a0, <8 x i16>* %a1) nounwind {
 define void @store_cvt_16f32_to_16i16(<16 x float> %a0, <16 x i16>* %a1) nounwind {
 ; AVX1-LABEL: store_cvt_16f32_to_16i16:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT:    vcvtps2ph $4, %xmm3, %xmm4
-; AVX1-NEXT:    vmovd %xmm4, %eax
-; AVX1-NEXT:    vcvtps2ph $4, %xmm1, %xmm4
-; AVX1-NEXT:    movw %ax, 24(%rdi)
-; AVX1-NEXT:    vmovd %xmm4, %eax
-; AVX1-NEXT:    vcvtps2ph $4, %xmm2, %xmm4
-; AVX1-NEXT:    movw %ax, 16(%rdi)
-; AVX1-NEXT:    vmovd %xmm4, %eax
-; AVX1-NEXT:    vcvtps2ph $4, %xmm0, %xmm4
-; AVX1-NEXT:    movw %ax, 8(%rdi)
-; AVX1-NEXT:    vmovd %xmm4, %eax
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm3[3,1,2,3]
-; AVX1-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; AVX1-NEXT:    movw %ax, (%rdi)
-; AVX1-NEXT:    vmovd %xmm4, %eax
-; AVX1-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
-; AVX1-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; AVX1-NEXT:    movw %ax, 30(%rdi)
-; AVX1-NEXT:    vmovd %xmm4, %eax
-; AVX1-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; AVX1-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; AVX1-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX1-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
-; AVX1-NEXT:    movw %ax, 28(%rdi)
-; AVX1-NEXT:    vmovd %xmm3, %eax
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm3 = xmm1[3,1,2,3]
-; AVX1-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
-; AVX1-NEXT:    movw %ax, 26(%rdi)
-; AVX1-NEXT:    vmovd %xmm3, %eax
-; AVX1-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
-; AVX1-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
-; AVX1-NEXT:    movw %ax, 22(%rdi)
-; AVX1-NEXT:    vmovd %xmm3, %eax
+; AVX1-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm2, %xmm8
 ; AVX1-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm3, %xmm9
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm0[3,1,2,3]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm4, %xmm10
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm6[1,1,3,3]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm5, %xmm11
+; AVX1-NEXT:    vpermilpd {{.*#+}} xmm7 = xmm6[1,0]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm7, %xmm12
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm2 = xmm6[3,1,2,3]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm2, %xmm13
+; AVX1-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm3, %xmm14
+; AVX1-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm4, %xmm15
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm5 = xmm1[3,1,2,3]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; AVX1-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm7[1,1,3,3]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; AVX1-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm7[1,0]
 ; AVX1-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm4 = xmm7[3,1,2,3]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
 ; AVX1-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX1-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
 ; AVX1-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX1-NEXT:    movw %ax, 20(%rdi)
-; AVX1-NEXT:    vmovd %xmm1, %eax
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm2[3,1,2,3]
-; AVX1-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX1-NEXT:    movw %ax, 18(%rdi)
-; AVX1-NEXT:    vmovd %xmm1, %eax
-; AVX1-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
-; AVX1-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX1-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
-; AVX1-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX1-NEXT:    movw %ax, 14(%rdi)
-; AVX1-NEXT:    vmovd %xmm2, %eax
-; AVX1-NEXT:    movw %ax, 12(%rdi)
-; AVX1-NEXT:    vmovd %xmm1, %eax
-; AVX1-NEXT:    movw %ax, 10(%rdi)
-; AVX1-NEXT:    vmovd %xmm0, %eax
-; AVX1-NEXT:    movw %ax, 6(%rdi)
-; AVX1-NEXT:    vmovd %xmm3, %eax
-; AVX1-NEXT:    movw %ax, 4(%rdi)
-; AVX1-NEXT:    vmovd %xmm4, %eax
-; AVX1-NEXT:    movw %ax, 2(%rdi)
+; AVX1-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; AVX1-NEXT:    vpextrw $0, %xmm7, 24(%rdi)
+; AVX1-NEXT:    vpextrw $0, %xmm1, 16(%rdi)
+; AVX1-NEXT:    vpextrw $0, %xmm6, 8(%rdi)
+; AVX1-NEXT:    vpextrw $0, %xmm0, (%rdi)
+; AVX1-NEXT:    vpextrw $0, %xmm4, 30(%rdi)
+; AVX1-NEXT:    vpextrw $0, %xmm3, 28(%rdi)
+; AVX1-NEXT:    vpextrw $0, %xmm2, 26(%rdi)
+; AVX1-NEXT:    vpextrw $0, %xmm5, 22(%rdi)
+; AVX1-NEXT:    vpextrw $0, %xmm15, 20(%rdi)
+; AVX1-NEXT:    vpextrw $0, %xmm14, 18(%rdi)
+; AVX1-NEXT:    vpextrw $0, %xmm13, 14(%rdi)
+; AVX1-NEXT:    vpextrw $0, %xmm12, 12(%rdi)
+; AVX1-NEXT:    vpextrw $0, %xmm11, 10(%rdi)
+; AVX1-NEXT:    vpextrw $0, %xmm10, 6(%rdi)
+; AVX1-NEXT:    vpextrw $0, %xmm9, 4(%rdi)
+; AVX1-NEXT:    vpextrw $0, %xmm8, 2(%rdi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: store_cvt_16f32_to_16i16:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX2-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX2-NEXT:    vcvtps2ph $4, %xmm3, %xmm4
-; AVX2-NEXT:    vmovd %xmm4, %eax
-; AVX2-NEXT:    vcvtps2ph $4, %xmm1, %xmm4
-; AVX2-NEXT:    movw %ax, 24(%rdi)
-; AVX2-NEXT:    vmovd %xmm4, %eax
-; AVX2-NEXT:    vcvtps2ph $4, %xmm2, %xmm4
-; AVX2-NEXT:    movw %ax, 16(%rdi)
-; AVX2-NEXT:    vmovd %xmm4, %eax
-; AVX2-NEXT:    vcvtps2ph $4, %xmm0, %xmm4
-; AVX2-NEXT:    movw %ax, 8(%rdi)
-; AVX2-NEXT:    vmovd %xmm4, %eax
-; AVX2-NEXT:    vpermilps {{.*#+}} xmm4 = xmm3[3,1,2,3]
-; AVX2-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; AVX2-NEXT:    movw %ax, (%rdi)
-; AVX2-NEXT:    vmovd %xmm4, %eax
-; AVX2-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
-; AVX2-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; AVX2-NEXT:    movw %ax, 30(%rdi)
-; AVX2-NEXT:    vmovd %xmm4, %eax
-; AVX2-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; AVX2-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; AVX2-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX2-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
-; AVX2-NEXT:    movw %ax, 28(%rdi)
-; AVX2-NEXT:    vmovd %xmm3, %eax
-; AVX2-NEXT:    vpermilps {{.*#+}} xmm3 = xmm1[3,1,2,3]
-; AVX2-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
-; AVX2-NEXT:    movw %ax, 26(%rdi)
-; AVX2-NEXT:    vmovd %xmm3, %eax
-; AVX2-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
-; AVX2-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
-; AVX2-NEXT:    movw %ax, 22(%rdi)
-; AVX2-NEXT:    vmovd %xmm3, %eax
+; AVX2-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm2, %xmm8
 ; AVX2-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm3, %xmm9
+; AVX2-NEXT:    vpermilps {{.*#+}} xmm4 = xmm0[3,1,2,3]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm4, %xmm10
+; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX2-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm6[1,1,3,3]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm5, %xmm11
+; AVX2-NEXT:    vpermilpd {{.*#+}} xmm7 = xmm6[1,0]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm7, %xmm12
+; AVX2-NEXT:    vpermilps {{.*#+}} xmm2 = xmm6[3,1,2,3]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm2, %xmm13
+; AVX2-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm3, %xmm14
+; AVX2-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm4, %xmm15
+; AVX2-NEXT:    vpermilps {{.*#+}} xmm5 = xmm1[3,1,2,3]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; AVX2-NEXT:    vextractf128 $1, %ymm1, %xmm7
+; AVX2-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm7[1,1,3,3]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; AVX2-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm7[1,0]
 ; AVX2-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
-; AVX2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX2-NEXT:    vpermilps {{.*#+}} xmm4 = xmm7[3,1,2,3]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
 ; AVX2-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX2-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
 ; AVX2-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX2-NEXT:    movw %ax, 20(%rdi)
-; AVX2-NEXT:    vmovd %xmm1, %eax
-; AVX2-NEXT:    vpermilps {{.*#+}} xmm1 = xmm2[3,1,2,3]
-; AVX2-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX2-NEXT:    movw %ax, 18(%rdi)
-; AVX2-NEXT:    vmovd %xmm1, %eax
-; AVX2-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
-; AVX2-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX2-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
-; AVX2-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX2-NEXT:    movw %ax, 14(%rdi)
-; AVX2-NEXT:    vmovd %xmm2, %eax
-; AVX2-NEXT:    movw %ax, 12(%rdi)
-; AVX2-NEXT:    vmovd %xmm1, %eax
-; AVX2-NEXT:    movw %ax, 10(%rdi)
-; AVX2-NEXT:    vmovd %xmm0, %eax
-; AVX2-NEXT:    movw %ax, 6(%rdi)
-; AVX2-NEXT:    vmovd %xmm3, %eax
-; AVX2-NEXT:    movw %ax, 4(%rdi)
-; AVX2-NEXT:    vmovd %xmm4, %eax
-; AVX2-NEXT:    movw %ax, 2(%rdi)
+; AVX2-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; AVX2-NEXT:    vpextrw $0, %xmm7, 24(%rdi)
+; AVX2-NEXT:    vpextrw $0, %xmm1, 16(%rdi)
+; AVX2-NEXT:    vpextrw $0, %xmm6, 8(%rdi)
+; AVX2-NEXT:    vpextrw $0, %xmm0, (%rdi)
+; AVX2-NEXT:    vpextrw $0, %xmm4, 30(%rdi)
+; AVX2-NEXT:    vpextrw $0, %xmm3, 28(%rdi)
+; AVX2-NEXT:    vpextrw $0, %xmm2, 26(%rdi)
+; AVX2-NEXT:    vpextrw $0, %xmm5, 22(%rdi)
+; AVX2-NEXT:    vpextrw $0, %xmm15, 20(%rdi)
+; AVX2-NEXT:    vpextrw $0, %xmm14, 18(%rdi)
+; AVX2-NEXT:    vpextrw $0, %xmm13, 14(%rdi)
+; AVX2-NEXT:    vpextrw $0, %xmm12, 12(%rdi)
+; AVX2-NEXT:    vpextrw $0, %xmm11, 10(%rdi)
+; AVX2-NEXT:    vpextrw $0, %xmm10, 6(%rdi)
+; AVX2-NEXT:    vpextrw $0, %xmm9, 4(%rdi)
+; AVX2-NEXT:    vpextrw $0, %xmm8, 2(%rdi)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: store_cvt_16f32_to_16i16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm2
-; AVX512-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; AVX512-NEXT:    vcvtps2ph $4, %xmm3, %xmm4
-; AVX512-NEXT:    vmovd %xmm4, %eax
-; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm4
-; AVX512-NEXT:    movw %ax, 24(%rdi)
-; AVX512-NEXT:    vmovd %xmm4, %eax
-; AVX512-NEXT:    vcvtps2ph $4, %xmm1, %xmm4
-; AVX512-NEXT:    movw %ax, 16(%rdi)
-; AVX512-NEXT:    vmovd %xmm4, %eax
-; AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm4
-; AVX512-NEXT:    movw %ax, 8(%rdi)
-; AVX512-NEXT:    vmovd %xmm4, %eax
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm4 = xmm3[3,1,2,3]
-; AVX512-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; AVX512-NEXT:    movw %ax, (%rdi)
-; AVX512-NEXT:    vmovd %xmm4, %eax
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
-; AVX512-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; AVX512-NEXT:    movw %ax, 30(%rdi)
-; AVX512-NEXT:    vmovd %xmm4, %eax
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX512-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
-; AVX512-NEXT:    movw %ax, 28(%rdi)
-; AVX512-NEXT:    vmovd %xmm3, %eax
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm3 = xmm2[3,1,2,3]
-; AVX512-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
-; AVX512-NEXT:    movw %ax, 26(%rdi)
-; AVX512-NEXT:    vmovd %xmm3, %eax
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
-; AVX512-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
-; AVX512-NEXT:    movw %ax, 22(%rdi)
-; AVX512-NEXT:    vmovd %xmm3, %eax
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX512-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX512-NEXT:    movw %ax, 20(%rdi)
-; AVX512-NEXT:    vmovd %xmm2, %eax
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX512-NEXT:    movw %ax, 18(%rdi)
-; AVX512-NEXT:    vmovd %xmm2, %eax
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm1, %xmm9
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm10
+; AVX512-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm3, %xmm11
+; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm6[1,1,3,3]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm4, %xmm12
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm5 = xmm6[1,0]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm5, %xmm13
+; AVX512-NEXT:    vpermilps {{.*#+}} xmm7 = xmm6[3,1,2,3]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm7, %xmm14
+; AVX512-NEXT:    vextractf64x4 $1, %zmm0, %ymm8
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm8[1,1,3,3]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm1, %xmm15
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm8[1,0]
 ; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512-NEXT:    vpermilps {{.*#+}} xmm3 = xmm8[3,1,2,3]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX512-NEXT:    vextractf128 $1, %ymm8, %xmm4
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; AVX512-NEXT:    vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm7, %xmm7
+; AVX512-NEXT:    vpermilps {{.*#+}} xmm1 = xmm4[3,1,2,3]
 ; AVX512-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
-; AVX512-NEXT:    movw %ax, 14(%rdi)
-; AVX512-NEXT:    vmovd %xmm1, %eax
-; AVX512-NEXT:    movw %ax, 12(%rdi)
-; AVX512-NEXT:    vmovd %xmm2, %eax
-; AVX512-NEXT:    movw %ax, 10(%rdi)
-; AVX512-NEXT:    vmovd %xmm0, %eax
-; AVX512-NEXT:    movw %ax, 6(%rdi)
-; AVX512-NEXT:    vmovd %xmm3, %eax
-; AVX512-NEXT:    movw %ax, 4(%rdi)
-; AVX512-NEXT:    vmovd %xmm4, %eax
-; AVX512-NEXT:    movw %ax, 2(%rdi)
+; AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT:    vcvtps2ph $4, %xmm6, %xmm6
+; AVX512-NEXT:    vcvtps2ph $4, %xmm8, %xmm8
+; AVX512-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; AVX512-NEXT:    vpextrw $0, %xmm4, 24(%rdi)
+; AVX512-NEXT:    vpextrw $0, %xmm8, 16(%rdi)
+; AVX512-NEXT:    vpextrw $0, %xmm6, 8(%rdi)
+; AVX512-NEXT:    vpextrw $0, %xmm0, (%rdi)
+; AVX512-NEXT:    vpextrw $0, %xmm1, 30(%rdi)
+; AVX512-NEXT:    vpextrw $0, %xmm7, 28(%rdi)
+; AVX512-NEXT:    vpextrw $0, %xmm5, 26(%rdi)
+; AVX512-NEXT:    vpextrw $0, %xmm3, 22(%rdi)
+; AVX512-NEXT:    vpextrw $0, %xmm2, 20(%rdi)
+; AVX512-NEXT:    vpextrw $0, %xmm15, 18(%rdi)
+; AVX512-NEXT:    vpextrw $0, %xmm14, 14(%rdi)
+; AVX512-NEXT:    vpextrw $0, %xmm13, 12(%rdi)
+; AVX512-NEXT:    vpextrw $0, %xmm12, 10(%rdi)
+; AVX512-NEXT:    vpextrw $0, %xmm11, 6(%rdi)
+; AVX512-NEXT:    vpextrw $0, %xmm10, 4(%rdi)
+; AVX512-NEXT:    vpextrw $0, %xmm9, 2(%rdi)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %1 = fptrunc <16 x float> %a0 to <16 x half>
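
The hunks above swap the old per-element pattern, where each vcvtps2ph result was moved through a GPR with vmovd and then stored with movw, for direct vpextrw stores to memory. For anyone wanting to reproduce this codegen locally, a minimal IR function of the same shape as store_cvt_16f32_to_16i16 might look like the sketch below. This is an illustrative sketch rather than the verbatim test body: the %a1 argument name and the bitcast-to-<16 x i16> store are assumptions based on the function name and the 2-byte stores to (%rdi) in the CHECK lines.

; Illustrative sketch only; not the exact body from
; llvm/test/CodeGen/X86/vector-half-conversions.ll.
define void @store_cvt_16f32_to_16i16(<16 x float> %a0, <16 x i16>* %a1) nounwind {
  ; Truncate the 16 floats to half precision.
  %1 = fptrunc <16 x float> %a0 to <16 x half>
  ; Reinterpret the halves as i16 lanes and store them to the pointer argument.
  %2 = bitcast <16 x half> %1 to <16 x i16>
  store <16 x i16> %2, <16 x i16>* %a1
  ret void
}

Feeding such a function to llc, for example with llc -mtriple=x86_64-unknown-unknown -mattr=+avx2,+f16c (illustrative flags; the test's actual RUN lines may differ), should show the vpextrw-based stores produced by the new lowering.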
