[llvm] r277932 - [AVX-512] Add AVX512 run line to a test and re-generate the checks. Future commits will refine some of the sequences.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sat Aug 6 12:31:47 PDT 2016


Author: ctopper
Date: Sat Aug  6 14:31:47 2016
New Revision: 277932

URL: http://llvm.org/viewvc/llvm-project?rev=277932&view=rev
Log:
[AVX-512] Add AVX512 run line to a test and re-generate the checks. Future commits will refine some of the sequences.

Modified:
    llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll
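
For context (an assumption, not stated in the commit itself): CHECK lines in this style are normally produced by the utils/update_llc_test_checks.py script rather than written by hand, so after adding the new AVX512 run line the checks can likely be refreshed with something like:

    $ utils/update_llc_test_checks.py test/CodeGen/X86/vec_int_to_fp.ll

(with a freshly built llc on PATH). The run line being added, shown in the first hunk below, is:

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512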

Modified: llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll?rev=277932&r1=277931&r2=277932&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll Sat Aug  6 14:31:47 2016
@@ -1,6 +1,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=VEX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=VEX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512
 ;
 ; 32-bit tests to make sure we're not doing anything stupid.
 ; RUN: llc < %s -mtriple=i686-unknown-unknown
@@ -24,15 +25,24 @@ define <2 x double> @sitofp_2i64_to_2f64
 ; SSE-NEXT:    movapd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: sitofp_2i64_to_2f64:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm1
-; AVX-NEXT:    vmovq %xmm0, %rax
-; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm0
-; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX-NEXT:    retq
+; VEX-LABEL: sitofp_2i64_to_2f64:
+; VEX:       # BB#0:
+; VEX-NEXT:    vpextrq $1, %xmm0, %rax
+; VEX-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm1
+; VEX-NEXT:    vmovq %xmm0, %rax
+; VEX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; VEX-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm0
+; VEX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_2i64_to_2f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512-NEXT:    retq
   %cvt = sitofp <2 x i64> %a to <2 x double>
   ret <2 x double> %cvt
 }
@@ -58,12 +68,18 @@ define <2 x double> @sitofp_4i32_to_2f64
 ; SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: sitofp_4i32_to_2f64:
-; AVX:       # BB#0:
-; AVX-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; AVX-NEXT:    # kill
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; VEX-LABEL: sitofp_4i32_to_2f64:
+; VEX:       # BB#0:
+; VEX-NEXT:    vcvtdq2pd %xmm0, %ymm0
+; VEX-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; VEX-NEXT:    vzeroupper
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_4i32_to_2f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vcvtdq2pd %xmm0, %ymm0
+; AVX512-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT:    retq
   %cvt = sitofp <4 x i32> %a to <4 x double>
   %shuf = shufflevector <4 x double> %cvt, <4 x double> undef, <2 x i32> <i32 0, i32 1>
   ret <2 x double> %shuf
@@ -99,7 +115,7 @@ define <2 x double> @sitofp_8i16_to_2f64
 ; AVX1:       # BB#0:
 ; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm0
 ; AVX1-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT:    # kill
+; AVX1-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -107,9 +123,16 @@ define <2 x double> @sitofp_8i16_to_2f64
 ; AVX2:       # BB#0:
 ; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; AVX2-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; AVX2-NEXT:    # kill
+; AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_8i16_to_2f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovsxwd %xmm0, %ymm0
+; AVX512-NEXT:    vcvtdq2pd %ymm0, %zmm0
+; AVX512-NEXT:    # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT:    retq
   %cvt = sitofp <8 x i16> %a to <8 x double>
   %shuf = shufflevector <8 x double> %cvt, <8 x double> undef, <2 x i32> <i32 0, i32 1>
   ret <2 x double> %shuf
@@ -147,7 +170,7 @@ define <2 x double> @sitofp_16i8_to_2f64
 ; AVX1:       # BB#0:
 ; AVX1-NEXT:    vpmovsxbd %xmm0, %xmm0
 ; AVX1-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT:    # kill
+; AVX1-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -156,9 +179,16 @@ define <2 x double> @sitofp_16i8_to_2f64
 ; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
 ; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; AVX2-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; AVX2-NEXT:    # kill
+; AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_16i8_to_2f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovsxbd %xmm0, %zmm0
+; AVX512-NEXT:    vcvtdq2pd %ymm0, %zmm0
+; AVX512-NEXT:    # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT:    retq
   %cvt = sitofp <16 x i8> %a to <16 x double>
   %shuf = shufflevector <16 x double> %cvt, <16 x double> undef, <2 x i32> <i32 0, i32 1>
   ret <2 x double> %shuf
@@ -218,6 +248,22 @@ define <4 x double> @sitofp_4i64_to_4f64
 ; AVX2-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_4i64_to_4f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vextracti32x4 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX512-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vmovq %xmm1, %rax
+; AVX512-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX512-NEXT:    vinsertf32x4 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %cvt = sitofp <4 x i64> %a to <4 x double>
   ret <4 x double> %cvt
 }
@@ -280,6 +326,13 @@ define <4 x double> @sitofp_8i16_to_4f64
 ; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; AVX2-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_8i16_to_4f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovsxwd %xmm0, %ymm0
+; AVX512-NEXT:    vcvtdq2pd %ymm0, %zmm0
+; AVX512-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT:    retq
   %cvt = sitofp <8 x i16> %a to <8 x double>
   %shuf = shufflevector <8 x double> %cvt, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x double> %shuf
@@ -329,6 +382,13 @@ define <4 x double> @sitofp_16i8_to_4f64
 ; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; AVX2-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_16i8_to_4f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovsxbd %xmm0, %zmm0
+; AVX512-NEXT:    vcvtdq2pd %ymm0, %zmm0
+; AVX512-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT:    retq
   %cvt = sitofp <16 x i8> %a to <16 x double>
   %shuf = shufflevector <16 x double> %cvt, <16 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x double> %shuf
@@ -355,19 +415,28 @@ define <2 x double> @uitofp_2i64_to_2f64
 ; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: uitofp_2i64_to_2f64:
-; AVX:       # BB#0:
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
-; AVX-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX-NEXT:    vmovapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25]
-; AVX-NEXT:    vsubpd %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vhaddpd %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX-NEXT:    vsubpd %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm0[0]
-; AVX-NEXT:    retq
+; VEX-LABEL: uitofp_2i64_to_2f64:
+; VEX:       # BB#0:
+; VEX-NEXT:    vmovdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
+; VEX-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; VEX-NEXT:    vmovapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25]
+; VEX-NEXT:    vsubpd %xmm3, %xmm2, %xmm2
+; VEX-NEXT:    vhaddpd %xmm2, %xmm2, %xmm2
+; VEX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; VEX-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; VEX-NEXT:    vsubpd %xmm3, %xmm0, %xmm0
+; VEX-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; VEX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_2i64_to_2f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2sdq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2sdq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512-NEXT:    retq
   %cvt = uitofp <2 x i64> %a to <2 x double>
   ret <2 x double> %cvt
 }
@@ -391,20 +460,30 @@ define <2 x double> @uitofp_2i32_to_2f64
 ; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: uitofp_2i32_to_2f64:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
-; AVX-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX-NEXT:    vmovapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25]
-; AVX-NEXT:    vsubpd %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vhaddpd %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX-NEXT:    vsubpd %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm0[0]
-; AVX-NEXT:    retq
+; VEX-LABEL: uitofp_2i32_to_2f64:
+; VEX:       # BB#0:
+; VEX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; VEX-NEXT:    vmovdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
+; VEX-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; VEX-NEXT:    vmovapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25]
+; VEX-NEXT:    vsubpd %xmm3, %xmm2, %xmm2
+; VEX-NEXT:    vhaddpd %xmm2, %xmm2, %xmm2
+; VEX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; VEX-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; VEX-NEXT:    vsubpd %xmm3, %xmm0, %xmm0
+; VEX-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; VEX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_2i32_to_2f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2sdq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2sdq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512-NEXT:    retq
   %shuf = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
   %cvt = uitofp <2 x i32> %shuf to <2 x double>
   ret <2 x double> %cvt
@@ -437,7 +516,7 @@ define <2 x double> @uitofp_4i32_to_2f64
 ; AVX1-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; AVX1-NEXT:    vmulpd {{.*}}(%rip), %ymm0, %ymm0
 ; AVX1-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
-; AVX1-NEXT:    # kill
+; AVX1-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -451,9 +530,15 @@ define <2 x double> @uitofp_4i32_to_2f64
 ; AVX2-NEXT:    vpand %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; AVX2-NEXT:    vaddpd %ymm0, %ymm1, %ymm0
-; AVX2-NEXT:    # kill
+; AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_4i32_to_2f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vcvtudq2pd %xmm0, %ymm0
+; AVX512-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT:    retq
   %cvt = uitofp <4 x i32> %a to <4 x double>
   %shuf = shufflevector <4 x double> %cvt, <4 x double> undef, <2 x i32> <i32 0, i32 1>
   ret <2 x double> %shuf
@@ -489,7 +574,7 @@ define <2 x double> @uitofp_8i16_to_2f64
 ; AVX1:       # BB#0:
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX1-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT:    # kill
+; AVX1-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -497,9 +582,16 @@ define <2 x double> @uitofp_8i16_to_2f64
 ; AVX2:       # BB#0:
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; AVX2-NEXT:    # kill
+; AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_8i16_to_2f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT:    vcvtudq2pd %ymm0, %zmm0
+; AVX512-NEXT:    # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT:    retq
   %cvt = uitofp <8 x i16> %a to <8 x double>
   %shuf = shufflevector <8 x double> %cvt, <8 x double> undef, <2 x i32> <i32 0, i32 1>
   ret <2 x double> %shuf
@@ -537,7 +629,7 @@ define <2 x double> @uitofp_16i8_to_2f64
 ; AVX1:       # BB#0:
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; AVX1-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT:    # kill
+; AVX1-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -546,9 +638,16 @@ define <2 x double> @uitofp_16i8_to_2f64
 ; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; AVX2-NEXT:    # kill
+; AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_16i8_to_2f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT:    vcvtudq2pd %ymm0, %zmm0
+; AVX512-NEXT:    # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT:    retq
   %cvt = uitofp <16 x i8> %a to <16 x double>
   %shuf = shufflevector <16 x double> %cvt, <16 x double> undef, <2 x i32> <i32 0, i32 1>
   ret <2 x double> %shuf
@@ -628,6 +727,22 @@ define <4 x double> @uitofp_4i64_to_4f64
 ; AVX2-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm3[0],xmm0[0]
 ; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_4i64_to_4f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vextracti32x4 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX512-NEXT:    vcvtusi2sdq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vmovq %xmm1, %rax
+; AVX512-NEXT:    vcvtusi2sdq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2sdq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2sdq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX512-NEXT:    vinsertf32x4 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %cvt = uitofp <4 x i64> %a to <4 x double>
   ret <4 x double> %cvt
 }
@@ -684,6 +799,11 @@ define <4 x double> @uitofp_4i32_to_4f64
 ; AVX2-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; AVX2-NEXT:    vaddpd %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_4i32_to_4f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vcvtudq2pd %xmm0, %ymm0
+; AVX512-NEXT:    retq
   %cvt = uitofp <4 x i32> %a to <4 x double>
   ret <4 x double> %cvt
 }
@@ -699,11 +819,17 @@ define <4 x double> @uitofp_4i16_to_4f64
 ; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: uitofp_4i16_to_4f64:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; AVX-NEXT:    retq
+; VEX-LABEL: uitofp_4i16_to_4f64:
+; VEX:       # BB#0:
+; VEX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; VEX-NEXT:    vcvtdq2pd %xmm0, %ymm0
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_4i16_to_4f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-NEXT:    vcvtudq2pd %xmm0, %ymm0
+; AVX512-NEXT:    retq
   %shuf = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %cvt = uitofp <4 x i16> %shuf to <4 x double>
   ret <4 x double> %cvt
@@ -731,6 +857,13 @@ define <4 x double> @uitofp_8i16_to_4f64
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_8i16_to_4f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT:    vcvtudq2pd %ymm0, %zmm0
+; AVX512-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT:    retq
   %cvt = uitofp <8 x i16> %a to <8 x double>
   %shuf = shufflevector <8 x double> %cvt, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x double> %shuf
@@ -748,11 +881,17 @@ define <4 x double> @uitofp_4i8_to_4f64(
 ; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: uitofp_4i8_to_4f64:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; AVX-NEXT:    retq
+; VEX-LABEL: uitofp_4i8_to_4f64:
+; VEX:       # BB#0:
+; VEX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; VEX-NEXT:    vcvtdq2pd %xmm0, %ymm0
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_4i8_to_4f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX512-NEXT:    vcvtudq2pd %xmm0, %ymm0
+; AVX512-NEXT:    retq
   %shuf = shufflevector <16 x i8> %a, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %cvt = uitofp <4 x i8> %shuf to <4 x double>
   ret <4 x double> %cvt
@@ -782,6 +921,13 @@ define <4 x double> @uitofp_16i8_to_4f64
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_16i8_to_4f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT:    vcvtudq2pd %ymm0, %zmm0
+; AVX512-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT:    retq
   %cvt = uitofp <16 x i8> %a to <16 x double>
   %shuf = shufflevector <16 x double> %cvt, <16 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x double> %shuf
@@ -804,18 +950,30 @@ define <4 x float> @sitofp_2i64_to_4f32(
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: sitofp_2i64_to_4f32:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
-; AVX-NEXT:    vmovq %xmm0, %rax
-; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
-; AVX-NEXT:    retq
+; VEX-LABEL: sitofp_2i64_to_4f32:
+; VEX:       # BB#0:
+; VEX-NEXT:    vpextrq $1, %xmm0, %rax
+; VEX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
+; VEX-NEXT:    vmovq %xmm0, %rax
+; VEX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; VEX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
+; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; VEX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
+; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_2i64_to_4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; AVX512-NEXT:    retq
   %cvt = sitofp <2 x i64> %a to <2 x float>
   %ext = shufflevector <2 x float> %cvt, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   ret <4 x float> %ext
@@ -837,18 +995,30 @@ define <4 x float> @sitofp_4i64_to_4f32_
 ; SSE-NEXT:    movaps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: sitofp_4i64_to_4f32_undef:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
-; AVX-NEXT:    vmovq %xmm0, %rax
-; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
-; AVX-NEXT:    retq
+; VEX-LABEL: sitofp_4i64_to_4f32_undef:
+; VEX:       # BB#0:
+; VEX-NEXT:    vpextrq $1, %xmm0, %rax
+; VEX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
+; VEX-NEXT:    vmovq %xmm0, %rax
+; VEX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; VEX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
+; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; VEX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
+; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_4i64_to_4f32_undef:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; AVX512-NEXT:    retq
   %ext = shufflevector <2 x i64> %a, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   %cvt = sitofp <4 x i64> %ext to <4 x float>
   ret <4 x float> %cvt
@@ -901,7 +1071,7 @@ define <4 x float> @sitofp_8i16_to_4f32(
 ; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT:    # kill
+; AVX1-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -909,9 +1079,16 @@ define <4 x float> @sitofp_8i16_to_4f32(
 ; AVX2:       # BB#0:
 ; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; AVX2-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT:    # kill
+; AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_8i16_to_4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovsxwd %xmm0, %ymm0
+; AVX512-NEXT:    vcvtdq2ps %ymm0, %ymm0
+; AVX512-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT:    retq
   %cvt = sitofp <8 x i16> %a to <8 x float>
   %shuf = shufflevector <8 x float> %cvt, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x float> %shuf
@@ -952,7 +1129,7 @@ define <4 x float> @sitofp_16i8_to_4f32(
 ; AVX1-NEXT:    vpmovsxbd %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT:    # kill
+; AVX1-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -961,9 +1138,16 @@ define <4 x float> @sitofp_16i8_to_4f32(
 ; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
 ; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; AVX2-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT:    # kill
+; AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_16i8_to_4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovsxbd %xmm0, %zmm0
+; AVX512-NEXT:    vcvtdq2ps %zmm0, %zmm0
+; AVX512-NEXT:    # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT:    retq
   %cvt = sitofp <16 x i8> %a to <16 x float>
   %shuf = shufflevector <16 x float> %cvt, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x float> %shuf
@@ -1025,6 +1209,22 @@ define <4 x float> @sitofp_4i64_to_4f32(
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_4i64_to_4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVX512-NEXT:    vextracti32x4 $1, %ymm0, %xmm0
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512-NEXT:    retq
   %cvt = sitofp <4 x i64> %a to <4 x float>
   ret <4 x float> %cvt
 }
@@ -1070,6 +1270,12 @@ define <8 x float> @sitofp_8i16_to_8f32(
 ; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; AVX2-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_8i16_to_8f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovsxwd %xmm0, %ymm0
+; AVX512-NEXT:    vcvtdq2ps %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %cvt = sitofp <8 x i16> %a to <8 x float>
   ret <8 x float> %cvt
 }
@@ -1103,6 +1309,12 @@ define <8 x float> @sitofp_8i8_to_8f32(<
 ; AVX2-NEXT:    vpmovsxbd %xmm0, %ymm0
 ; AVX2-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_8i8_to_8f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovsxbd %xmm0, %ymm0
+; AVX512-NEXT:    vcvtdq2ps %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shuf = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %cvt = sitofp <8 x i8> %shuf to <8 x float>
   ret <8 x float> %cvt
@@ -1138,6 +1350,13 @@ define <8 x float> @sitofp_16i8_to_8f32(
 ; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; AVX2-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_16i8_to_8f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovsxbd %xmm0, %zmm0
+; AVX512-NEXT:    vcvtdq2ps %zmm0, %zmm0
+; AVX512-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT:    retq
   %cvt = sitofp <16 x i8> %a to <16 x float>
   %shuf = shufflevector <16 x float> %cvt, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   ret <8 x float> %shuf
@@ -1187,48 +1406,60 @@ define <4 x float> @uitofp_2i64_to_4f32(
 ; SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: uitofp_2i64_to_4f32:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX-NEXT:    movl %eax, %ecx
-; AVX-NEXT:    andl $1, %ecx
-; AVX-NEXT:    testq %rax, %rax
-; AVX-NEXT:    js .LBB38_1
-; AVX-NEXT:  # BB#2:
-; AVX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
-; AVX-NEXT:    jmp .LBB38_3
-; AVX-NEXT:  .LBB38_1:
-; AVX-NEXT:    shrq %rax
-; AVX-NEXT:    orq %rax, %rcx
-; AVX-NEXT:    vcvtsi2ssq %rcx, %xmm0, %xmm1
-; AVX-NEXT:    vaddss %xmm1, %xmm1, %xmm1
-; AVX-NEXT:  .LBB38_3:
-; AVX-NEXT:    vmovq %xmm0, %rax
-; AVX-NEXT:    movl %eax, %ecx
-; AVX-NEXT:    andl $1, %ecx
-; AVX-NEXT:    testq %rax, %rax
-; AVX-NEXT:    js .LBB38_4
-; AVX-NEXT:  # BB#5:
-; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
-; AVX-NEXT:    jmp .LBB38_6
-; AVX-NEXT:  .LBB38_4:
-; AVX-NEXT:    shrq %rax
-; AVX-NEXT:    orq %rax, %rcx
-; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vcvtsi2ssq %rcx, %xmm0, %xmm0
-; AVX-NEXT:    vaddss %xmm0, %xmm0, %xmm0
-; AVX-NEXT:  .LBB38_6:
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    testq %rax, %rax
-; AVX-NEXT:    js .LBB38_8
-; AVX-NEXT:  # BB#7:
-; AVX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
-; AVX-NEXT:  .LBB38_8:
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
-; AVX-NEXT:    retq
+; VEX-LABEL: uitofp_2i64_to_4f32:
+; VEX:       # BB#0:
+; VEX-NEXT:    vpextrq $1, %xmm0, %rax
+; VEX-NEXT:    movl %eax, %ecx
+; VEX-NEXT:    andl $1, %ecx
+; VEX-NEXT:    testq %rax, %rax
+; VEX-NEXT:    js .LBB38_1
+; VEX-NEXT:  # BB#2:
+; VEX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
+; VEX-NEXT:    jmp .LBB38_3
+; VEX-NEXT:  .LBB38_1:
+; VEX-NEXT:    shrq %rax
+; VEX-NEXT:    orq %rax, %rcx
+; VEX-NEXT:    vcvtsi2ssq %rcx, %xmm0, %xmm1
+; VEX-NEXT:    vaddss %xmm1, %xmm1, %xmm1
+; VEX-NEXT:  .LBB38_3:
+; VEX-NEXT:    vmovq %xmm0, %rax
+; VEX-NEXT:    movl %eax, %ecx
+; VEX-NEXT:    andl $1, %ecx
+; VEX-NEXT:    testq %rax, %rax
+; VEX-NEXT:    js .LBB38_4
+; VEX-NEXT:  # BB#5:
+; VEX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; VEX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
+; VEX-NEXT:    jmp .LBB38_6
+; VEX-NEXT:  .LBB38_4:
+; VEX-NEXT:    shrq %rax
+; VEX-NEXT:    orq %rax, %rcx
+; VEX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; VEX-NEXT:    vcvtsi2ssq %rcx, %xmm0, %xmm0
+; VEX-NEXT:    vaddss %xmm0, %xmm0, %xmm0
+; VEX-NEXT:  .LBB38_6:
+; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; VEX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; VEX-NEXT:    testq %rax, %rax
+; VEX-NEXT:    js .LBB38_8
+; VEX-NEXT:  # BB#7:
+; VEX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
+; VEX-NEXT:  .LBB38_8:
+; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_2i64_to_4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; AVX512-NEXT:    retq
   %cvt = uitofp <2 x i64> %a to <2 x float>
   %ext = shufflevector <2 x float> %cvt, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   ret <4 x float> %ext
@@ -1283,48 +1514,60 @@ define <4 x float> @uitofp_4i64_to_4f32_
 ; SSE-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: uitofp_4i64_to_4f32_undef:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX-NEXT:    movl %eax, %ecx
-; AVX-NEXT:    andl $1, %ecx
-; AVX-NEXT:    testq %rax, %rax
-; AVX-NEXT:    js .LBB39_1
-; AVX-NEXT:  # BB#2:
-; AVX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
-; AVX-NEXT:    jmp .LBB39_3
-; AVX-NEXT:  .LBB39_1:
-; AVX-NEXT:    shrq %rax
-; AVX-NEXT:    orq %rax, %rcx
-; AVX-NEXT:    vcvtsi2ssq %rcx, %xmm0, %xmm1
-; AVX-NEXT:    vaddss %xmm1, %xmm1, %xmm1
-; AVX-NEXT:  .LBB39_3:
-; AVX-NEXT:    vmovq %xmm0, %rax
-; AVX-NEXT:    movl %eax, %ecx
-; AVX-NEXT:    andl $1, %ecx
-; AVX-NEXT:    testq %rax, %rax
-; AVX-NEXT:    js .LBB39_4
-; AVX-NEXT:  # BB#5:
-; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
-; AVX-NEXT:    jmp .LBB39_6
-; AVX-NEXT:  .LBB39_4:
-; AVX-NEXT:    shrq %rax
-; AVX-NEXT:    orq %rax, %rcx
-; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vcvtsi2ssq %rcx, %xmm0, %xmm0
-; AVX-NEXT:    vaddss %xmm0, %xmm0, %xmm0
-; AVX-NEXT:  .LBB39_6:
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    testq %rax, %rax
-; AVX-NEXT:    js .LBB39_8
-; AVX-NEXT:  # BB#7:
-; AVX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
-; AVX-NEXT:  .LBB39_8:
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
-; AVX-NEXT:    retq
+; VEX-LABEL: uitofp_4i64_to_4f32_undef:
+; VEX:       # BB#0:
+; VEX-NEXT:    vpextrq $1, %xmm0, %rax
+; VEX-NEXT:    movl %eax, %ecx
+; VEX-NEXT:    andl $1, %ecx
+; VEX-NEXT:    testq %rax, %rax
+; VEX-NEXT:    js .LBB39_1
+; VEX-NEXT:  # BB#2:
+; VEX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
+; VEX-NEXT:    jmp .LBB39_3
+; VEX-NEXT:  .LBB39_1:
+; VEX-NEXT:    shrq %rax
+; VEX-NEXT:    orq %rax, %rcx
+; VEX-NEXT:    vcvtsi2ssq %rcx, %xmm0, %xmm1
+; VEX-NEXT:    vaddss %xmm1, %xmm1, %xmm1
+; VEX-NEXT:  .LBB39_3:
+; VEX-NEXT:    vmovq %xmm0, %rax
+; VEX-NEXT:    movl %eax, %ecx
+; VEX-NEXT:    andl $1, %ecx
+; VEX-NEXT:    testq %rax, %rax
+; VEX-NEXT:    js .LBB39_4
+; VEX-NEXT:  # BB#5:
+; VEX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; VEX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
+; VEX-NEXT:    jmp .LBB39_6
+; VEX-NEXT:  .LBB39_4:
+; VEX-NEXT:    shrq %rax
+; VEX-NEXT:    orq %rax, %rcx
+; VEX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; VEX-NEXT:    vcvtsi2ssq %rcx, %xmm0, %xmm0
+; VEX-NEXT:    vaddss %xmm0, %xmm0, %xmm0
+; VEX-NEXT:  .LBB39_6:
+; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; VEX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; VEX-NEXT:    testq %rax, %rax
+; VEX-NEXT:    js .LBB39_8
+; VEX-NEXT:  # BB#7:
+; VEX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
+; VEX-NEXT:  .LBB39_8:
+; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; VEX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_4i64_to_4f32_undef:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; AVX512-NEXT:    retq
   %ext = shufflevector <2 x i64> %a, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
   %cvt = uitofp <4 x i64> %ext to <4 x float>
   ret <4 x float> %cvt
@@ -1362,6 +1605,11 @@ define <4 x float> @uitofp_4i32_to_4f32(
 ; AVX2-NEXT:    vaddps %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT:    vaddps %xmm0, %xmm1, %xmm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_4i32_to_4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vcvtudq2ps %xmm0, %xmm0
+; AVX512-NEXT:    retq
   %cvt = uitofp <4 x i32> %a to <4 x float>
   ret <4 x float> %cvt
 }
@@ -1374,11 +1622,17 @@ define <4 x float> @uitofp_4i16_to_4f32(
 ; SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: uitofp_4i16_to_4f32:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
-; AVX-NEXT:    retq
+; VEX-LABEL: uitofp_4i16_to_4f32:
+; VEX:       # BB#0:
+; VEX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; VEX-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_4i16_to_4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-NEXT:    vcvtudq2ps %xmm0, %xmm0
+; AVX512-NEXT:    retq
   %shuf = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %cvt = uitofp <4 x i16> %shuf to <4 x float>
   ret <4 x float> %cvt
@@ -1399,7 +1653,7 @@ define <4 x float> @uitofp_8i16_to_4f32(
 ; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT:    # kill
+; AVX1-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -1407,9 +1661,16 @@ define <4 x float> @uitofp_8i16_to_4f32(
 ; AVX2:       # BB#0:
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT:    # kill
+; AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_8i16_to_4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT:    vcvtudq2ps %ymm0, %ymm0
+; AVX512-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT:    retq
   %cvt = uitofp <8 x i16> %a to <8 x float>
   %shuf = shufflevector <8 x float> %cvt, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x float> %shuf
@@ -1424,11 +1685,17 @@ define <4 x float> @uitofp_4i8_to_4f32(<
 ; SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: uitofp_4i8_to_4f32:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
-; AVX-NEXT:    retq
+; VEX-LABEL: uitofp_4i8_to_4f32:
+; VEX:       # BB#0:
+; VEX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; VEX-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_4i8_to_4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX512-NEXT:    vcvtudq2ps %xmm0, %xmm0
+; AVX512-NEXT:    retq
   %shuf = shufflevector <16 x i8> %a, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %cvt = uitofp <4 x i8> %shuf to <4 x float>
   ret <4 x float> %cvt
@@ -1450,7 +1717,7 @@ define <4 x float> @uitofp_16i8_to_4f32(
 ; AVX1-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT:    # kill
+; AVX1-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
@@ -1459,9 +1726,16 @@ define <4 x float> @uitofp_16i8_to_4f32(
 ; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT:    # kill
+; AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_16i8_to_4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT:    vcvtudq2ps %zmm0, %zmm0
+; AVX512-NEXT:    # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT:    retq
   %cvt = uitofp <16 x i8> %a to <16 x float>
   %shuf = shufflevector <16 x float> %cvt, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x float> %shuf
@@ -1673,6 +1947,22 @@ define <4 x float> @uitofp_4i64_to_4f32(
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_4i64_to_4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVX512-NEXT:    vextracti32x4 $1, %ymm0, %xmm0
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512-NEXT:    retq
   %cvt = uitofp <4 x i64> %a to <4 x float>
   ret <4 x float> %cvt
 }
@@ -1723,6 +2013,11 @@ define <8 x float> @uitofp_8i32_to_8f32(
 ; AVX2-NEXT:    vaddps %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vaddps %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_8i32_to_8f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vcvtudq2ps %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %cvt = uitofp <8 x i32> %a to <8 x float>
   ret <8 x float> %cvt
 }
@@ -1753,6 +2048,12 @@ define <8 x float> @uitofp_8i16_to_8f32(
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_8i16_to_8f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512-NEXT:    vcvtudq2ps %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %cvt = uitofp <8 x i16> %a to <8 x float>
   ret <8 x float> %cvt
 }
@@ -1784,6 +2085,12 @@ define <8 x float> @uitofp_8i8_to_8f32(<
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
 ; AVX2-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_8i8_to_8f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX512-NEXT:    vcvtudq2ps %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shuf = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %cvt = uitofp <8 x i8> %shuf to <8 x float>
   ret <8 x float> %cvt
@@ -1817,6 +2124,13 @@ define <8 x float> @uitofp_16i8_to_8f32(
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_16i8_to_8f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT:    vcvtudq2ps %zmm0, %zmm0
+; AVX512-NEXT:    # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT:    retq
   %cvt = uitofp <16 x i8> %a to <16 x float>
   %shuf = shufflevector <16 x float> %cvt, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   ret <8 x float> %shuf
@@ -1839,16 +2153,26 @@ define <2 x double> @sitofp_load_2i64_to
 ; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: sitofp_load_2i64_to_2f64:
-; AVX:       # BB#0:
-; AVX-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm1
-; AVX-NEXT:    vmovq %xmm0, %rax
-; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm0
-; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX-NEXT:    retq
+; VEX-LABEL: sitofp_load_2i64_to_2f64:
+; VEX:       # BB#0:
+; VEX-NEXT:    vmovdqa (%rdi), %xmm0
+; VEX-NEXT:    vpextrq $1, %xmm0, %rax
+; VEX-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm1
+; VEX-NEXT:    vmovq %xmm0, %rax
+; VEX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; VEX-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm0
+; VEX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_load_2i64_to_2f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovdqa64 (%rdi), %xmm0
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512-NEXT:    retq
   %ld = load <2 x i64>, <2 x i64> *%a
   %cvt = sitofp <2 x i64> %ld to <2 x double>
   ret <2 x double> %cvt
@@ -1860,10 +2184,17 @@ define <2 x double> @sitofp_load_2i32_to
 ; SSE-NEXT:    cvtdq2pd (%rdi), %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: sitofp_load_2i32_to_2f64:
-; AVX:       # BB#0:
-; AVX-NEXT:    vcvtdq2pd (%rdi), %xmm0
-; AVX-NEXT:    retq
+; VEX-LABEL: sitofp_load_2i32_to_2f64:
+; VEX:       # BB#0:
+; VEX-NEXT:    vcvtdq2pd (%rdi), %xmm0
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_load_2i32_to_2f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX512-NEXT:    vcvtdq2pd %xmm0, %xmm0
+; AVX512-NEXT:    retq
   %ld = load <2 x i32>, <2 x i32> *%a
   %cvt = sitofp <2 x i32> %ld to <2 x double>
   ret <2 x double> %cvt
@@ -1968,6 +2299,23 @@ define <4 x double> @sitofp_load_4i64_to
 ; AVX2-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_load_4i64_to_4f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovdqa64 (%rdi), %ymm0
+; AVX512-NEXT:    vextracti32x4 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX512-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vmovq %xmm1, %rax
+; AVX512-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX512-NEXT:    vinsertf32x4 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %ld = load <4 x i64>, <4 x i64> *%a
   %cvt = sitofp <4 x i64> %ld to <4 x double>
   ret <4 x double> %cvt
@@ -2056,20 +2404,30 @@ define <2 x double> @uitofp_load_2i64_to
 ; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: uitofp_load_2i64_to_2f64:
-; AVX:       # BB#0:
-; AVX-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
-; AVX-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX-NEXT:    vmovapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25]
-; AVX-NEXT:    vsubpd %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vhaddpd %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX-NEXT:    vsubpd %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm0[0]
-; AVX-NEXT:    retq
+; VEX-LABEL: uitofp_load_2i64_to_2f64:
+; VEX:       # BB#0:
+; VEX-NEXT:    vmovdqa (%rdi), %xmm0
+; VEX-NEXT:    vmovdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
+; VEX-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; VEX-NEXT:    vmovapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25]
+; VEX-NEXT:    vsubpd %xmm3, %xmm2, %xmm2
+; VEX-NEXT:    vhaddpd %xmm2, %xmm2, %xmm2
+; VEX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; VEX-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; VEX-NEXT:    vsubpd %xmm3, %xmm0, %xmm0
+; VEX-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; VEX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_load_2i64_to_2f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovdqa64 (%rdi), %xmm0
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2sdq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2sdq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512-NEXT:    retq
   %ld = load <2 x i64>, <2 x i64> *%a
   %cvt = uitofp <2 x i64> %ld to <2 x double>
   ret <2 x double> %cvt
@@ -2095,20 +2453,32 @@ define <2 x double> @uitofp_load_2i32_to
 ; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: uitofp_load_2i32_to_2f64:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
-; AVX-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX-NEXT:    vmovapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25]
-; AVX-NEXT:    vsubpd %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vhaddpd %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX-NEXT:    vsubpd %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm0[0]
-; AVX-NEXT:    retq
+; VEX-LABEL: uitofp_load_2i32_to_2f64:
+; VEX:       # BB#0:
+; VEX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; VEX-NEXT:    vmovdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0]
+; VEX-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; VEX-NEXT:    vmovapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25]
+; VEX-NEXT:    vsubpd %xmm3, %xmm2, %xmm2
+; VEX-NEXT:    vhaddpd %xmm2, %xmm2, %xmm2
+; VEX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; VEX-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; VEX-NEXT:    vsubpd %xmm3, %xmm0, %xmm0
+; VEX-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; VEX-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_load_2i32_to_2f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; AVX512-NEXT:    vpxord %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2sdq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2sdq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512-NEXT:    retq
   %ld = load <2 x i32>, <2 x i32> *%a
   %cvt = uitofp <2 x i32> %ld to <2 x double>
   ret <2 x double> %cvt
@@ -2123,12 +2493,21 @@ define <2 x double> @uitofp_load_2i16_to
 ; SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: uitofp_load_2i16_to_2f64:
-; AVX:       # BB#0:
-; AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
-; AVX-NEXT:    retq
+; VEX-LABEL: uitofp_load_2i16_to_2f64:
+; VEX:       # BB#0:
+; VEX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; VEX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; VEX-NEXT:    vcvtdq2pd %xmm0, %xmm0
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_load_2i16_to_2f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX512-NEXT:    vpxord %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3,4,5,6,7]
+; AVX512-NEXT:    vcvtdq2pd %xmm0, %xmm0
+; AVX512-NEXT:    retq
   %ld = load <2 x i16>, <2 x i16> *%a
   %cvt = uitofp <2 x i16> %ld to <2 x double>
   ret <2 x double> %cvt
@@ -2145,13 +2524,20 @@ define <2 x double> @uitofp_load_2i8_to_
 ; SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: uitofp_load_2i8_to_2f64:
-; AVX:       # BB#0:
-; AVX-NEXT:    movzwl (%rdi), %eax
-; AVX-NEXT:    vmovd %eax, %xmm0
-; AVX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX-NEXT:    vcvtdq2pd %xmm0, %xmm0
-; AVX-NEXT:    retq
+; VEX-LABEL: uitofp_load_2i8_to_2f64:
+; VEX:       # BB#0:
+; VEX-NEXT:    movzwl (%rdi), %eax
+; VEX-NEXT:    vmovd %eax, %xmm0
+; VEX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; VEX-NEXT:    vcvtdq2pd %xmm0, %xmm0
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_load_2i8_to_2f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[u],zero,zero,zero,xmm0[u],zero,zero,zero
+; AVX512-NEXT:    vcvtdq2pd %xmm0, %xmm0
+; AVX512-NEXT:    retq
   %ld = load <2 x i8>, <2 x i8> *%a
   %cvt = uitofp <2 x i8> %ld to <2 x double>
   ret <2 x double> %cvt
@@ -2235,6 +2621,23 @@ define <4 x double> @uitofp_load_4i64_to
 ; AVX2-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm3[0],xmm0[0]
 ; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_load_4i64_to_4f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovdqa64 (%rdi), %ymm0
+; AVX512-NEXT:    vextracti32x4 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX512-NEXT:    vcvtusi2sdq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vmovq %xmm1, %rax
+; AVX512-NEXT:    vcvtusi2sdq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2sdq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2sdq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX512-NEXT:    vinsertf32x4 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %ld = load <4 x i64>, <4 x i64> *%a
   %cvt = uitofp <4 x i64> %ld to <4 x double>
   ret <4 x double> %cvt
@@ -2295,6 +2698,11 @@ define <4 x double> @uitofp_load_4i32_to
 ; AVX2-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; AVX2-NEXT:    vaddpd %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_load_4i32_to_4f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vcvtudq2pd (%rdi), %ymm0
+; AVX512-NEXT:    retq
   %ld = load <4 x i32>, <4 x i32> *%a
   %cvt = uitofp <4 x i32> %ld to <4 x double>
   ret <4 x double> %cvt
@@ -2311,11 +2719,17 @@ define <4 x double> @uitofp_load_4i16_to
 ; SSE-NEXT:    cvtdq2pd %xmm1, %xmm1
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: uitofp_load_4i16_to_4f64:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; AVX-NEXT:    retq
+; VEX-LABEL: uitofp_load_4i16_to_4f64:
+; VEX:       # BB#0:
+; VEX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; VEX-NEXT:    vcvtdq2pd %xmm0, %ymm0
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_load_4i16_to_4f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX512-NEXT:    vcvtudq2pd %xmm0, %ymm0
+; AVX512-NEXT:    retq
   %ld = load <4 x i16>, <4 x i16> *%a
   %cvt = uitofp <4 x i16> %ld to <4 x double>
   ret <4 x double> %cvt
@@ -2333,11 +2747,17 @@ define <4 x double> @uitofp_load_4i8_to_
 ; SSE-NEXT:    cvtdq2pd %xmm1, %xmm1
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: uitofp_load_4i8_to_4f64:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; AVX-NEXT:    retq
+; VEX-LABEL: uitofp_load_4i8_to_4f64:
+; VEX:       # BB#0:
+; VEX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; VEX-NEXT:    vcvtdq2pd %xmm0, %ymm0
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_load_4i8_to_4f64:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX512-NEXT:    vcvtudq2pd %xmm0, %ymm0
+; AVX512-NEXT:    retq
   %ld = load <4 x i8>, <4 x i8> *%a
   %cvt = uitofp <4 x i8> %ld to <4 x double>
   ret <4 x double> %cvt
@@ -2406,6 +2826,23 @@ define <4 x float> @sitofp_load_4i64_to_
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_load_4i64_to_4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovdqa64 (%rdi), %ymm0
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVX512-NEXT:    vextracti32x4 $1, %ymm0, %xmm0
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512-NEXT:    retq
   %ld = load <4 x i64>, <4 x i64> *%a
   %cvt = sitofp <4 x i64> %ld to <4 x float>
   ret <4 x float> %cvt
@@ -2569,6 +3006,37 @@ define <8 x float> @sitofp_load_8i64_to_
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
 ; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_load_8i64_to_8f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
+; AVX512-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vmovq %xmm1, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX512-NEXT:    vextracti32x4 $3, %zmm0, %xmm2
+; AVX512-NEXT:    vmovq %xmm2, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm3
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; AVX512-NEXT:    vpextrq $1, %xmm2, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm3
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
+; AVX512-NEXT:    vextracti32x4 $1, %zmm0, %xmm0
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm3
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
+; AVX512-NEXT:    vinsertf32x4 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %ld = load <8 x i64>, <8 x i64> *%a
   %cvt = sitofp <8 x i64> %ld to <8 x float>
   ret <8 x float> %cvt
@@ -2616,6 +3084,12 @@ define <8 x float> @sitofp_load_8i16_to_
 ; AVX2-NEXT:    vpmovsxwd (%rdi), %ymm0
 ; AVX2-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_load_8i16_to_8f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovsxwd (%rdi), %ymm0
+; AVX512-NEXT:    vcvtdq2ps %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %ld = load <8 x i16>, <8 x i16> *%a
   %cvt = sitofp <8 x i16> %ld to <8 x float>
   ret <8 x float> %cvt
@@ -2651,6 +3125,12 @@ define <8 x float> @sitofp_load_8i8_to_8
 ; AVX2-NEXT:    vpmovsxbd (%rdi), %ymm0
 ; AVX2-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: sitofp_load_8i8_to_8f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovsxbd (%rdi), %ymm0
+; AVX512-NEXT:    vcvtdq2ps %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %ld = load <8 x i8>, <8 x i8> *%a
   %cvt = sitofp <8 x i8> %ld to <8 x float>
   ret <8 x float> %cvt
@@ -2869,6 +3349,23 @@ define <4 x float> @uitofp_load_4i64_to_
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_load_4i64_to_4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovdqa64 (%rdi), %ymm0
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVX512-NEXT:    vextracti32x4 $1, %ymm0, %xmm0
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512-NEXT:    retq
   %ld = load <4 x i64>, <4 x i64> *%a
   %cvt = uitofp <4 x i64> %ld to <4 x float>
   ret <4 x float> %cvt
@@ -2909,6 +3406,11 @@ define <4 x float> @uitofp_load_4i32_to_
 ; AVX2-NEXT:    vaddps %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT:    vaddps %xmm0, %xmm1, %xmm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_load_4i32_to_4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vcvtudq2ps (%rdi), %xmm0
+; AVX512-NEXT:    retq
   %ld = load <4 x i32>, <4 x i32> *%a
   %cvt = uitofp <4 x i32> %ld to <4 x float>
   ret <4 x float> %cvt
@@ -2923,11 +3425,17 @@ define <4 x float> @uitofp_load_4i16_to_
 ; SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: uitofp_load_4i16_to_4f32:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
-; AVX-NEXT:    retq
+; VEX-LABEL: uitofp_load_4i16_to_4f32:
+; VEX:       # BB#0:
+; VEX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; VEX-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_load_4i16_to_4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX512-NEXT:    vcvtudq2ps %xmm0, %xmm0
+; AVX512-NEXT:    retq
   %ld = load <4 x i16>, <4 x i16> *%a
   %cvt = uitofp <4 x i16> %ld to <4 x float>
   ret <4 x float> %cvt
@@ -2943,11 +3451,17 @@ define <4 x float> @uitofp_load_4i8_to_4
 ; SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: uitofp_load_4i8_to_4f32:
-; AVX:       # BB#0:
-; AVX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
-; AVX-NEXT:    retq
+; VEX-LABEL: uitofp_load_4i8_to_4f32:
+; VEX:       # BB#0:
+; VEX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; VEX-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; VEX-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_load_4i8_to_4f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; AVX512-NEXT:    vcvtudq2ps %xmm0, %xmm0
+; AVX512-NEXT:    retq
   %ld = load <4 x i8>, <4 x i8> *%a
   %cvt = uitofp <4 x i8> %ld to <4 x float>
   ret <4 x float> %cvt
@@ -3349,6 +3863,37 @@ define <8 x float> @uitofp_load_8i64_to_
 ; AVX2-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
 ; AVX2-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_load_8i64_to_8f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
+; AVX512-NEXT:    vpextrq $1, %xmm1, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vmovq %xmm1, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm1
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX512-NEXT:    vextracti32x4 $3, %zmm0, %xmm2
+; AVX512-NEXT:    vmovq %xmm2, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm3
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; AVX512-NEXT:    vpextrq $1, %xmm2, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm2
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm3
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
+; AVX512-NEXT:    vextracti32x4 $1, %zmm0, %xmm0
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm3
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
+; AVX512-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512-NEXT:    vcvtusi2ssq %rax, %xmm0, %xmm0
+; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
+; AVX512-NEXT:    vinsertf32x4 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %ld = load <8 x i64>, <8 x i64> *%a
   %cvt = uitofp <8 x i64> %ld to <8 x float>
   ret <8 x float> %cvt
@@ -3404,6 +3949,11 @@ define <8 x float> @uitofp_load_8i32_to_
 ; AVX2-NEXT:    vaddps %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vaddps %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_load_8i32_to_8f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vcvtudq2ps (%rdi), %ymm0
+; AVX512-NEXT:    retq
   %ld = load <8 x i32>, <8 x i32> *%a
   %cvt = uitofp <8 x i32> %ld to <8 x float>
   ret <8 x float> %cvt
@@ -3434,6 +3984,12 @@ define <8 x float> @uitofp_load_8i16_to_
 ; AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX2-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_load_8i16_to_8f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX512-NEXT:    vcvtudq2ps %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %ld = load <8 x i16>, <8 x i16> *%a
   %cvt = uitofp <8 x i16> %ld to <8 x float>
   ret <8 x float> %cvt
@@ -3465,6 +4021,12 @@ define <8 x float> @uitofp_load_8i8_to_8
 ; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
 ; AVX2-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: uitofp_load_8i8_to_8f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX512-NEXT:    vcvtudq2ps %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %ld = load <8 x i8>, <8 x i8> *%a
   %cvt = uitofp <8 x i8> %ld to <8 x float>
   ret <8 x float> %cvt
@@ -3511,6 +4073,14 @@ define void @aggregate_sitofp_8i16_to_8f
 ; AVX2-NEXT:    vmovaps %ymm0, (%rax)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: aggregate_sitofp_8i16_to_8f32:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    movq 24(%rdi), %rax
+; AVX512-NEXT:    vpmovsxwd 8(%rdi), %ymm0
+; AVX512-NEXT:    vcvtdq2ps %ymm0, %ymm0
+; AVX512-NEXT:    vmovaps %ymm0, (%rax)
+; AVX512-NEXT:    retq
  %1 = load %Arguments, %Arguments* %a0, align 1
  %2 = extractvalue %Arguments %1, 1
  %3 = extractvalue %Arguments %1, 2



