[llvm] r291744 - [AVX-512] Add more varied avx512 feature command lines to the avx512-cvt.ll test to show some poor codegen examples.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 11 22:49:03 PST 2017


Author: ctopper
Date: Thu Jan 12 00:49:03 2017
New Revision: 291744

URL: http://llvm.org/viewvc/llvm-project?rev=291744&view=rev
Log:
[AVX-512] Add more varied avx512 feature command lines to the avx512-cvt.ll test to show some poor codegen examples.

We're definitely generating poor code when avx512vl is enabled without avx512dq. It looks like avx512vl/avx512dq without avx512bw may also have some issues.
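
The new RUN lines give each feature combination a set of shared FileCheck prefixes (NOVL/NODQ/NOVLDQ for the missing features, VL/DQ/VLDQ/VLBW/VLNODQ/VLNOBW for the present ones) so that configurations producing identical code can share CHECK lines. As a quick illustration of the avx512vl-without-avx512dq problem, the sitofp_4i1_float function from the test can be run standalone; this is a minimal sketch assembled from the RUN lines and IR in the diff below, nothing in it is new to the test:

    ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512vl
    ; Without avx512dq there is no vpmovm2d, so the <4 x i1> mask is
    ; expanded element by element with kshift/kmov/test/cmov and
    ; vmovd/vpinsrd inserts before the final vcvtdq2ps (see the
    ; VLNODQ check lines below).
    define <4 x float> @sitofp_4i1_float(<4 x float> %a) {
      %cmpres = fcmp ogt <4 x float> %a, zeroinitializer
      %1 = sitofp <4 x i1> %cmpres to <4 x float>
      ret <4 x float> %1
    }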

Modified:
    llvm/trunk/test/CodeGen/X86/avx512-cvt.ll

Modified: llvm/trunk/test/CodeGen/X86/avx512-cvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-cvt.ll?rev=291744&r1=291743&r2=291744&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-cvt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-cvt.ll Thu Jan 12 00:49:03 2017
@@ -1,6 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl  | FileCheck %s --check-prefix=ALL --check-prefix=KNL
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx  | FileCheck %s --check-prefix=ALL --check-prefix=SKX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl  | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=NODQ --check-prefix=NOVLDQ --check-prefix=KNL
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx  | FileCheck %s --check-prefix=ALL --check-prefix=DQ --check-prefix=VL --check-prefix=VLDQ --check-prefix=VLBW --check-prefix=SKX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512vl  | FileCheck %s --check-prefix=ALL --check-prefix=NODQ --check-prefix=VL --check-prefix=VLNODQ --check-prefix=VLNOBW --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512dq  | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=DQ --check-prefix=AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512bw  | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=NODQ --check-prefix=NOVLDQ --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512vl,avx512dq  | FileCheck %s --check-prefix=ALL --check-prefix=DQ --check-prefix=VL --check-prefix=VLDQ --check-prefix=VLNOBW --check-prefix=AVX512VLDQ
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx512vl,avx512bw  | FileCheck %s --check-prefix=ALL --check-prefix=NODQ --check-prefix=VL --check-prefix=VLNODQ --check-prefix=VLBW --check-prefix=AVX512VLBW
+
 
 define <16 x float> @sitof32(<16 x i32> %a) nounwind {
 ; ALL-LABEL: sitof32:
@@ -12,255 +18,304 @@ define <16 x float> @sitof32(<16 x i32>
 }
 
 define <8 x double> @sltof864(<8 x i64> %a) {
-; KNL-LABEL: sltof864:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
-; KNL-NEXT:    vpextrq $1, %xmm1, %rax
-; KNL-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
-; KNL-NEXT:    vmovq %xmm1, %rax
-; KNL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
-; KNL-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; KNL-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
-; KNL-NEXT:    vpextrq $1, %xmm2, %rax
-; KNL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm3
-; KNL-NEXT:    vmovq %xmm2, %rax
-; KNL-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm2
-; KNL-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; KNL-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; KNL-NEXT:    vextracti32x4 $1, %zmm0, %xmm2
-; KNL-NEXT:    vpextrq $1, %xmm2, %rax
-; KNL-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm3
-; KNL-NEXT:    vmovq %xmm2, %rax
-; KNL-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm2
-; KNL-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; KNL-NEXT:    vpextrq $1, %xmm0, %rax
-; KNL-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm3
-; KNL-NEXT:    vmovq %xmm0, %rax
-; KNL-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm0
-; KNL-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm3[0]
-; KNL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; KNL-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: sltof864:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvtqq2pd %zmm0, %zmm0
-; SKX-NEXT:    retq
+; NODQ-LABEL: sltof864:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
+; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
+; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vmovq %xmm1, %rax
+; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; NODQ-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; NODQ-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
+; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
+; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm3
+; NODQ-NEXT:    vmovq %xmm2, %rax
+; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; NODQ-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; NODQ-NEXT:    vextracti32x4 $1, %zmm0, %xmm2
+; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
+; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vmovq %xmm2, %rax
+; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
+; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vmovq %xmm0, %rax
+; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm4, %xmm0
+; NODQ-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; NODQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; NODQ-NEXT:    retq
+;
+; DQ-LABEL: sltof864:
+; DQ:       ## BB#0:
+; DQ-NEXT:    vcvtqq2pd %zmm0, %zmm0
+; DQ-NEXT:    retq
   %b = sitofp <8 x i64> %a to <8 x double>
   ret <8 x double> %b
 }
 
 define <4 x double> @sltof464(<4 x i64> %a) {
-; KNL-LABEL: sltof464:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; KNL-NEXT:    vpextrq $1, %xmm1, %rax
-; KNL-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
-; KNL-NEXT:    vmovq %xmm1, %rax
-; KNL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
-; KNL-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; KNL-NEXT:    vpextrq $1, %xmm0, %rax
-; KNL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
-; KNL-NEXT:    vmovq %xmm0, %rax
-; KNL-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
-; KNL-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; KNL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: sltof464:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvtqq2pd %ymm0, %ymm0
-; SKX-NEXT:    retq
+; NODQ-LABEL: sltof464:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
+; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vmovq %xmm1, %rax
+; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm1
+; NODQ-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
+; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm2
+; NODQ-NEXT:    vmovq %xmm0, %rax
+; NODQ-NEXT:    vcvtsi2sdq %rax, %xmm3, %xmm0
+; NODQ-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; NODQ-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; NODQ-NEXT:    retq
+;
+; VLDQ-LABEL: sltof464:
+; VLDQ:       ## BB#0:
+; VLDQ-NEXT:    vcvtqq2pd %ymm0, %ymm0
+; VLDQ-NEXT:    retq
+;
+; AVX512DQ-LABEL: sltof464:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT:    vcvtqq2pd %zmm0, %zmm0
+; AVX512DQ-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT:    retq
   %b = sitofp <4 x i64> %a to <4 x double>
   ret <4 x double> %b
 }
 
 define <2 x float> @sltof2f32(<2 x i64> %a) {
-; KNL-LABEL: sltof2f32:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vpextrq $1, %xmm0, %rax
-; KNL-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
-; KNL-NEXT:    vmovq %xmm0, %rax
-; KNL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
-; KNL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; KNL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
-; KNL-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: sltof2f32:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvtqq2ps %xmm0, %xmm0
-; SKX-NEXT:    retq
+; NODQ-LABEL: sltof2f32:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
+; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; NODQ-NEXT:    vmovq %xmm0, %rax
+; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm1
+; NODQ-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,0]
+; NODQ-NEXT:    retq
+;
+; VLDQ-LABEL: sltof2f32:
+; VLDQ:       ## BB#0:
+; VLDQ-NEXT:    vcvtqq2ps %xmm0, %xmm0
+; VLDQ-NEXT:    retq
+;
+; AVX512DQ-LABEL: sltof2f32:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT:    vcvtqq2ps %zmm0, %ymm0
+; AVX512DQ-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT:    retq
   %b = sitofp <2 x i64> %a to <2 x float>
   ret <2 x float>%b
 }
 
 define <4 x float> @sltof4f32_mem(<4 x i64>* %a) {
-; KNL-LABEL: sltof4f32_mem:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vmovdqu (%rdi), %ymm0
-; KNL-NEXT:    vpextrq $1, %xmm0, %rax
-; KNL-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
-; KNL-NEXT:    vmovq %xmm0, %rax
-; KNL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
-; KNL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vmovq %xmm0, %rax
-; KNL-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
-; KNL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; KNL-NEXT:    vpextrq $1, %xmm0, %rax
-; KNL-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
-; KNL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: sltof4f32_mem:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvtqq2psy (%rdi), %xmm0
-; SKX-NEXT:    retq
+; NODQ-LABEL: sltof4f32_mem:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    vmovdqu (%rdi), %ymm0
+; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
+; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; NODQ-NEXT:    vmovq %xmm0, %rax
+; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; NODQ-NEXT:    vmovq %xmm0, %rax
+; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
+; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; NODQ-NEXT:    retq
+;
+; VLDQ-LABEL: sltof4f32_mem:
+; VLDQ:       ## BB#0:
+; VLDQ-NEXT:    vcvtqq2psy (%rdi), %xmm0
+; VLDQ-NEXT:    retq
+;
+; AVX512DQ-LABEL: sltof4f32_mem:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vmovups (%rdi), %ymm0
+; AVX512DQ-NEXT:    vcvtqq2ps %zmm0, %ymm0
+; AVX512DQ-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT:    retq
   %a1 = load <4 x i64>, <4 x i64>* %a, align 8
   %b = sitofp <4 x i64> %a1 to <4 x float>
   ret <4 x float>%b
 }
 
 define <4 x i64> @f64tosl(<4 x double> %a) {
-; KNL-LABEL: f64tosl:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; KNL-NEXT:    vcvttsd2si %xmm1, %rax
-; KNL-NEXT:    vmovq %rax, %xmm2
-; KNL-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; KNL-NEXT:    vcvttsd2si %xmm1, %rax
-; KNL-NEXT:    vmovq %rax, %xmm1
-; KNL-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; KNL-NEXT:    vcvttsd2si %xmm0, %rax
-; KNL-NEXT:    vmovq %rax, %xmm2
-; KNL-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; KNL-NEXT:    vcvttsd2si %xmm0, %rax
-; KNL-NEXT:    vmovq %rax, %xmm0
-; KNL-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
-; KNL-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: f64tosl:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvttpd2qq %ymm0, %ymm0
-; SKX-NEXT:    retq
+; NODQ-LABEL: f64tosl:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; NODQ-NEXT:    vcvttsd2si %xmm1, %rax
+; NODQ-NEXT:    vmovq %rax, %xmm2
+; NODQ-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; NODQ-NEXT:    vcvttsd2si %xmm1, %rax
+; NODQ-NEXT:    vmovq %rax, %xmm1
+; NODQ-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; NODQ-NEXT:    vcvttsd2si %xmm0, %rax
+; NODQ-NEXT:    vmovq %rax, %xmm2
+; NODQ-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; NODQ-NEXT:    vcvttsd2si %xmm0, %rax
+; NODQ-NEXT:    vmovq %rax, %xmm0
+; NODQ-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; NODQ-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; NODQ-NEXT:    retq
+;
+; VLDQ-LABEL: f64tosl:
+; VLDQ:       ## BB#0:
+; VLDQ-NEXT:    vcvttpd2qq %ymm0, %ymm0
+; VLDQ-NEXT:    retq
+;
+; AVX512DQ-LABEL: f64tosl:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT:    vcvttpd2qq %zmm0, %zmm0
+; AVX512DQ-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT:    retq
   %b = fptosi <4 x double> %a to <4 x i64>
   ret <4 x i64> %b
 }
 
 define <4 x i64> @f32tosl(<4 x float> %a) {
-; KNL-LABEL: f32tosl:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; KNL-NEXT:    vcvttss2si %xmm1, %rax
-; KNL-NEXT:    vmovq %rax, %xmm1
-; KNL-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; KNL-NEXT:    vcvttss2si %xmm2, %rax
-; KNL-NEXT:    vmovq %rax, %xmm2
-; KNL-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; KNL-NEXT:    vcvttss2si %xmm0, %rax
-; KNL-NEXT:    vmovq %rax, %xmm2
-; KNL-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; KNL-NEXT:    vcvttss2si %xmm0, %rax
-; KNL-NEXT:    vmovq %rax, %xmm0
-; KNL-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
-; KNL-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: f32tosl:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvttps2qq %xmm0, %ymm0
-; SKX-NEXT:    retq
+; NODQ-LABEL: f32tosl:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; NODQ-NEXT:    vcvttss2si %xmm1, %rax
+; NODQ-NEXT:    vmovq %rax, %xmm1
+; NODQ-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; NODQ-NEXT:    vcvttss2si %xmm2, %rax
+; NODQ-NEXT:    vmovq %rax, %xmm2
+; NODQ-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; NODQ-NEXT:    vcvttss2si %xmm0, %rax
+; NODQ-NEXT:    vmovq %rax, %xmm2
+; NODQ-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; NODQ-NEXT:    vcvttss2si %xmm0, %rax
+; NODQ-NEXT:    vmovq %rax, %xmm0
+; NODQ-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; NODQ-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; NODQ-NEXT:    retq
+;
+; VLDQ-LABEL: f32tosl:
+; VLDQ:       ## BB#0:
+; VLDQ-NEXT:    vcvttps2qq %xmm0, %ymm0
+; VLDQ-NEXT:    retq
+;
+; AVX512DQ-LABEL: f32tosl:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512DQ-NEXT:    vcvttps2qq %ymm0, %zmm0
+; AVX512DQ-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT:    retq
   %b = fptosi <4 x float> %a to <4 x i64>
   ret <4 x i64> %b
 }
 
 define <4 x float> @sltof432(<4 x i64> %a) {
-; KNL-LABEL: sltof432:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vpextrq $1, %xmm0, %rax
-; KNL-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
-; KNL-NEXT:    vmovq %xmm0, %rax
-; KNL-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
-; KNL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vmovq %xmm0, %rax
-; KNL-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
-; KNL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; KNL-NEXT:    vpextrq $1, %xmm0, %rax
-; KNL-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
-; KNL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: sltof432:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvtqq2ps %ymm0, %xmm0
-; SKX-NEXT:    retq
+; NODQ-LABEL: sltof432:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
+; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm1
+; NODQ-NEXT:    vmovq %xmm0, %rax
+; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; NODQ-NEXT:    vmovq %xmm0, %rax
+; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm2
+; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
+; NODQ-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; NODQ-NEXT:    retq
+;
+; VLDQ-LABEL: sltof432:
+; VLDQ:       ## BB#0:
+; VLDQ-NEXT:    vcvtqq2ps %ymm0, %xmm0
+; VLDQ-NEXT:    retq
+;
+; AVX512DQ-LABEL: sltof432:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT:    vcvtqq2ps %zmm0, %ymm0
+; AVX512DQ-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT:    retq
   %b = sitofp <4 x i64> %a to <4 x float>
   ret <4 x float> %b
 }
 
 define <4 x float> @ultof432(<4 x i64> %a) {
-; KNL-LABEL: ultof432:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vpextrq $1, %xmm0, %rax
-; KNL-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
-; KNL-NEXT:    vmovq %xmm0, %rax
-; KNL-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm2
-; KNL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
-; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; KNL-NEXT:    vmovq %xmm0, %rax
-; KNL-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm2
-; KNL-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; KNL-NEXT:    vpextrq $1, %xmm0, %rax
-; KNL-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm0
-; KNL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: ultof432:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvtuqq2ps %ymm0, %xmm0
-; SKX-NEXT:    retq
+; NODQ-LABEL: ultof432:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
+; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm1, %xmm1
+; NODQ-NEXT:    vmovq %xmm0, %rax
+; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; NODQ-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; NODQ-NEXT:    vmovq %xmm0, %rax
+; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm2
+; NODQ-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
+; NODQ-NEXT:    vcvtusi2ssq %rax, %xmm3, %xmm0
+; NODQ-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; NODQ-NEXT:    retq
+;
+; VLDQ-LABEL: ultof432:
+; VLDQ:       ## BB#0:
+; VLDQ-NEXT:    vcvtuqq2ps %ymm0, %xmm0
+; VLDQ-NEXT:    retq
+;
+; AVX512DQ-LABEL: ultof432:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT:    vcvtuqq2ps %zmm0, %ymm0
+; AVX512DQ-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT:    retq
   %b = uitofp <4 x i64> %a to <4 x float>
   ret <4 x float> %b
 }
 
 define <8 x double> @ultof64(<8 x i64> %a) {
-; KNL-LABEL: ultof64:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
-; KNL-NEXT:    vpextrq $1, %xmm1, %rax
-; KNL-NEXT:    vcvtusi2sdq %rax, %xmm2, %xmm2
-; KNL-NEXT:    vmovq %xmm1, %rax
-; KNL-NEXT:    vcvtusi2sdq %rax, %xmm3, %xmm1
-; KNL-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; KNL-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
-; KNL-NEXT:    vpextrq $1, %xmm2, %rax
-; KNL-NEXT:    vcvtusi2sdq %rax, %xmm3, %xmm3
-; KNL-NEXT:    vmovq %xmm2, %rax
-; KNL-NEXT:    vcvtusi2sdq %rax, %xmm4, %xmm2
-; KNL-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; KNL-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; KNL-NEXT:    vextracti32x4 $1, %zmm0, %xmm2
-; KNL-NEXT:    vpextrq $1, %xmm2, %rax
-; KNL-NEXT:    vcvtusi2sdq %rax, %xmm4, %xmm3
-; KNL-NEXT:    vmovq %xmm2, %rax
-; KNL-NEXT:    vcvtusi2sdq %rax, %xmm4, %xmm2
-; KNL-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; KNL-NEXT:    vpextrq $1, %xmm0, %rax
-; KNL-NEXT:    vcvtusi2sdq %rax, %xmm4, %xmm3
-; KNL-NEXT:    vmovq %xmm0, %rax
-; KNL-NEXT:    vcvtusi2sdq %rax, %xmm4, %xmm0
-; KNL-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm3[0]
-; KNL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; KNL-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: ultof64:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvtuqq2pd %zmm0, %zmm0
-; SKX-NEXT:    retq
+; NODQ-LABEL: ultof64:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
+; NODQ-NEXT:    vpextrq $1, %xmm1, %rax
+; NODQ-NEXT:    vcvtusi2sdq %rax, %xmm2, %xmm2
+; NODQ-NEXT:    vmovq %xmm1, %rax
+; NODQ-NEXT:    vcvtusi2sdq %rax, %xmm3, %xmm1
+; NODQ-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; NODQ-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
+; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
+; NODQ-NEXT:    vcvtusi2sdq %rax, %xmm3, %xmm3
+; NODQ-NEXT:    vmovq %xmm2, %rax
+; NODQ-NEXT:    vcvtusi2sdq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; NODQ-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; NODQ-NEXT:    vextracti32x4 $1, %zmm0, %xmm2
+; NODQ-NEXT:    vpextrq $1, %xmm2, %rax
+; NODQ-NEXT:    vcvtusi2sdq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vmovq %xmm2, %rax
+; NODQ-NEXT:    vcvtusi2sdq %rax, %xmm4, %xmm2
+; NODQ-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; NODQ-NEXT:    vpextrq $1, %xmm0, %rax
+; NODQ-NEXT:    vcvtusi2sdq %rax, %xmm4, %xmm3
+; NODQ-NEXT:    vmovq %xmm0, %rax
+; NODQ-NEXT:    vcvtusi2sdq %rax, %xmm4, %xmm0
+; NODQ-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; NODQ-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; NODQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; NODQ-NEXT:    retq
+;
+; DQ-LABEL: ultof64:
+; DQ:       ## BB#0:
+; DQ-NEXT:    vcvtuqq2pd %zmm0, %zmm0
+; DQ-NEXT:    retq
   %b = uitofp <8 x i64> %a to <8 x double>
   ret <8 x double> %b
 }
@@ -284,33 +339,33 @@ define <16 x i32> @fptoui00(<16 x float>
 }
 
 define <8 x i32> @fptoui_256(<8 x float> %a) nounwind {
-; KNL-LABEL: fptoui_256:
-; KNL:       ## BB#0:
-; KNL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; KNL-NEXT:    vcvttps2udq %zmm0, %zmm0
-; KNL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: fptoui_256:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvttps2udq %ymm0, %ymm0
-; SKX-NEXT:    retq
+; NOVL-LABEL: fptoui_256:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NOVL-NEXT:    vcvttps2udq %zmm0, %zmm0
+; NOVL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NOVL-NEXT:    retq
+;
+; VL-LABEL: fptoui_256:
+; VL:       ## BB#0:
+; VL-NEXT:    vcvttps2udq %ymm0, %ymm0
+; VL-NEXT:    retq
   %b = fptoui <8 x float> %a to <8 x i32>
   ret <8 x i32> %b
 }
 
 define <4 x i32> @fptoui_128(<4 x float> %a) nounwind {
-; KNL-LABEL: fptoui_128:
-; KNL:       ## BB#0:
-; KNL-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; KNL-NEXT:    vcvttps2udq %zmm0, %zmm0
-; KNL-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: fptoui_128:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvttps2udq %xmm0, %xmm0
-; SKX-NEXT:    retq
+; NOVL-LABEL: fptoui_128:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; NOVL-NEXT:    vcvttps2udq %zmm0, %zmm0
+; NOVL-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; NOVL-NEXT:    retq
+;
+; VL-LABEL: fptoui_128:
+; VL:       ## BB#0:
+; VL-NEXT:    vcvttps2udq %xmm0, %xmm0
+; VL-NEXT:    retq
   %b = fptoui <4 x float> %a to <4 x i32>
   ret <4 x i32> %b
 }
@@ -325,17 +380,17 @@ define <8 x i32> @fptoui01(<8 x double>
 }
 
 define <4 x i32> @fptoui_256d(<4 x double> %a) nounwind {
-; KNL-LABEL: fptoui_256d:
-; KNL:       ## BB#0:
-; KNL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; KNL-NEXT:    vcvttpd2udq %zmm0, %ymm0
-; KNL-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: fptoui_256d:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvttpd2udq %ymm0, %xmm0
-; SKX-NEXT:    retq
+; NOVL-LABEL: fptoui_256d:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NOVL-NEXT:    vcvttpd2udq %zmm0, %ymm0
+; NOVL-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; NOVL-NEXT:    retq
+;
+; VL-LABEL: fptoui_256d:
+; VL:       ## BB#0:
+; VL-NEXT:    vcvttpd2udq %ymm0, %xmm0
+; VL-NEXT:    retq
   %b = fptoui <4 x double> %a to <4 x i32>
   ret <4 x i32> %b
 }
@@ -349,34 +404,34 @@ define <8 x double> @sitof64(<8 x i32> %
   ret <8 x double> %b
 }
 define <8 x double> @sitof64_mask(<8 x double> %a, <8 x i32> %b, i8 %c) nounwind {
-; KNL-LABEL: sitof64_mask:
-; KNL:       ## BB#0:
-; KNL-NEXT:    kmovw %edi, %k1
-; KNL-NEXT:    vcvtdq2pd %ymm1, %zmm0 {%k1}
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: sitof64_mask:
-; SKX:       ## BB#0:
-; SKX-NEXT:    kmovb %edi, %k1
-; SKX-NEXT:    vcvtdq2pd %ymm1, %zmm0 {%k1}
-; SKX-NEXT:    retq
+; NODQ-LABEL: sitof64_mask:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    kmovw %edi, %k1
+; NODQ-NEXT:    vcvtdq2pd %ymm1, %zmm0 {%k1}
+; NODQ-NEXT:    retq
+;
+; DQ-LABEL: sitof64_mask:
+; DQ:       ## BB#0:
+; DQ-NEXT:    kmovb %edi, %k1
+; DQ-NEXT:    vcvtdq2pd %ymm1, %zmm0 {%k1}
+; DQ-NEXT:    retq
   %1 = bitcast i8 %c to <8 x i1>
   %2 = sitofp <8 x i32> %b to <8 x double>
   %3 = select <8 x i1> %1, <8 x double> %2, <8 x double> %a
   ret <8 x double> %3
 }
 define <8 x double> @sitof64_maskz(<8 x i32> %a, i8 %b) nounwind {
-; KNL-LABEL: sitof64_maskz:
-; KNL:       ## BB#0:
-; KNL-NEXT:    kmovw %edi, %k1
-; KNL-NEXT:    vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: sitof64_maskz:
-; SKX:       ## BB#0:
-; SKX-NEXT:    kmovb %edi, %k1
-; SKX-NEXT:    vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
-; SKX-NEXT:    retq
+; NODQ-LABEL: sitof64_maskz:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    kmovw %edi, %k1
+; NODQ-NEXT:    vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
+; NODQ-NEXT:    retq
+;
+; DQ-LABEL: sitof64_maskz:
+; DQ:       ## BB#0:
+; DQ-NEXT:    kmovb %edi, %k1
+; DQ-NEXT:    vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
+; DQ-NEXT:    retq
   %1 = bitcast i8 %b to <8 x i1>
   %2 = sitofp <8 x i32> %a to <8 x double>
   %3 = select <8 x i1> %1, <8 x double> %2, <8 x double> zeroinitializer
@@ -402,19 +457,19 @@ define <4 x i32> @fptosi03(<4 x double>
 }
 
 define <16 x float> @fptrunc00(<16 x double> %b) nounwind {
-; KNL-LABEL: fptrunc00:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vcvtpd2ps %zmm0, %ymm0
-; KNL-NEXT:    vcvtpd2ps %zmm1, %ymm1
-; KNL-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: fptrunc00:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvtpd2ps %zmm0, %ymm0
-; SKX-NEXT:    vcvtpd2ps %zmm1, %ymm1
-; SKX-NEXT:    vinsertf32x8 $1, %ymm1, %zmm0, %zmm0
-; SKX-NEXT:    retq
+; NODQ-LABEL: fptrunc00:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    vcvtpd2ps %zmm0, %ymm0
+; NODQ-NEXT:    vcvtpd2ps %zmm1, %ymm1
+; NODQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; NODQ-NEXT:    retq
+;
+; DQ-LABEL: fptrunc00:
+; DQ:       ## BB#0:
+; DQ-NEXT:    vcvtpd2ps %zmm0, %ymm0
+; DQ-NEXT:    vcvtpd2ps %zmm1, %ymm1
+; DQ-NEXT:    vinsertf32x8 $1, %ymm1, %zmm0, %zmm0
+; DQ-NEXT:    retq
   %a = fptrunc <16 x double> %b to <16 x float>
   ret <16 x float> %a
 }
@@ -429,20 +484,20 @@ define <4 x float> @fptrunc01(<4 x doubl
 }
 
 define <4 x float> @fptrunc02(<4 x double> %b, <4 x i1> %mask) {
-; KNL-LABEL: fptrunc02:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vpslld $31, %xmm1, %xmm1
-; KNL-NEXT:    vpsrad $31, %xmm1, %xmm1
-; KNL-NEXT:    vcvtpd2ps %ymm0, %xmm0
-; KNL-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: fptrunc02:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vpslld $31, %xmm1, %xmm1
-; SKX-NEXT:    vptestmd %xmm1, %xmm1, %k1
-; SKX-NEXT:    vcvtpd2ps %ymm0, %xmm0 {%k1} {z}
-; SKX-NEXT:    retq
+; NOVL-LABEL: fptrunc02:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    vpslld $31, %xmm1, %xmm1
+; NOVL-NEXT:    vpsrad $31, %xmm1, %xmm1
+; NOVL-NEXT:    vcvtpd2ps %ymm0, %xmm0
+; NOVL-NEXT:    vpand %xmm0, %xmm1, %xmm0
+; NOVL-NEXT:    retq
+;
+; VL-LABEL: fptrunc02:
+; VL:       ## BB#0:
+; VL-NEXT:    vpslld $31, %xmm1, %xmm1
+; VL-NEXT:    vptestmd %xmm1, %xmm1, %k1
+; VL-NEXT:    vcvtpd2ps %ymm0, %xmm0 {%k1} {z}
+; VL-NEXT:    retq
   %a = fptrunc <4 x double> %b to <4 x float>
   %c = select <4 x i1>%mask, <4 x float>%a, <4 x float> zeroinitializer
   ret <4 x float> %c
@@ -469,18 +524,18 @@ define <8 x double> @fpext00(<8 x float>
 }
 
 define <4 x double> @fpext01(<4 x float> %b, <4 x double>%b1, <4 x double>%a1) {
-; KNL-LABEL: fpext01:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vcvtps2pd %xmm0, %ymm0
-; KNL-NEXT:    vcmpltpd %ymm2, %ymm1, %ymm1
-; KNL-NEXT:    vandpd %ymm0, %ymm1, %ymm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: fpext01:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcmpltpd %ymm2, %ymm1, %k1
-; SKX-NEXT:    vcvtps2pd %xmm0, %ymm0 {%k1} {z}
-; SKX-NEXT:    retq
+; NOVL-LABEL: fpext01:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    vcvtps2pd %xmm0, %ymm0
+; NOVL-NEXT:    vcmpltpd %ymm2, %ymm1, %ymm1
+; NOVL-NEXT:    vandpd %ymm0, %ymm1, %ymm0
+; NOVL-NEXT:    retq
+;
+; VL-LABEL: fpext01:
+; VL:       ## BB#0:
+; VL-NEXT:    vcmpltpd %ymm2, %ymm1, %k1
+; VL-NEXT:    vcvtps2pd %xmm0, %ymm0 {%k1} {z}
+; VL-NEXT:    retq
   %a = fpext <4 x float> %b to <4 x double>
   %mask = fcmp ogt <4 x double>%a1, %b1
   %c = select <4 x i1>%mask,  <4 x double>%a, <4 x double>zeroinitializer
@@ -611,53 +666,53 @@ define i32 @float_to_int(float %x) {
 }
 
 define <16 x double> @uitof64(<16 x i32> %a) nounwind {
-; KNL-LABEL: uitof64:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vcvtudq2pd %ymm0, %zmm2
-; KNL-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; KNL-NEXT:    vcvtudq2pd %ymm0, %zmm1
-; KNL-NEXT:    vmovaps %zmm2, %zmm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: uitof64:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvtudq2pd %ymm0, %zmm2
-; SKX-NEXT:    vextracti32x8 $1, %zmm0, %ymm0
-; SKX-NEXT:    vcvtudq2pd %ymm0, %zmm1
-; SKX-NEXT:    vmovaps %zmm2, %zmm0
-; SKX-NEXT:    retq
+; NODQ-LABEL: uitof64:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    vcvtudq2pd %ymm0, %zmm2
+; NODQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; NODQ-NEXT:    vcvtudq2pd %ymm0, %zmm1
+; NODQ-NEXT:    vmovaps %zmm2, %zmm0
+; NODQ-NEXT:    retq
+;
+; DQ-LABEL: uitof64:
+; DQ:       ## BB#0:
+; DQ-NEXT:    vcvtudq2pd %ymm0, %zmm2
+; DQ-NEXT:    vextracti32x8 $1, %zmm0, %ymm0
+; DQ-NEXT:    vcvtudq2pd %ymm0, %zmm1
+; DQ-NEXT:    vmovaps %zmm2, %zmm0
+; DQ-NEXT:    retq
   %b = uitofp <16 x i32> %a to <16 x double>
   ret <16 x double> %b
 }
 define <8 x double> @uitof64_mask(<8 x double> %a, <8 x i32> %b, i8 %c) nounwind {
-; KNL-LABEL: uitof64_mask:
-; KNL:       ## BB#0:
-; KNL-NEXT:    kmovw %edi, %k1
-; KNL-NEXT:    vcvtudq2pd %ymm1, %zmm0 {%k1}
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: uitof64_mask:
-; SKX:       ## BB#0:
-; SKX-NEXT:    kmovb %edi, %k1
-; SKX-NEXT:    vcvtudq2pd %ymm1, %zmm0 {%k1}
-; SKX-NEXT:    retq
+; NODQ-LABEL: uitof64_mask:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    kmovw %edi, %k1
+; NODQ-NEXT:    vcvtudq2pd %ymm1, %zmm0 {%k1}
+; NODQ-NEXT:    retq
+;
+; DQ-LABEL: uitof64_mask:
+; DQ:       ## BB#0:
+; DQ-NEXT:    kmovb %edi, %k1
+; DQ-NEXT:    vcvtudq2pd %ymm1, %zmm0 {%k1}
+; DQ-NEXT:    retq
   %1 = bitcast i8 %c to <8 x i1>
   %2 = uitofp <8 x i32> %b to <8 x double>
   %3 = select <8 x i1> %1, <8 x double> %2, <8 x double> %a
   ret <8 x double> %3
 }
 define <8 x double> @uitof64_maskz(<8 x i32> %a, i8 %b) nounwind {
-; KNL-LABEL: uitof64_maskz:
-; KNL:       ## BB#0:
-; KNL-NEXT:    kmovw %edi, %k1
-; KNL-NEXT:    vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: uitof64_maskz:
-; SKX:       ## BB#0:
-; SKX-NEXT:    kmovb %edi, %k1
-; SKX-NEXT:    vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
-; SKX-NEXT:    retq
+; NODQ-LABEL: uitof64_maskz:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    kmovw %edi, %k1
+; NODQ-NEXT:    vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
+; NODQ-NEXT:    retq
+;
+; DQ-LABEL: uitof64_maskz:
+; DQ:       ## BB#0:
+; DQ-NEXT:    kmovb %edi, %k1
+; DQ-NEXT:    vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
+; DQ-NEXT:    retq
   %1 = bitcast i8 %b to <8 x i1>
   %2 = uitofp <8 x i32> %a to <8 x double>
   %3 = select <8 x i1> %1, <8 x double> %2, <8 x double> zeroinitializer
@@ -665,17 +720,17 @@ define <8 x double> @uitof64_maskz(<8 x
 }
 
 define <4 x double> @uitof64_256(<4 x i32> %a) nounwind {
-; KNL-LABEL: uitof64_256:
-; KNL:       ## BB#0:
-; KNL-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
-; KNL-NEXT:    vcvtudq2pd %ymm0, %zmm0
-; KNL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: uitof64_256:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvtudq2pd %xmm0, %ymm0
-; SKX-NEXT:    retq
+; NOVL-LABEL: uitof64_256:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; NOVL-NEXT:    vcvtudq2pd %ymm0, %zmm0
+; NOVL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NOVL-NEXT:    retq
+;
+; VL-LABEL: uitof64_256:
+; VL:       ## BB#0:
+; VL-NEXT:    vcvtudq2pd %xmm0, %ymm0
+; VL-NEXT:    retq
   %b = uitofp <4 x i32> %a to <4 x double>
   ret <4 x double> %b
 }
@@ -690,33 +745,33 @@ define <16 x float> @uitof32(<16 x i32>
 }
 
 define <8 x float> @uitof32_256(<8 x i32> %a) nounwind {
-; KNL-LABEL: uitof32_256:
-; KNL:       ## BB#0:
-; KNL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; KNL-NEXT:    vcvtudq2ps %zmm0, %zmm0
-; KNL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: uitof32_256:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvtudq2ps %ymm0, %ymm0
-; SKX-NEXT:    retq
+; NOVL-LABEL: uitof32_256:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NOVL-NEXT:    vcvtudq2ps %zmm0, %zmm0
+; NOVL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NOVL-NEXT:    retq
+;
+; VL-LABEL: uitof32_256:
+; VL:       ## BB#0:
+; VL-NEXT:    vcvtudq2ps %ymm0, %ymm0
+; VL-NEXT:    retq
   %b = uitofp <8 x i32> %a to <8 x float>
   ret <8 x float> %b
 }
 
 define <4 x float> @uitof32_128(<4 x i32> %a) nounwind {
-; KNL-LABEL: uitof32_128:
-; KNL:       ## BB#0:
-; KNL-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; KNL-NEXT:    vcvtudq2ps %zmm0, %zmm0
-; KNL-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: uitof32_128:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vcvtudq2ps %xmm0, %xmm0
-; SKX-NEXT:    retq
+; NOVL-LABEL: uitof32_128:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; NOVL-NEXT:    vcvtudq2ps %zmm0, %zmm0
+; NOVL-NEXT:    ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; NOVL-NEXT:    retq
+;
+; VL-LABEL: uitof32_128:
+; VL:       ## BB#0:
+; VL-NEXT:    vcvtudq2ps %xmm0, %xmm0
+; VL-NEXT:    retq
   %b = uitofp <4 x i32> %a to <4 x float>
   ret <4 x float> %b
 }
@@ -758,21 +813,21 @@ define double @uitofp03(i32 %a) nounwind
 }
 
 define <16 x float> @sitofp_16i1_float(<16 x i32> %a) {
-; KNL-LABEL: sitofp_16i1_float:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
-; KNL-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
-; KNL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT:    vcvtdq2ps %zmm0, %zmm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: sitofp_16i1_float:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
-; SKX-NEXT:    vpcmpgtd %zmm0, %zmm1, %k0
-; SKX-NEXT:    vpmovm2d %k0, %zmm0
-; SKX-NEXT:    vcvtdq2ps %zmm0, %zmm0
-; SKX-NEXT:    retq
+; NODQ-LABEL: sitofp_16i1_float:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NODQ-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
+; NODQ-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NODQ-NEXT:    vcvtdq2ps %zmm0, %zmm0
+; NODQ-NEXT:    retq
+;
+; DQ-LABEL: sitofp_16i1_float:
+; DQ:       ## BB#0:
+; DQ-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; DQ-NEXT:    vpcmpgtd %zmm0, %zmm1, %k0
+; DQ-NEXT:    vpmovm2d %k0, %zmm0
+; DQ-NEXT:    vcvtdq2ps %zmm0, %zmm0
+; DQ-NEXT:    retq
   %mask = icmp slt <16 x i32> %a, zeroinitializer
   %1 = sitofp <16 x i1> %mask to <16 x float>
   ret <16 x float> %1
@@ -821,157 +876,342 @@ define <8 x double> @sitofp_8i8_double(<
 }
 
 define <16 x double> @sitofp_16i1_double(<16 x double> %a) {
-; KNL-LABEL: sitofp_16i1_double:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vpxord %zmm2, %zmm2, %zmm2
-; KNL-NEXT:    vcmpltpd %zmm1, %zmm2, %k1
-; KNL-NEXT:    vcmpltpd %zmm0, %zmm2, %k2
-; KNL-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
-; KNL-NEXT:    vpmovqd %zmm0, %ymm0
-; KNL-NEXT:    vcvtdq2pd %ymm0, %zmm0
-; KNL-NEXT:    vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
-; KNL-NEXT:    vpmovqd %zmm1, %ymm1
-; KNL-NEXT:    vcvtdq2pd %ymm1, %zmm1
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: sitofp_16i1_double:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vxorpd %zmm2, %zmm2, %zmm2
-; SKX-NEXT:    vcmpltpd %zmm1, %zmm2, %k0
-; SKX-NEXT:    vcmpltpd %zmm0, %zmm2, %k1
-; SKX-NEXT:    vpmovm2d %k1, %ymm0
-; SKX-NEXT:    vcvtdq2pd %ymm0, %zmm0
-; SKX-NEXT:    vpmovm2d %k0, %ymm1
-; SKX-NEXT:    vcvtdq2pd %ymm1, %zmm1
-; SKX-NEXT:    retq
+; NODQ-LABEL: sitofp_16i1_double:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; NODQ-NEXT:    vcmpltpd %zmm1, %zmm2, %k1
+; NODQ-NEXT:    vcmpltpd %zmm0, %zmm2, %k2
+; NODQ-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; NODQ-NEXT:    vpmovqd %zmm0, %ymm0
+; NODQ-NEXT:    vcvtdq2pd %ymm0, %zmm0
+; NODQ-NEXT:    vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; NODQ-NEXT:    vpmovqd %zmm1, %ymm1
+; NODQ-NEXT:    vcvtdq2pd %ymm1, %zmm1
+; NODQ-NEXT:    retq
+;
+; VLDQ-LABEL: sitofp_16i1_double:
+; VLDQ:       ## BB#0:
+; VLDQ-NEXT:    vxorpd %zmm2, %zmm2, %zmm2
+; VLDQ-NEXT:    vcmpltpd %zmm1, %zmm2, %k0
+; VLDQ-NEXT:    vcmpltpd %zmm0, %zmm2, %k1
+; VLDQ-NEXT:    vpmovm2d %k1, %ymm0
+; VLDQ-NEXT:    vcvtdq2pd %ymm0, %zmm0
+; VLDQ-NEXT:    vpmovm2d %k0, %ymm1
+; VLDQ-NEXT:    vcvtdq2pd %ymm1, %zmm1
+; VLDQ-NEXT:    retq
+;
+; AVX512DQ-LABEL: sitofp_16i1_double:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vxorpd %zmm2, %zmm2, %zmm2
+; AVX512DQ-NEXT:    vcmpltpd %zmm1, %zmm2, %k0
+; AVX512DQ-NEXT:    vcmpltpd %zmm0, %zmm2, %k1
+; AVX512DQ-NEXT:    vpmovm2q %k1, %zmm0
+; AVX512DQ-NEXT:    vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT:    vcvtdq2pd %ymm0, %zmm0
+; AVX512DQ-NEXT:    vpmovm2q %k0, %zmm1
+; AVX512DQ-NEXT:    vpmovqd %zmm1, %ymm1
+; AVX512DQ-NEXT:    vcvtdq2pd %ymm1, %zmm1
+; AVX512DQ-NEXT:    retq
   %cmpres = fcmp ogt <16 x double> %a, zeroinitializer
   %1 = sitofp <16 x i1> %cmpres to <16 x double>
   ret <16 x double> %1
 }
 
 define <8 x double> @sitofp_8i1_double(<8 x double> %a) {
-; KNL-LABEL: sitofp_8i1_double:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
-; KNL-NEXT:    vcmpltpd %zmm0, %zmm1, %k1
-; KNL-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT:    vpmovqd %zmm0, %ymm0
-; KNL-NEXT:    vcvtdq2pd %ymm0, %zmm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: sitofp_8i1_double:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vxorpd %zmm1, %zmm1, %zmm1
-; SKX-NEXT:    vcmpltpd %zmm0, %zmm1, %k0
-; SKX-NEXT:    vpmovm2d %k0, %ymm0
-; SKX-NEXT:    vcvtdq2pd %ymm0, %zmm0
-; SKX-NEXT:    retq
+; NODQ-LABEL: sitofp_8i1_double:
+; NODQ:       ## BB#0:
+; NODQ-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NODQ-NEXT:    vcmpltpd %zmm0, %zmm1, %k1
+; NODQ-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NODQ-NEXT:    vpmovqd %zmm0, %ymm0
+; NODQ-NEXT:    vcvtdq2pd %ymm0, %zmm0
+; NODQ-NEXT:    retq
+;
+; VLDQ-LABEL: sitofp_8i1_double:
+; VLDQ:       ## BB#0:
+; VLDQ-NEXT:    vxorpd %zmm1, %zmm1, %zmm1
+; VLDQ-NEXT:    vcmpltpd %zmm0, %zmm1, %k0
+; VLDQ-NEXT:    vpmovm2d %k0, %ymm0
+; VLDQ-NEXT:    vcvtdq2pd %ymm0, %zmm0
+; VLDQ-NEXT:    retq
+;
+; AVX512DQ-LABEL: sitofp_8i1_double:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vxorpd %zmm1, %zmm1, %zmm1
+; AVX512DQ-NEXT:    vcmpltpd %zmm0, %zmm1, %k0
+; AVX512DQ-NEXT:    vpmovm2q %k0, %zmm0
+; AVX512DQ-NEXT:    vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT:    vcvtdq2pd %ymm0, %zmm0
+; AVX512DQ-NEXT:    retq
   %cmpres = fcmp ogt <8 x double> %a, zeroinitializer
   %1 = sitofp <8 x i1> %cmpres to <8 x double>
   ret <8 x double> %1
 }
 
 define <8 x float> @sitofp_8i1_float(<8 x float> %a) {
-; KNL-LABEL: sitofp_8i1_float:
-; KNL:       ## BB#0:
-; KNL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; KNL-NEXT:    vxorps %ymm1, %ymm1, %ymm1
-; KNL-NEXT:    vcmpltps %zmm0, %zmm1, %k1
-; KNL-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT:    vpmovqd %zmm0, %ymm0
-; KNL-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: sitofp_8i1_float:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vxorps %ymm1, %ymm1, %ymm1
-; SKX-NEXT:    vcmpltps %ymm0, %ymm1, %k0
-; SKX-NEXT:    vpmovm2d %k0, %ymm0
-; SKX-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; SKX-NEXT:    retq
+; NOVLDQ-LABEL: sitofp_8i1_float:
+; NOVLDQ:       ## BB#0:
+; NOVLDQ-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NOVLDQ-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; NOVLDQ-NEXT:    vcmpltps %zmm0, %zmm1, %k1
+; NOVLDQ-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; NOVLDQ-NEXT:    vpmovqd %zmm0, %ymm0
+; NOVLDQ-NEXT:    vcvtdq2ps %ymm0, %ymm0
+; NOVLDQ-NEXT:    retq
+;
+; VLDQ-LABEL: sitofp_8i1_float:
+; VLDQ:       ## BB#0:
+; VLDQ-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; VLDQ-NEXT:    vcmpltps %ymm0, %ymm1, %k0
+; VLDQ-NEXT:    vpmovm2d %k0, %ymm0
+; VLDQ-NEXT:    vcvtdq2ps %ymm0, %ymm0
+; VLDQ-NEXT:    retq
+;
+; VLNODQ-LABEL: sitofp_8i1_float:
+; VLNODQ:       ## BB#0:
+; VLNODQ-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; VLNODQ-NEXT:    vcmpltps %ymm0, %ymm1, %k1
+; VLNODQ-NEXT:    vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; VLNODQ-NEXT:    vpmovqd %zmm0, %ymm0
+; VLNODQ-NEXT:    vcvtdq2ps %ymm0, %ymm0
+; VLNODQ-NEXT:    retq
+;
+; AVX512DQ-LABEL: sitofp_8i1_float:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vcmpltps %zmm0, %zmm1, %k0
+; AVX512DQ-NEXT:    vpmovm2q %k0, %zmm0
+; AVX512DQ-NEXT:    vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT:    vcvtdq2ps %ymm0, %ymm0
+; AVX512DQ-NEXT:    retq
   %cmpres = fcmp ogt <8 x float> %a, zeroinitializer
   %1 = sitofp <8 x i1> %cmpres to <8 x float>
   ret <8 x float> %1
 }
 
 define <4 x float> @sitofp_4i1_float(<4 x float> %a) {
-; KNL-LABEL: sitofp_4i1_float:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; KNL-NEXT:    vcmpltps %xmm0, %xmm1, %xmm0
-; KNL-NEXT:    vcvtdq2ps %xmm0, %xmm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: sitofp_4i1_float:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; SKX-NEXT:    vcmpltps %xmm0, %xmm1, %k0
-; SKX-NEXT:    vpmovm2d %k0, %xmm0
-; SKX-NEXT:    vcvtdq2ps %xmm0, %xmm0
-; SKX-NEXT:    retq
+; NOVL-LABEL: sitofp_4i1_float:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vcmpltps %xmm0, %xmm1, %xmm0
+; NOVL-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; NOVL-NEXT:    retq
+;
+; VLDQ-LABEL: sitofp_4i1_float:
+; VLDQ:       ## BB#0:
+; VLDQ-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; VLDQ-NEXT:    vcmpltps %xmm0, %xmm1, %k0
+; VLDQ-NEXT:    vpmovm2d %k0, %xmm0
+; VLDQ-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; VLDQ-NEXT:    retq
+;
+; VLNODQ-LABEL: sitofp_4i1_float:
+; VLNODQ:       ## BB#0:
+; VLNODQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; VLNODQ-NEXT:    vcmpltps %xmm0, %xmm1, %k2
+; VLNODQ-NEXT:    kshiftlw $12, %k2, %k0
+; VLNODQ-NEXT:    kshiftrw $15, %k0, %k0
+; VLNODQ-NEXT:    kshiftlw $13, %k2, %k1
+; VLNODQ-NEXT:    kshiftrw $15, %k1, %k1
+; VLNODQ-NEXT:    kshiftlw $15, %k2, %k3
+; VLNODQ-NEXT:    kshiftrw $15, %k3, %k3
+; VLNODQ-NEXT:    kshiftlw $14, %k2, %k2
+; VLNODQ-NEXT:    kshiftrw $15, %k2, %k2
+; VLNODQ-NEXT:    kmovw %k2, %eax
+; VLNODQ-NEXT:    andl $1, %eax
+; VLNODQ-NEXT:    xorl %ecx, %ecx
+; VLNODQ-NEXT:    testb %al, %al
+; VLNODQ-NEXT:    movl $-1, %eax
+; VLNODQ-NEXT:    movl $0, %edx
+; VLNODQ-NEXT:    cmovnel %eax, %edx
+; VLNODQ-NEXT:    kmovw %k3, %esi
+; VLNODQ-NEXT:    andl $1, %esi
+; VLNODQ-NEXT:    testb %sil, %sil
+; VLNODQ-NEXT:    movl $0, %esi
+; VLNODQ-NEXT:    cmovnel %eax, %esi
+; VLNODQ-NEXT:    vmovd %esi, %xmm0
+; VLNODQ-NEXT:    vpinsrd $1, %edx, %xmm0, %xmm0
+; VLNODQ-NEXT:    kmovw %k1, %edx
+; VLNODQ-NEXT:    andl $1, %edx
+; VLNODQ-NEXT:    testb %dl, %dl
+; VLNODQ-NEXT:    movl $0, %edx
+; VLNODQ-NEXT:    cmovnel %eax, %edx
+; VLNODQ-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0
+; VLNODQ-NEXT:    kmovw %k0, %edx
+; VLNODQ-NEXT:    andl $1, %edx
+; VLNODQ-NEXT:    testb %dl, %dl
+; VLNODQ-NEXT:    cmovnel %eax, %ecx
+; VLNODQ-NEXT:    vpinsrd $3, %ecx, %xmm0, %xmm0
+; VLNODQ-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; VLNODQ-NEXT:    retq
   %cmpres = fcmp ogt <4 x float> %a, zeroinitializer
   %1 = sitofp <4 x i1> %cmpres to <4 x float>
   ret <4 x float> %1
 }
 
 define <4 x double> @sitofp_4i1_double(<4 x double> %a) {
-; KNL-LABEL: sitofp_4i1_double:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
-; KNL-NEXT:    vcmpltpd %ymm0, %ymm1, %ymm0
-; KNL-NEXT:    vpmovqd %zmm0, %ymm0
-; KNL-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: sitofp_4i1_double:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
-; SKX-NEXT:    vcmpltpd %ymm0, %ymm1, %k0
-; SKX-NEXT:    vpmovm2d %k0, %xmm0
-; SKX-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; SKX-NEXT:    retq
+; NOVL-LABEL: sitofp_4i1_double:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; NOVL-NEXT:    vcmpltpd %ymm0, %ymm1, %ymm0
+; NOVL-NEXT:    vpmovqd %zmm0, %ymm0
+; NOVL-NEXT:    vcvtdq2pd %xmm0, %ymm0
+; NOVL-NEXT:    retq
+;
+; VLDQ-LABEL: sitofp_4i1_double:
+; VLDQ:       ## BB#0:
+; VLDQ-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; VLDQ-NEXT:    vcmpltpd %ymm0, %ymm1, %k0
+; VLDQ-NEXT:    vpmovm2d %k0, %xmm0
+; VLDQ-NEXT:    vcvtdq2pd %xmm0, %ymm0
+; VLDQ-NEXT:    retq
+;
+; VLNODQ-LABEL: sitofp_4i1_double:
+; VLNODQ:       ## BB#0:
+; VLNODQ-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; VLNODQ-NEXT:    vcmpltpd %ymm0, %ymm1, %k2
+; VLNODQ-NEXT:    kshiftlw $12, %k2, %k0
+; VLNODQ-NEXT:    kshiftrw $15, %k0, %k0
+; VLNODQ-NEXT:    kshiftlw $13, %k2, %k1
+; VLNODQ-NEXT:    kshiftrw $15, %k1, %k1
+; VLNODQ-NEXT:    kshiftlw $15, %k2, %k3
+; VLNODQ-NEXT:    kshiftrw $15, %k3, %k3
+; VLNODQ-NEXT:    kshiftlw $14, %k2, %k2
+; VLNODQ-NEXT:    kshiftrw $15, %k2, %k2
+; VLNODQ-NEXT:    kmovw %k2, %eax
+; VLNODQ-NEXT:    andl $1, %eax
+; VLNODQ-NEXT:    xorl %ecx, %ecx
+; VLNODQ-NEXT:    testb %al, %al
+; VLNODQ-NEXT:    movl $-1, %eax
+; VLNODQ-NEXT:    movl $0, %edx
+; VLNODQ-NEXT:    cmovnel %eax, %edx
+; VLNODQ-NEXT:    kmovw %k3, %esi
+; VLNODQ-NEXT:    andl $1, %esi
+; VLNODQ-NEXT:    testb %sil, %sil
+; VLNODQ-NEXT:    movl $0, %esi
+; VLNODQ-NEXT:    cmovnel %eax, %esi
+; VLNODQ-NEXT:    vmovd %esi, %xmm0
+; VLNODQ-NEXT:    vpinsrd $1, %edx, %xmm0, %xmm0
+; VLNODQ-NEXT:    kmovw %k1, %edx
+; VLNODQ-NEXT:    andl $1, %edx
+; VLNODQ-NEXT:    testb %dl, %dl
+; VLNODQ-NEXT:    movl $0, %edx
+; VLNODQ-NEXT:    cmovnel %eax, %edx
+; VLNODQ-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0
+; VLNODQ-NEXT:    kmovw %k0, %edx
+; VLNODQ-NEXT:    andl $1, %edx
+; VLNODQ-NEXT:    testb %dl, %dl
+; VLNODQ-NEXT:    cmovnel %eax, %ecx
+; VLNODQ-NEXT:    vpinsrd $3, %ecx, %xmm0, %xmm0
+; VLNODQ-NEXT:    vcvtdq2pd %xmm0, %ymm0
+; VLNODQ-NEXT:    retq
   %cmpres = fcmp ogt <4 x double> %a, zeroinitializer
   %1 = sitofp <4 x i1> %cmpres to <4 x double>
   ret <4 x double> %1
 }
 
 define <2 x float> @sitofp_2i1_float(<2 x float> %a) {
-; KNL-LABEL: sitofp_2i1_float:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; KNL-NEXT:    vcmpltps %xmm0, %xmm1, %xmm0
-; KNL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],zero,xmm0[1]
-; KNL-NEXT:    vcvtdq2ps %xmm0, %xmm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: sitofp_2i1_float:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; SKX-NEXT:    vcmpltps %xmm0, %xmm1, %k0
-; SKX-NEXT:    vpmovm2d %k0, %xmm0
-; SKX-NEXT:    vcvtdq2ps %xmm0, %xmm0
-; SKX-NEXT:    retq
+; NOVL-LABEL: sitofp_2i1_float:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vcmpltps %xmm0, %xmm1, %xmm0
+; NOVL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],zero,xmm0[1]
+; NOVL-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; NOVL-NEXT:    retq
+;
+; VLDQ-LABEL: sitofp_2i1_float:
+; VLDQ:       ## BB#0:
+; VLDQ-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; VLDQ-NEXT:    vcmpltps %xmm0, %xmm1, %k0
+; VLDQ-NEXT:    vpmovm2d %k0, %xmm0
+; VLDQ-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; VLDQ-NEXT:    retq
+;
+; VLNODQ-LABEL: sitofp_2i1_float:
+; VLNODQ:       ## BB#0:
+; VLNODQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; VLNODQ-NEXT:    vcmpltps %xmm0, %xmm1, %k2
+; VLNODQ-NEXT:    kshiftlw $12, %k2, %k0
+; VLNODQ-NEXT:    kshiftrw $15, %k0, %k0
+; VLNODQ-NEXT:    kshiftlw $13, %k2, %k1
+; VLNODQ-NEXT:    kshiftrw $15, %k1, %k1
+; VLNODQ-NEXT:    kshiftlw $15, %k2, %k3
+; VLNODQ-NEXT:    kshiftrw $15, %k3, %k3
+; VLNODQ-NEXT:    kshiftlw $14, %k2, %k2
+; VLNODQ-NEXT:    kshiftrw $15, %k2, %k2
+; VLNODQ-NEXT:    kmovw %k2, %eax
+; VLNODQ-NEXT:    andl $1, %eax
+; VLNODQ-NEXT:    xorl %ecx, %ecx
+; VLNODQ-NEXT:    testb %al, %al
+; VLNODQ-NEXT:    movl $-1, %eax
+; VLNODQ-NEXT:    movl $0, %edx
+; VLNODQ-NEXT:    cmovnel %eax, %edx
+; VLNODQ-NEXT:    kmovw %k3, %esi
+; VLNODQ-NEXT:    andl $1, %esi
+; VLNODQ-NEXT:    testb %sil, %sil
+; VLNODQ-NEXT:    movl $0, %esi
+; VLNODQ-NEXT:    cmovnel %eax, %esi
+; VLNODQ-NEXT:    vmovd %esi, %xmm0
+; VLNODQ-NEXT:    vpinsrd $1, %edx, %xmm0, %xmm0
+; VLNODQ-NEXT:    kmovw %k1, %edx
+; VLNODQ-NEXT:    andl $1, %edx
+; VLNODQ-NEXT:    testb %dl, %dl
+; VLNODQ-NEXT:    movl $0, %edx
+; VLNODQ-NEXT:    cmovnel %eax, %edx
+; VLNODQ-NEXT:    vpinsrd $2, %edx, %xmm0, %xmm0
+; VLNODQ-NEXT:    kmovw %k0, %edx
+; VLNODQ-NEXT:    andl $1, %edx
+; VLNODQ-NEXT:    testb %dl, %dl
+; VLNODQ-NEXT:    cmovnel %eax, %ecx
+; VLNODQ-NEXT:    vpinsrd $3, %ecx, %xmm0, %xmm0
+; VLNODQ-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; VLNODQ-NEXT:    retq
   %cmpres = fcmp ogt <2 x float> %a, zeroinitializer
   %1 = sitofp <2 x i1> %cmpres to <2 x float>
   ret <2 x float> %1
 }
 
 define <2 x double> @sitofp_2i1_double(<2 x double> %a) {
-; KNL-LABEL: sitofp_2i1_double:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; KNL-NEXT:    vcmpltpd %xmm0, %xmm1, %xmm0
-; KNL-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; KNL-NEXT:    vcvtdq2pd %xmm0, %xmm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: sitofp_2i1_double:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; SKX-NEXT:    vcmpltpd %xmm0, %xmm1, %k0
-; SKX-NEXT:    vpmovm2q %k0, %xmm0
-; SKX-NEXT:    vcvtqq2pd %xmm0, %xmm0
-; SKX-NEXT:    retq
+; NOVL-LABEL: sitofp_2i1_double:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vcmpltpd %xmm0, %xmm1, %xmm0
+; NOVL-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; NOVL-NEXT:    vcvtdq2pd %xmm0, %xmm0
+; NOVL-NEXT:    retq
+;
+; VLDQ-LABEL: sitofp_2i1_double:
+; VLDQ:       ## BB#0:
+; VLDQ-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; VLDQ-NEXT:    vcmpltpd %xmm0, %xmm1, %k0
+; VLDQ-NEXT:    vpmovm2q %k0, %xmm0
+; VLDQ-NEXT:    vcvtqq2pd %xmm0, %xmm0
+; VLDQ-NEXT:    retq
+;
+; VLNODQ-LABEL: sitofp_2i1_double:
+; VLNODQ:       ## BB#0:
+; VLNODQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; VLNODQ-NEXT:    vcmpltpd %xmm0, %xmm1, %k0
+; VLNODQ-NEXT:    kshiftlw $15, %k0, %k1
+; VLNODQ-NEXT:    kshiftrw $15, %k1, %k1
+; VLNODQ-NEXT:    kshiftlw $14, %k0, %k0
+; VLNODQ-NEXT:    kshiftrw $15, %k0, %k0
+; VLNODQ-NEXT:    kmovw %k0, %eax
+; VLNODQ-NEXT:    andl $1, %eax
+; VLNODQ-NEXT:    xorl %ecx, %ecx
+; VLNODQ-NEXT:    testb %al, %al
+; VLNODQ-NEXT:    movl $-1, %eax
+; VLNODQ-NEXT:    movl $0, %edx
+; VLNODQ-NEXT:    cmovnel %eax, %edx
+; VLNODQ-NEXT:    vcvtsi2sdl %edx, %xmm2, %xmm0
+; VLNODQ-NEXT:    kmovw %k1, %edx
+; VLNODQ-NEXT:    andl $1, %edx
+; VLNODQ-NEXT:    testb %dl, %dl
+; VLNODQ-NEXT:    cmovnel %eax, %ecx
+; VLNODQ-NEXT:    vcvtsi2sdl %ecx, %xmm2, %xmm1
+; VLNODQ-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; VLNODQ-NEXT:    retq
   %cmpres = fcmp ogt <2 x double> %a, zeroinitializer
   %1 = sitofp <2 x i1> %cmpres to <2 x double>
   ret <2 x double> %1
@@ -1011,165 +1251,239 @@ define <16 x float> @uitofp_16i1_float(<
 }
 
 define <16 x double> @uitofp_16i1_double(<16 x i32> %a) {
-; KNL-LABEL: uitofp_16i1_double:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
-; KNL-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
-; KNL-NEXT:    movq {{.*}}(%rip), %rax
-; KNL-NEXT:    vpbroadcastq %rax, %zmm0 {%k1} {z}
-; KNL-NEXT:    vpmovqd %zmm0, %ymm0
-; KNL-NEXT:    vcvtudq2pd %ymm0, %zmm0
-; KNL-NEXT:    kshiftrw $8, %k1, %k1
-; KNL-NEXT:    vpbroadcastq %rax, %zmm1 {%k1} {z}
-; KNL-NEXT:    vpmovqd %zmm1, %ymm1
-; KNL-NEXT:    vcvtudq2pd %ymm1, %zmm1
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: uitofp_16i1_double:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
-; SKX-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
-; SKX-NEXT:    movl {{.*}}(%rip), %eax
-; SKX-NEXT:    vpbroadcastd %eax, %ymm0 {%k1} {z}
-; SKX-NEXT:    vcvtudq2pd %ymm0, %zmm0
-; SKX-NEXT:    kshiftrw $8, %k1, %k1
-; SKX-NEXT:    vpbroadcastd %eax, %ymm1 {%k1} {z}
-; SKX-NEXT:    vcvtudq2pd %ymm1, %zmm1
-; SKX-NEXT:    retq
+; NOVL-LABEL: uitofp_16i1_double:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; NOVL-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
+; NOVL-NEXT:    movq {{.*}}(%rip), %rax
+; NOVL-NEXT:    vpbroadcastq %rax, %zmm0 {%k1} {z}
+; NOVL-NEXT:    vpmovqd %zmm0, %ymm0
+; NOVL-NEXT:    vcvtudq2pd %ymm0, %zmm0
+; NOVL-NEXT:    kshiftrw $8, %k1, %k1
+; NOVL-NEXT:    vpbroadcastq %rax, %zmm1 {%k1} {z}
+; NOVL-NEXT:    vpmovqd %zmm1, %ymm1
+; NOVL-NEXT:    vcvtudq2pd %ymm1, %zmm1
+; NOVL-NEXT:    retq
+;
+; VL-LABEL: uitofp_16i1_double:
+; VL:       ## BB#0:
+; VL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; VL-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
+; VL-NEXT:    movl {{.*}}(%rip), %eax
+; VL-NEXT:    vpbroadcastd %eax, %ymm0 {%k1} {z}
+; VL-NEXT:    vcvtudq2pd %ymm0, %zmm0
+; VL-NEXT:    kshiftrw $8, %k1, %k1
+; VL-NEXT:    vpbroadcastd %eax, %ymm1 {%k1} {z}
+; VL-NEXT:    vcvtudq2pd %ymm1, %zmm1
+; VL-NEXT:    retq
   %mask = icmp slt <16 x i32> %a, zeroinitializer
   %1 = uitofp <16 x i1> %mask to <16 x double>
   ret <16 x double> %1
 }
 
 define <8 x float> @uitofp_8i1_float(<8 x i32> %a) {
-; KNL-LABEL: uitofp_8i1_float:
-; KNL:       ## BB#0:
-; KNL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; KNL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
-; KNL-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
-; KNL-NEXT:    vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
-; KNL-NEXT:    vpmovqd %zmm0, %ymm0
-; KNL-NEXT:    vcvtudq2ps %zmm0, %zmm0
-; KNL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: uitofp_8i1_float:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %ymm1, %ymm1, %ymm1
-; SKX-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1
-; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
-; SKX-NEXT:    vcvtudq2ps %ymm0, %ymm0
-; SKX-NEXT:    retq
+; NOVL-LABEL: uitofp_8i1_float:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NOVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; NOVL-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
+; NOVL-NEXT:    vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
+; NOVL-NEXT:    vpmovqd %zmm0, %ymm0
+; NOVL-NEXT:    vcvtudq2ps %zmm0, %zmm0
+; NOVL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NOVL-NEXT:    retq
+;
+; VL-LABEL: uitofp_8i1_float:
+; VL:       ## BB#0:
+; VL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; VL-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1
+; VL-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
+; VL-NEXT:    vcvtudq2ps %ymm0, %ymm0
+; VL-NEXT:    retq
   %mask = icmp slt <8 x i32> %a, zeroinitializer
   %1 = uitofp <8 x i1> %mask to <8 x float>
   ret <8 x float> %1
 }
 
 define <8 x double> @uitofp_8i1_double(<8 x i32> %a) {
-; KNL-LABEL: uitofp_8i1_double:
-; KNL:       ## BB#0:
-; KNL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; KNL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
-; KNL-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
-; KNL-NEXT:    vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
-; KNL-NEXT:    vpmovqd %zmm0, %ymm0
-; KNL-NEXT:    vcvtudq2pd %ymm0, %zmm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: uitofp_8i1_double:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %ymm1, %ymm1, %ymm1
-; SKX-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1
-; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
-; SKX-NEXT:    vcvtudq2pd %ymm0, %zmm0
-; SKX-NEXT:    retq
+; NOVL-LABEL: uitofp_8i1_double:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NOVL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; NOVL-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
+; NOVL-NEXT:    vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
+; NOVL-NEXT:    vpmovqd %zmm0, %ymm0
+; NOVL-NEXT:    vcvtudq2pd %ymm0, %zmm0
+; NOVL-NEXT:    retq
+;
+; VL-LABEL: uitofp_8i1_double:
+; VL:       ## BB#0:
+; VL-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; VL-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1
+; VL-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
+; VL-NEXT:    vcvtudq2pd %ymm0, %zmm0
+; VL-NEXT:    retq
   %mask = icmp slt <8 x i32> %a, zeroinitializer
   %1 = uitofp <8 x i1> %mask to <8 x double>
   ret <8 x double> %1
 }
 
 define <4 x float> @uitofp_4i1_float(<4 x i32> %a) {
-; KNL-LABEL: uitofp_4i1_float:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; KNL-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
-; KNL-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1
-; KNL-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: uitofp_4i1_float:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; SKX-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1
-; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
-; SKX-NEXT:    vcvtudq2ps %xmm0, %xmm0
-; SKX-NEXT:    retq
+; NOVL-LABEL: uitofp_4i1_float:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
+; NOVL-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1
+; NOVL-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; NOVL-NEXT:    retq
+;
+; VLBW-LABEL: uitofp_4i1_float:
+; VLBW:       ## BB#0:
+; VLBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; VLBW-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1
+; VLBW-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
+; VLBW-NEXT:    vcvtudq2ps %xmm0, %xmm0
+; VLBW-NEXT:    retq
+;
+; VLNOBW-LABEL: uitofp_4i1_float:
+; VLNOBW:       ## BB#0:
+; VLNOBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; VLNOBW-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1
+; VLNOBW-NEXT:    kshiftlw $12, %k1, %k0
+; VLNOBW-NEXT:    kshiftrw $15, %k0, %k0
+; VLNOBW-NEXT:    kshiftlw $13, %k1, %k2
+; VLNOBW-NEXT:    kshiftrw $15, %k2, %k2
+; VLNOBW-NEXT:    kshiftlw $15, %k1, %k3
+; VLNOBW-NEXT:    kshiftrw $15, %k3, %k3
+; VLNOBW-NEXT:    kshiftlw $14, %k1, %k1
+; VLNOBW-NEXT:    kshiftrw $15, %k1, %k1
+; VLNOBW-NEXT:    kmovw %k1, %eax
+; VLNOBW-NEXT:    andl $1, %eax
+; VLNOBW-NEXT:    kmovw %k3, %ecx
+; VLNOBW-NEXT:    andl $1, %ecx
+; VLNOBW-NEXT:    vmovd %ecx, %xmm0
+; VLNOBW-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; VLNOBW-NEXT:    kmovw %k2, %eax
+; VLNOBW-NEXT:    andl $1, %eax
+; VLNOBW-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
+; VLNOBW-NEXT:    kmovw %k0, %eax
+; VLNOBW-NEXT:    andl $1, %eax
+; VLNOBW-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
+; VLNOBW-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; VLNOBW-NEXT:    retq
   %mask = icmp slt <4 x i32> %a, zeroinitializer
   %1 = uitofp <4 x i1> %mask to <4 x float>
   ret <4 x float> %1
 }
 
 define <4 x double> @uitofp_4i1_double(<4 x i32> %a) {
-; KNL-LABEL: uitofp_4i1_double:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; KNL-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
-; KNL-NEXT:    vpsrld $31, %xmm0, %xmm0
-; KNL-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: uitofp_4i1_double:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; SKX-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1
-; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
-; SKX-NEXT:    vcvtudq2pd %xmm0, %ymm0
-; SKX-NEXT:    retq
+; NOVL-LABEL: uitofp_4i1_double:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
+; NOVL-NEXT:    vpsrld $31, %xmm0, %xmm0
+; NOVL-NEXT:    vcvtdq2pd %xmm0, %ymm0
+; NOVL-NEXT:    retq
+;
+; VLBW-LABEL: uitofp_4i1_double:
+; VLBW:       ## BB#0:
+; VLBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; VLBW-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1
+; VLBW-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
+; VLBW-NEXT:    vcvtudq2pd %xmm0, %ymm0
+; VLBW-NEXT:    retq
+;
+; VLNOBW-LABEL: uitofp_4i1_double:
+; VLNOBW:       ## BB#0:
+; VLNOBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; VLNOBW-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1
+; VLNOBW-NEXT:    kshiftlw $12, %k1, %k0
+; VLNOBW-NEXT:    kshiftrw $15, %k0, %k0
+; VLNOBW-NEXT:    kshiftlw $13, %k1, %k2
+; VLNOBW-NEXT:    kshiftrw $15, %k2, %k2
+; VLNOBW-NEXT:    kshiftlw $15, %k1, %k3
+; VLNOBW-NEXT:    kshiftrw $15, %k3, %k3
+; VLNOBW-NEXT:    kshiftlw $14, %k1, %k1
+; VLNOBW-NEXT:    kshiftrw $15, %k1, %k1
+; VLNOBW-NEXT:    kmovw %k1, %eax
+; VLNOBW-NEXT:    andl $1, %eax
+; VLNOBW-NEXT:    kmovw %k3, %ecx
+; VLNOBW-NEXT:    andl $1, %ecx
+; VLNOBW-NEXT:    vmovd %ecx, %xmm0
+; VLNOBW-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
+; VLNOBW-NEXT:    kmovw %k2, %eax
+; VLNOBW-NEXT:    andl $1, %eax
+; VLNOBW-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
+; VLNOBW-NEXT:    kmovw %k0, %eax
+; VLNOBW-NEXT:    andl $1, %eax
+; VLNOBW-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
+; VLNOBW-NEXT:    vcvtdq2pd %xmm0, %ymm0
+; VLNOBW-NEXT:    retq
   %mask = icmp slt <4 x i32> %a, zeroinitializer
   %1 = uitofp <4 x i1> %mask to <4 x double>
   ret <4 x double> %1
 }
 
 define <2 x float> @uitofp_2i1_float(<2 x i32> %a) {
-; KNL-LABEL: uitofp_2i1_float:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; KNL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; KNL-NEXT:    vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
-; KNL-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; KNL-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
-; KNL-NEXT:    vpextrq $1, %xmm0, %rax
-; KNL-NEXT:    andl $1, %eax
-; KNL-NEXT:    vcvtsi2ssl %eax, %xmm2, %xmm1
-; KNL-NEXT:    vmovq %xmm0, %rax
-; KNL-NEXT:    andl $1, %eax
-; KNL-NEXT:    vcvtsi2ssl %eax, %xmm2, %xmm0
-; KNL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; KNL-NEXT:    retq
-;
-; SKX-LABEL: uitofp_2i1_float:
-; SKX:       ## BB#0:
-; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; SKX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; SKX-NEXT:    vpcmpltuq %xmm1, %xmm0, %k1
-; SKX-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
-; SKX-NEXT:    vcvtudq2ps %xmm0, %xmm0
-; SKX-NEXT:    retq
+; NOVL-LABEL: uitofp_2i1_float:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; NOVL-NEXT:    vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; NOVL-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; NOVL-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
+; NOVL-NEXT:    vpextrq $1, %xmm0, %rax
+; NOVL-NEXT:    andl $1, %eax
+; NOVL-NEXT:    vcvtsi2ssl %eax, %xmm2, %xmm1
+; NOVL-NEXT:    vmovq %xmm0, %rax
+; NOVL-NEXT:    andl $1, %eax
+; NOVL-NEXT:    vcvtsi2ssl %eax, %xmm2, %xmm0
+; NOVL-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; NOVL-NEXT:    retq
+;
+; VLBW-LABEL: uitofp_2i1_float:
+; VLBW:       ## BB#0:
+; VLBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; VLBW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; VLBW-NEXT:    vpcmpltuq %xmm1, %xmm0, %k1
+; VLBW-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm0 {%k1} {z}
+; VLBW-NEXT:    vcvtudq2ps %xmm0, %xmm0
+; VLBW-NEXT:    retq
+;
+; VLNOBW-LABEL: uitofp_2i1_float:
+; VLNOBW:       ## BB#0:
+; VLNOBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; VLNOBW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; VLNOBW-NEXT:    vpcmpltuq %xmm1, %xmm0, %k0
+; VLNOBW-NEXT:    kshiftlw $15, %k0, %k1
+; VLNOBW-NEXT:    kshiftrw $15, %k1, %k1
+; VLNOBW-NEXT:    kshiftlw $14, %k0, %k0
+; VLNOBW-NEXT:    kshiftrw $15, %k0, %k0
+; VLNOBW-NEXT:    kmovw %k0, %eax
+; VLNOBW-NEXT:    andl $1, %eax
+; VLNOBW-NEXT:    vmovd %eax, %xmm0
+; VLNOBW-NEXT:    kmovw %k1, %eax
+; VLNOBW-NEXT:    andl $1, %eax
+; VLNOBW-NEXT:    vmovd %eax, %xmm1
+; VLNOBW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; VLNOBW-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; VLNOBW-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; VLNOBW-NEXT:    retq
   %mask = icmp ult <2 x i32> %a, zeroinitializer
   %1 = uitofp <2 x i1> %mask to <2 x float>
   ret <2 x float> %1
 }
 
 define <2 x double> @uitofp_2i1_double(<2 x i32> %a) {
-; KNL-LABEL: uitofp_2i1_double:
-; KNL:       ## BB#0:
-; KNL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; KNL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; KNL-NEXT:    vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
-; KNL-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; KNL-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
-; KNL-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; KNL-NEXT:    retq
+; NOVL-LABEL: uitofp_2i1_double:
+; NOVL:       ## BB#0:
+; NOVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; NOVL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; NOVL-NEXT:    vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; NOVL-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; NOVL-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
+; NOVL-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; NOVL-NEXT:    retq
 ;
 ; SKX-LABEL: uitofp_2i1_double:
 ; SKX:       ## BB#0:
@@ -1179,6 +1493,56 @@ define <2 x double> @uitofp_2i1_double(<
 ; SKX-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm0 {%k1} {z}
 ; SKX-NEXT:    vcvtuqq2pd %xmm0, %xmm0
 ; SKX-NEXT:    retq
+;
+; AVX512VL-LABEL: uitofp_2i1_double:
+; AVX512VL:       ## BB#0:
+; AVX512VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX512VL-NEXT:    vpcmpltuq %xmm1, %xmm0, %k0
+; AVX512VL-NEXT:    kshiftlw $15, %k0, %k1
+; AVX512VL-NEXT:    kshiftrw $15, %k1, %k1
+; AVX512VL-NEXT:    kshiftlw $14, %k0, %k0
+; AVX512VL-NEXT:    kshiftrw $15, %k0, %k0
+; AVX512VL-NEXT:    kmovw %k0, %eax
+; AVX512VL-NEXT:    andl $1, %eax
+; AVX512VL-NEXT:    vcvtsi2sdl %eax, %xmm2, %xmm0
+; AVX512VL-NEXT:    kmovw %k1, %eax
+; AVX512VL-NEXT:    andl $1, %eax
+; AVX512VL-NEXT:    vcvtsi2sdl %eax, %xmm2, %xmm1
+; AVX512VL-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512VL-NEXT:    retq
+;
+; AVX512VLDQ-LABEL: uitofp_2i1_double:
+; AVX512VLDQ:       ## BB#0:
+; AVX512VLDQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VLDQ-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX512VLDQ-NEXT:    vpcmpltuq %xmm1, %xmm0, %k0
+; AVX512VLDQ-NEXT:    kshiftlw $15, %k0, %k1
+; AVX512VLDQ-NEXT:    kshiftrw $15, %k1, %k1
+; AVX512VLDQ-NEXT:    kshiftlw $14, %k0, %k0
+; AVX512VLDQ-NEXT:    kshiftrw $15, %k0, %k0
+; AVX512VLDQ-NEXT:    kmovw %k0, %eax
+; AVX512VLDQ-NEXT:    andq $1, %rax
+; AVX512VLDQ-NEXT:    vmovq %rax, %xmm0
+; AVX512VLDQ-NEXT:    kmovw %k1, %eax
+; AVX512VLDQ-NEXT:    andq $1, %rax
+; AVX512VLDQ-NEXT:    vmovq %rax, %xmm1
+; AVX512VLDQ-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512VLDQ-NEXT:    vcvtqq2pd %xmm0, %xmm0
+; AVX512VLDQ-NEXT:    retq
+;
+; AVX512VLBW-LABEL: uitofp_2i1_double:
+; AVX512VLBW:       ## BB#0:
+; AVX512VLBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX512VLBW-NEXT:    vpcmpltuq %xmm1, %xmm0, %k1
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm0 {%k1} {z}
+; AVX512VLBW-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512VLBW-NEXT:    vcvtusi2sdq %rax, %xmm2, %xmm1
+; AVX512VLBW-NEXT:    vmovq %xmm0, %rax
+; AVX512VLBW-NEXT:    vcvtusi2sdq %rax, %xmm2, %xmm0
+; AVX512VLBW-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512VLBW-NEXT:    retq
   %mask = icmp ult <2 x i32> %a, zeroinitializer
   %1 = uitofp <2 x i1> %mask to <2 x double>
  ret <2 x double> %1
 }