[llvm] r324819 - [X86] Remove some check-prefixes from avx512-cvt.ll to prepare for an upcoming patch.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sat Feb 10 09:58:57 PST 2018


Author: ctopper
Date: Sat Feb 10 09:58:56 2018
New Revision: 324819

URL: http://llvm.org/viewvc/llvm-project?rev=324819&view=rev
Log:
[X86] Remove some check-prefixes from avx512-cvt.ll to prepare for an upcoming patch.

The update script sometimes has trouble when there are check-prefixes representing every possible combination of feature flags. I have a patch where the update script was generating something that didn't pass lit.

This patch just removes some check-prefixes and expands out some of the checks to work around this.

Modified:
    llvm/trunk/test/CodeGen/X86/avx512-cvt.ll

Modified: llvm/trunk/test/CodeGen/X86/avx512-cvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-cvt.ll?rev=324819&r1=324818&r2=324819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-cvt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-cvt.ll Sat Feb 10 09:58:56 2018
@@ -1,11 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=NODQ --check-prefix=NOVLDQ --check-prefix=KNL
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=DQ --check-prefix=VL --check-prefix=VLDQ --check-prefix=VLBW --check-prefix=SKX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl  | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=NODQ --check-prefix=VL --check-prefix=VLNODQ --check-prefix=VLNOBW --check-prefix=AVX512VL
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512dq  | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=NOVL --check-prefix=DQ --check-prefix=AVX512DQ
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw  | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=NOVL --check-prefix=NODQ --check-prefix=NOVLDQ --check-prefix=AVX512BW
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl,avx512dq  | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=DQ --check-prefix=VL --check-prefix=VLDQ --check-prefix=VLNOBW --check-prefix=AVX512VLDQ
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl,avx512bw  | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=NODQ --check-prefix=VL --check-prefix=VLNODQ --check-prefix=VLBW --check-prefix=AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=VL --check-prefix=VLDQ --check-prefix=VLBW --check-prefix=SKX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl  | FileCheck %s --check-prefix=ALL --check-prefix=NODQ --check-prefix=VL --check-prefix=VLNODQ --check-prefix=VLNOBW --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512dq  | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=DQNOVL --check-prefix=AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw  | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=NODQ --check-prefix=NOVLDQ --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl,avx512dq  | FileCheck %s --check-prefix=ALL --check-prefix=VL --check-prefix=VLDQ --check-prefix=VLNOBW --check-prefix=AVX512VLDQ
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl,avx512bw  | FileCheck %s --check-prefix=ALL --check-prefix=NODQ --check-prefix=VL --check-prefix=VLNODQ --check-prefix=VLBW --check-prefix=AVX512VLBW
 
 
 define <16 x float> @sitof32(<16 x i32> %a) nounwind {
@@ -48,10 +48,15 @@ define <8 x double> @sltof864(<8 x i64>
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; NODQ-NEXT:    retq
 ;
-; DQ-LABEL: sltof864:
-; DQ:       # %bb.0:
-; DQ-NEXT:    vcvtqq2pd %zmm0, %zmm0
-; DQ-NEXT:    retq
+; VLDQ-LABEL: sltof864:
+; VLDQ:       # %bb.0:
+; VLDQ-NEXT:    vcvtqq2pd %zmm0, %zmm0
+; VLDQ-NEXT:    retq
+;
+; DQNOVL-LABEL: sltof864:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vcvtqq2pd %zmm0, %zmm0
+; DQNOVL-NEXT:    retq
   %b = sitofp <8 x i64> %a to <8 x double>
   ret <8 x double> %b
 }
@@ -78,12 +83,12 @@ define <4 x double> @slto4f64(<4 x i64>
 ; VLDQ-NEXT:    vcvtqq2pd %ymm0, %ymm0
 ; VLDQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: slto4f64:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512DQ-NEXT:    vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: slto4f64:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; DQNOVL-NEXT:    vcvtqq2pd %zmm0, %zmm0
+; DQNOVL-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; DQNOVL-NEXT:    retq
   %b = sitofp <4 x i64> %a to <4 x double>
   ret <4 x double> %b
 }
@@ -103,13 +108,13 @@ define <2 x double> @slto2f64(<2 x i64>
 ; VLDQ-NEXT:    vcvtqq2pd %xmm0, %xmm0
 ; VLDQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: slto2f64:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512DQ-NEXT:    vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512DQ-NEXT:    vzeroupper
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: slto2f64:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; DQNOVL-NEXT:    vcvtqq2pd %zmm0, %zmm0
+; DQNOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; DQNOVL-NEXT:    vzeroupper
+; DQNOVL-NEXT:    retq
   %b = sitofp <2 x i64> %a to <2 x double>
   ret <2 x double> %b
 }
@@ -131,13 +136,13 @@ define <2 x float> @sltof2f32(<2 x i64>
 ; VLDQ-NEXT:    vcvtqq2ps %xmm0, %xmm0
 ; VLDQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: sltof2f32:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512DQ-NEXT:    vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512DQ-NEXT:    vzeroupper
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: sltof2f32:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; DQNOVL-NEXT:    vcvtqq2ps %zmm0, %ymm0
+; DQNOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; DQNOVL-NEXT:    vzeroupper
+; DQNOVL-NEXT:    retq
   %b = sitofp <2 x i64> %a to <2 x float>
   ret <2 x float>%b
 }
@@ -166,13 +171,13 @@ define <4 x float> @slto4f32_mem(<4 x i6
 ; VLDQ-NEXT:    vcvtqq2psy (%rdi), %xmm0
 ; VLDQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: slto4f32_mem:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    vmovups (%rdi), %ymm0
-; AVX512DQ-NEXT:    vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512DQ-NEXT:    vzeroupper
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: slto4f32_mem:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vmovups (%rdi), %ymm0
+; DQNOVL-NEXT:    vcvtqq2ps %zmm0, %ymm0
+; DQNOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; DQNOVL-NEXT:    vzeroupper
+; DQNOVL-NEXT:    retq
   %a1 = load <4 x i64>, <4 x i64>* %a, align 8
   %b = sitofp <4 x i64> %a1 to <4 x float>
   ret <4 x float>%b
@@ -202,12 +207,12 @@ define <4 x i64> @f64to4sl(<4 x double>
 ; VLDQ-NEXT:    vcvttpd2qq %ymm0, %ymm0
 ; VLDQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: f64to4sl:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512DQ-NEXT:    vcvttpd2qq %zmm0, %zmm0
-; AVX512DQ-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: f64to4sl:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; DQNOVL-NEXT:    vcvttpd2qq %zmm0, %zmm0
+; DQNOVL-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; DQNOVL-NEXT:    retq
   %b = fptosi <4 x double> %a to <4 x i64>
   ret <4 x i64> %b
 }
@@ -236,12 +241,12 @@ define <4 x i64> @f32to4sl(<4 x float> %
 ; VLDQ-NEXT:    vcvttps2qq %xmm0, %ymm0
 ; VLDQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: f32to4sl:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512DQ-NEXT:    vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: f32to4sl:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; DQNOVL-NEXT:    vcvttps2qq %ymm0, %zmm0
+; DQNOVL-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; DQNOVL-NEXT:    retq
   %b = fptosi <4 x float> %a to <4 x i64>
   ret <4 x i64> %b
 }
@@ -270,13 +275,13 @@ define <4 x float> @slto4f32(<4 x i64> %
 ; VLDQ-NEXT:    vzeroupper
 ; VLDQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: slto4f32:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512DQ-NEXT:    vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512DQ-NEXT:    vzeroupper
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: slto4f32:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; DQNOVL-NEXT:    vcvtqq2ps %zmm0, %ymm0
+; DQNOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; DQNOVL-NEXT:    vzeroupper
+; DQNOVL-NEXT:    retq
   %b = sitofp <4 x i64> %a to <4 x float>
   ret <4 x float> %b
 }
@@ -305,13 +310,13 @@ define <4 x float> @ulto4f32(<4 x i64> %
 ; VLDQ-NEXT:    vzeroupper
 ; VLDQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: ulto4f32:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512DQ-NEXT:    vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX512DQ-NEXT:    vzeroupper
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: ulto4f32:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; DQNOVL-NEXT:    vcvtuqq2ps %zmm0, %ymm0
+; DQNOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; DQNOVL-NEXT:    vzeroupper
+; DQNOVL-NEXT:    retq
   %b = uitofp <4 x i64> %a to <4 x float>
   ret <4 x float> %b
 }
@@ -347,10 +352,15 @@ define <8 x double> @ulto8f64(<8 x i64>
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; NODQ-NEXT:    retq
 ;
-; DQ-LABEL: ulto8f64:
-; DQ:       # %bb.0:
-; DQ-NEXT:    vcvtuqq2pd %zmm0, %zmm0
-; DQ-NEXT:    retq
+; VLDQ-LABEL: ulto8f64:
+; VLDQ:       # %bb.0:
+; VLDQ-NEXT:    vcvtuqq2pd %zmm0, %zmm0
+; VLDQ-NEXT:    retq
+;
+; DQNOVL-LABEL: ulto8f64:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vcvtuqq2pd %zmm0, %zmm0
+; DQNOVL-NEXT:    retq
   %b = uitofp <8 x i64> %a to <8 x double>
   ret <8 x double> %b
 }
@@ -412,11 +422,17 @@ define <16 x double> @ulto16f64(<16 x i6
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm2, %zmm1, %zmm1
 ; NODQ-NEXT:    retq
 ;
-; DQ-LABEL: ulto16f64:
-; DQ:       # %bb.0:
-; DQ-NEXT:    vcvtuqq2pd %zmm0, %zmm0
-; DQ-NEXT:    vcvtuqq2pd %zmm1, %zmm1
-; DQ-NEXT:    retq
+; VLDQ-LABEL: ulto16f64:
+; VLDQ:       # %bb.0:
+; VLDQ-NEXT:    vcvtuqq2pd %zmm0, %zmm0
+; VLDQ-NEXT:    vcvtuqq2pd %zmm1, %zmm1
+; VLDQ-NEXT:    retq
+;
+; DQNOVL-LABEL: ulto16f64:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vcvtuqq2pd %zmm0, %zmm0
+; DQNOVL-NEXT:    vcvtuqq2pd %zmm1, %zmm1
+; DQNOVL-NEXT:    retq
   %b = uitofp <16 x i64> %a to <16 x double>
   ret <16 x double> %b
 }
@@ -606,11 +622,11 @@ define <8 x double> @i32to8f64_mask(<8 x
 ; VLNOBW-NEXT:    vcvtdq2pd %ymm1, %zmm0 {%k1}
 ; VLNOBW-NEXT:    retq
 ;
-; AVX512DQ-LABEL: i32to8f64_mask:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    kmovw %edi, %k1
-; AVX512DQ-NEXT:    vcvtdq2pd %ymm1, %zmm0 {%k1}
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: i32to8f64_mask:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    kmovw %edi, %k1
+; DQNOVL-NEXT:    vcvtdq2pd %ymm1, %zmm0 {%k1}
+; DQNOVL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: i32to8f64_mask:
 ; AVX512BW:       # %bb.0:
@@ -641,11 +657,11 @@ define <8 x double> @sito8f64_maskz(<8 x
 ; VLNOBW-NEXT:    vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
 ; VLNOBW-NEXT:    retq
 ;
-; AVX512DQ-LABEL: sito8f64_maskz:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    kmovw %edi, %k1
-; AVX512DQ-NEXT:    vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: sito8f64_maskz:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    kmovw %edi, %k1
+; DQNOVL-NEXT:    vcvtdq2pd %ymm0, %zmm0 {%k1} {z}
+; DQNOVL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: sito8f64_maskz:
 ; AVX512BW:       # %bb.0:
@@ -929,10 +945,15 @@ define <8 x float> @slto8f32(<8 x i64> %
 ; NODQ-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; NODQ-NEXT:    retq
 ;
-; DQ-LABEL: slto8f32:
-; DQ:       # %bb.0:
-; DQ-NEXT:    vcvtqq2ps %zmm0, %ymm0
-; DQ-NEXT:    retq
+; VLDQ-LABEL: slto8f32:
+; VLDQ:       # %bb.0:
+; VLDQ-NEXT:    vcvtqq2ps %zmm0, %ymm0
+; VLDQ-NEXT:    retq
+;
+; DQNOVL-LABEL: slto8f32:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vcvtqq2ps %zmm0, %ymm0
+; DQNOVL-NEXT:    retq
   %b = sitofp <8 x i64> %a to <8 x float>
   ret <8 x float> %b
 }
@@ -995,12 +1016,19 @@ define <16 x float> @slto16f32(<16 x i64
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; NODQ-NEXT:    retq
 ;
-; DQ-LABEL: slto16f32:
-; DQ:       # %bb.0:
-; DQ-NEXT:    vcvtqq2ps %zmm0, %ymm0
-; DQ-NEXT:    vcvtqq2ps %zmm1, %ymm1
-; DQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
-; DQ-NEXT:    retq
+; VLDQ-LABEL: slto16f32:
+; VLDQ:       # %bb.0:
+; VLDQ-NEXT:    vcvtqq2ps %zmm0, %ymm0
+; VLDQ-NEXT:    vcvtqq2ps %zmm1, %ymm1
+; VLDQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; VLDQ-NEXT:    retq
+;
+; DQNOVL-LABEL: slto16f32:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vcvtqq2ps %zmm0, %ymm0
+; DQNOVL-NEXT:    vcvtqq2ps %zmm1, %ymm1
+; DQNOVL-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; DQNOVL-NEXT:    retq
   %b = sitofp <16 x i64> %a to <16 x float>
   ret <16 x float> %b
 }
@@ -1036,10 +1064,15 @@ define <8 x double> @slto8f64(<8 x i64>
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; NODQ-NEXT:    retq
 ;
-; DQ-LABEL: slto8f64:
-; DQ:       # %bb.0:
-; DQ-NEXT:    vcvtqq2pd %zmm0, %zmm0
-; DQ-NEXT:    retq
+; VLDQ-LABEL: slto8f64:
+; VLDQ:       # %bb.0:
+; VLDQ-NEXT:    vcvtqq2pd %zmm0, %zmm0
+; VLDQ-NEXT:    retq
+;
+; DQNOVL-LABEL: slto8f64:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vcvtqq2pd %zmm0, %zmm0
+; DQNOVL-NEXT:    retq
   %b = sitofp <8 x i64> %a to <8 x double>
   ret <8 x double> %b
 }
@@ -1101,11 +1134,17 @@ define <16 x double> @slto16f64(<16 x i6
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm2, %zmm1, %zmm1
 ; NODQ-NEXT:    retq
 ;
-; DQ-LABEL: slto16f64:
-; DQ:       # %bb.0:
-; DQ-NEXT:    vcvtqq2pd %zmm0, %zmm0
-; DQ-NEXT:    vcvtqq2pd %zmm1, %zmm1
-; DQ-NEXT:    retq
+; VLDQ-LABEL: slto16f64:
+; VLDQ:       # %bb.0:
+; VLDQ-NEXT:    vcvtqq2pd %zmm0, %zmm0
+; VLDQ-NEXT:    vcvtqq2pd %zmm1, %zmm1
+; VLDQ-NEXT:    retq
+;
+; DQNOVL-LABEL: slto16f64:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vcvtqq2pd %zmm0, %zmm0
+; DQNOVL-NEXT:    vcvtqq2pd %zmm1, %zmm1
+; DQNOVL-NEXT:    retq
   %b = sitofp <16 x i64> %a to <16 x double>
   ret <16 x double> %b
 }
@@ -1141,10 +1180,15 @@ define <8 x float> @ulto8f32(<8 x i64> %
 ; NODQ-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; NODQ-NEXT:    retq
 ;
-; DQ-LABEL: ulto8f32:
-; DQ:       # %bb.0:
-; DQ-NEXT:    vcvtuqq2ps %zmm0, %ymm0
-; DQ-NEXT:    retq
+; VLDQ-LABEL: ulto8f32:
+; VLDQ:       # %bb.0:
+; VLDQ-NEXT:    vcvtuqq2ps %zmm0, %ymm0
+; VLDQ-NEXT:    retq
+;
+; DQNOVL-LABEL: ulto8f32:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vcvtuqq2ps %zmm0, %ymm0
+; DQNOVL-NEXT:    retq
   %b = uitofp <8 x i64> %a to <8 x float>
   ret <8 x float> %b
 }
@@ -1207,12 +1251,19 @@ define <16 x float> @ulto16f32(<16 x i64
 ; NODQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; NODQ-NEXT:    retq
 ;
-; DQ-LABEL: ulto16f32:
-; DQ:       # %bb.0:
-; DQ-NEXT:    vcvtuqq2ps %zmm0, %ymm0
-; DQ-NEXT:    vcvtuqq2ps %zmm1, %ymm1
-; DQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
-; DQ-NEXT:    retq
+; VLDQ-LABEL: ulto16f32:
+; VLDQ:       # %bb.0:
+; VLDQ-NEXT:    vcvtuqq2ps %zmm0, %ymm0
+; VLDQ-NEXT:    vcvtuqq2ps %zmm1, %ymm1
+; VLDQ-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; VLDQ-NEXT:    retq
+;
+; DQNOVL-LABEL: ulto16f32:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vcvtuqq2ps %zmm0, %ymm0
+; DQNOVL-NEXT:    vcvtuqq2ps %zmm1, %ymm1
+; DQNOVL-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; DQNOVL-NEXT:    retq
   %b = uitofp <16 x i64> %a to <16 x float>
   ret <16 x float> %b
 }
@@ -1236,11 +1287,11 @@ define <8 x double> @uito8f64_mask(<8 x
 ; VLNOBW-NEXT:    vcvtudq2pd %ymm1, %zmm0 {%k1}
 ; VLNOBW-NEXT:    retq
 ;
-; AVX512DQ-LABEL: uito8f64_mask:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    kmovw %edi, %k1
-; AVX512DQ-NEXT:    vcvtudq2pd %ymm1, %zmm0 {%k1}
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: uito8f64_mask:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    kmovw %edi, %k1
+; DQNOVL-NEXT:    vcvtudq2pd %ymm1, %zmm0 {%k1}
+; DQNOVL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: uito8f64_mask:
 ; AVX512BW:       # %bb.0:
@@ -1271,11 +1322,11 @@ define <8 x double> @uito8f64_maskz(<8 x
 ; VLNOBW-NEXT:    vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
 ; VLNOBW-NEXT:    retq
 ;
-; AVX512DQ-LABEL: uito8f64_maskz:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    kmovw %edi, %k1
-; AVX512DQ-NEXT:    vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: uito8f64_maskz:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    kmovw %edi, %k1
+; DQNOVL-NEXT:    vcvtudq2pd %ymm0, %zmm0 {%k1} {z}
+; DQNOVL-NEXT:    retq
 ;
 ; AVX512BW-LABEL: uito8f64_maskz:
 ; AVX512BW:       # %bb.0:
@@ -1400,12 +1451,19 @@ define <16 x float> @sbto16f32(<16 x i32
 ; NODQ-NEXT:    vcvtdq2ps %zmm0, %zmm0
 ; NODQ-NEXT:    retq
 ;
-; DQ-LABEL: sbto16f32:
-; DQ:       # %bb.0:
-; DQ-NEXT:    vpmovd2m %zmm0, %k0
-; DQ-NEXT:    vpmovm2d %k0, %zmm0
-; DQ-NEXT:    vcvtdq2ps %zmm0, %zmm0
-; DQ-NEXT:    retq
+; VLDQ-LABEL: sbto16f32:
+; VLDQ:       # %bb.0:
+; VLDQ-NEXT:    vpmovd2m %zmm0, %k0
+; VLDQ-NEXT:    vpmovm2d %k0, %zmm0
+; VLDQ-NEXT:    vcvtdq2ps %zmm0, %zmm0
+; VLDQ-NEXT:    retq
+;
+; DQNOVL-LABEL: sbto16f32:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vpmovd2m %zmm0, %k0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm0
+; DQNOVL-NEXT:    vcvtdq2ps %zmm0, %zmm0
+; DQNOVL-NEXT:    retq
   %mask = icmp slt <16 x i32> %a, zeroinitializer
   %1 = sitofp <16 x i1> %mask to <16 x float>
   ret <16 x float> %1
@@ -1500,16 +1558,16 @@ define <16 x double> @sbto16f64(<16 x do
 ; VLNODQ-NEXT:    vcvtdq2pd %ymm1, %zmm1
 ; VLNODQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: sbto16f64:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; AVX512DQ-NEXT:    vcmpltpd %zmm1, %zmm2, %k0
-; AVX512DQ-NEXT:    vcmpltpd %zmm0, %zmm2, %k1
-; AVX512DQ-NEXT:    vpmovm2d %k1, %zmm0
-; AVX512DQ-NEXT:    vcvtdq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm1
-; AVX512DQ-NEXT:    vcvtdq2pd %ymm1, %zmm1
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: sbto16f64:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; DQNOVL-NEXT:    vcmpltpd %zmm1, %zmm2, %k0
+; DQNOVL-NEXT:    vcmpltpd %zmm0, %zmm2, %k1
+; DQNOVL-NEXT:    vpmovm2d %k1, %zmm0
+; DQNOVL-NEXT:    vcvtdq2pd %ymm0, %zmm0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm1
+; DQNOVL-NEXT:    vcvtdq2pd %ymm1, %zmm1
+; DQNOVL-NEXT:    retq
   %cmpres = fcmp ogt <16 x double> %a, zeroinitializer
   %1 = sitofp <16 x i1> %cmpres to <16 x double>
   ret <16 x double> %1
@@ -1541,13 +1599,13 @@ define <8 x double> @sbto8f64(<8 x doubl
 ; VLNODQ-NEXT:    vcvtdq2pd %ymm0, %zmm0
 ; VLNODQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: sbto8f64:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; AVX512DQ-NEXT:    vcmpltpd %zmm0, %zmm1, %k0
-; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT:    vcvtdq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: sbto8f64:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; DQNOVL-NEXT:    vcmpltpd %zmm0, %zmm1, %k0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm0
+; DQNOVL-NEXT:    vcvtdq2pd %ymm0, %zmm0
+; DQNOVL-NEXT:    retq
   %cmpres = fcmp ogt <8 x double> %a, zeroinitializer
   %1 = sitofp <8 x i1> %cmpres to <8 x double>
   ret <8 x double> %1
@@ -1580,14 +1638,14 @@ define <8 x float> @sbto8f32(<8 x float>
 ; VLNODQ-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; VLNODQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: sbto8f32:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512DQ-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX512DQ-NEXT:    vcmpltps %zmm0, %zmm1, %k0
-; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: sbto8f32:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; DQNOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; DQNOVL-NEXT:    vcmpltps %zmm0, %zmm1, %k0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm0
+; DQNOVL-NEXT:    vcvtdq2ps %ymm0, %ymm0
+; DQNOVL-NEXT:    retq
   %cmpres = fcmp ogt <8 x float> %a, zeroinitializer
   %1 = sitofp <8 x i1> %cmpres to <8 x float>
   ret <8 x float> %1
@@ -1621,15 +1679,15 @@ define <4 x float> @sbto4f32(<4 x float>
 ; VLNODQ-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; VLNODQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: sbto4f32:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512DQ-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX512DQ-NEXT:    vcmpltps %zmm0, %zmm1, %k0
-; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT:    vcvtdq2ps %xmm0, %xmm0
-; AVX512DQ-NEXT:    vzeroupper
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: sbto4f32:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; DQNOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; DQNOVL-NEXT:    vcmpltps %zmm0, %zmm1, %k0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm0
+; DQNOVL-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; DQNOVL-NEXT:    vzeroupper
+; DQNOVL-NEXT:    retq
   %cmpres = fcmp ogt <4 x float> %a, zeroinitializer
   %1 = sitofp <4 x i1> %cmpres to <4 x float>
   ret <4 x float> %1
@@ -1662,14 +1720,14 @@ define <4 x double> @sbto4f64(<4 x doubl
 ; VLNODQ-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; VLNODQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: sbto4f64:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512DQ-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; AVX512DQ-NEXT:    vcmpltpd %zmm0, %zmm1, %k0
-; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: sbto4f64:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; DQNOVL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; DQNOVL-NEXT:    vcmpltpd %zmm0, %zmm1, %k0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm0
+; DQNOVL-NEXT:    vcvtdq2pd %xmm0, %ymm0
+; DQNOVL-NEXT:    retq
   %cmpres = fcmp ogt <4 x double> %a, zeroinitializer
   %1 = sitofp <4 x i1> %cmpres to <4 x double>
   ret <4 x double> %1
@@ -1703,15 +1761,15 @@ define <2 x float> @sbto2f32(<2 x float>
 ; VLNODQ-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; VLNODQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: sbto2f32:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512DQ-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX512DQ-NEXT:    vcmpltps %zmm0, %zmm1, %k0
-; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT:    vcvtdq2ps %xmm0, %xmm0
-; AVX512DQ-NEXT:    vzeroupper
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: sbto2f32:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; DQNOVL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; DQNOVL-NEXT:    vcmpltps %zmm0, %zmm1, %k0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm0
+; DQNOVL-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; DQNOVL-NEXT:    vzeroupper
+; DQNOVL-NEXT:    retq
   %cmpres = fcmp ogt <2 x float> %a, zeroinitializer
   %1 = sitofp <2 x i1> %cmpres to <2 x float>
   ret <2 x float> %1
@@ -1745,15 +1803,15 @@ define <2 x double> @sbto2f64(<2 x doubl
 ; VLNODQ-NEXT:    vcvtdq2pd %xmm0, %xmm0
 ; VLNODQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: sbto2f64:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512DQ-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; AVX512DQ-NEXT:    vcmpltpd %zmm0, %zmm1, %k0
-; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT:    vcvtdq2pd %xmm0, %xmm0
-; AVX512DQ-NEXT:    vzeroupper
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: sbto2f64:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; DQNOVL-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; DQNOVL-NEXT:    vcmpltpd %zmm0, %zmm1, %k0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm0
+; DQNOVL-NEXT:    vcvtdq2pd %xmm0, %xmm0
+; DQNOVL-NEXT:    vzeroupper
+; DQNOVL-NEXT:    retq
   %cmpres = fcmp ogt <2 x double> %a, zeroinitializer
   %1 = sitofp <2 x i1> %cmpres to <2 x double>
   ret <2 x double> %1
@@ -1897,13 +1955,21 @@ define <16 x float> @ubto16f32(<16 x i32
 ; NODQ-NEXT:    vcvtdq2ps %zmm0, %zmm0
 ; NODQ-NEXT:    retq
 ;
-; DQ-LABEL: ubto16f32:
-; DQ:       # %bb.0:
-; DQ-NEXT:    vpmovd2m %zmm0, %k0
-; DQ-NEXT:    vpmovm2d %k0, %zmm0
-; DQ-NEXT:    vpsrld $31, %zmm0, %zmm0
-; DQ-NEXT:    vcvtdq2ps %zmm0, %zmm0
-; DQ-NEXT:    retq
+; VLDQ-LABEL: ubto16f32:
+; VLDQ:       # %bb.0:
+; VLDQ-NEXT:    vpmovd2m %zmm0, %k0
+; VLDQ-NEXT:    vpmovm2d %k0, %zmm0
+; VLDQ-NEXT:    vpsrld $31, %zmm0, %zmm0
+; VLDQ-NEXT:    vcvtdq2ps %zmm0, %zmm0
+; VLDQ-NEXT:    retq
+;
+; DQNOVL-LABEL: ubto16f32:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vpmovd2m %zmm0, %k0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm0
+; DQNOVL-NEXT:    vpsrld $31, %zmm0, %zmm0
+; DQNOVL-NEXT:    vcvtdq2ps %zmm0, %zmm0
+; DQNOVL-NEXT:    retq
   %mask = icmp slt <16 x i32> %a, zeroinitializer
   %1 = uitofp <16 x i1> %mask to <16 x float>
   ret <16 x float> %1
@@ -1949,17 +2015,17 @@ define <16 x double> @ubto16f64(<16 x i3
 ; VLNODQ-NEXT:    vcvtdq2pd %ymm1, %zmm1
 ; VLNODQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: ubto16f64:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    vpmovd2m %zmm0, %k0
-; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT:    vpsrld $31, %ymm0, %ymm0
-; AVX512DQ-NEXT:    vcvtdq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT:    kshiftrw $8, %k0, %k0
-; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm1
-; AVX512DQ-NEXT:    vpsrld $31, %ymm1, %ymm1
-; AVX512DQ-NEXT:    vcvtdq2pd %ymm1, %zmm1
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: ubto16f64:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vpmovd2m %zmm0, %k0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm0
+; DQNOVL-NEXT:    vpsrld $31, %ymm0, %ymm0
+; DQNOVL-NEXT:    vcvtdq2pd %ymm0, %zmm0
+; DQNOVL-NEXT:    kshiftrw $8, %k0, %k0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm1
+; DQNOVL-NEXT:    vpsrld $31, %ymm1, %ymm1
+; DQNOVL-NEXT:    vcvtdq2pd %ymm1, %zmm1
+; DQNOVL-NEXT:    retq
   %mask = icmp slt <16 x i32> %a, zeroinitializer
   %1 = uitofp <16 x i1> %mask to <16 x double>
   ret <16 x double> %1
@@ -1994,14 +2060,14 @@ define <8 x float> @ubto8f32(<8 x i32> %
 ; VLNODQ-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; VLNODQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: ubto8f32:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512DQ-NEXT:    vpmovd2m %zmm0, %k0
-; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT:    vpsrld $31, %ymm0, %ymm0
-; AVX512DQ-NEXT:    vcvtdq2ps %ymm0, %ymm0
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: ubto8f32:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; DQNOVL-NEXT:    vpmovd2m %zmm0, %k0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm0
+; DQNOVL-NEXT:    vpsrld $31, %ymm0, %ymm0
+; DQNOVL-NEXT:    vcvtdq2ps %ymm0, %ymm0
+; DQNOVL-NEXT:    retq
   %mask = icmp slt <8 x i32> %a, zeroinitializer
   %1 = uitofp <8 x i1> %mask to <8 x float>
   ret <8 x float> %1
@@ -2036,14 +2102,14 @@ define <8 x double> @ubto8f64(<8 x i32>
 ; VLNODQ-NEXT:    vcvtdq2pd %ymm0, %zmm0
 ; VLNODQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: ubto8f64:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512DQ-NEXT:    vpmovd2m %zmm0, %k0
-; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT:    vpsrld $31, %ymm0, %ymm0
-; AVX512DQ-NEXT:    vcvtdq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: ubto8f64:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; DQNOVL-NEXT:    vpmovd2m %zmm0, %k0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm0
+; DQNOVL-NEXT:    vpsrld $31, %ymm0, %ymm0
+; DQNOVL-NEXT:    vcvtdq2pd %ymm0, %zmm0
+; DQNOVL-NEXT:    retq
   %mask = icmp slt <8 x i32> %a, zeroinitializer
   %1 = uitofp <8 x i1> %mask to <8 x double>
   ret <8 x double> %1
@@ -2079,15 +2145,15 @@ define <4 x float> @ubto4f32(<4 x i32> %
 ; VLNODQ-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; VLNODQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: ubto4f32:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512DQ-NEXT:    vpmovd2m %zmm0, %k0
-; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT:    vpsrld $31, %xmm0, %xmm0
-; AVX512DQ-NEXT:    vcvtdq2ps %xmm0, %xmm0
-; AVX512DQ-NEXT:    vzeroupper
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: ubto4f32:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; DQNOVL-NEXT:    vpmovd2m %zmm0, %k0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm0
+; DQNOVL-NEXT:    vpsrld $31, %xmm0, %xmm0
+; DQNOVL-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; DQNOVL-NEXT:    vzeroupper
+; DQNOVL-NEXT:    retq
   %mask = icmp slt <4 x i32> %a, zeroinitializer
   %1 = uitofp <4 x i1> %mask to <4 x float>
   ret <4 x float> %1
@@ -2122,14 +2188,14 @@ define <4 x double> @ubto4f64(<4 x i32>
 ; VLNODQ-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; VLNODQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: ubto4f64:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512DQ-NEXT:    vpmovd2m %zmm0, %k0
-; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT:    vpsrld $31, %xmm0, %xmm0
-; AVX512DQ-NEXT:    vcvtdq2pd %xmm0, %ymm0
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: ubto4f64:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; DQNOVL-NEXT:    vpmovd2m %zmm0, %k0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm0
+; DQNOVL-NEXT:    vpsrld $31, %xmm0, %xmm0
+; DQNOVL-NEXT:    vcvtdq2pd %xmm0, %ymm0
+; DQNOVL-NEXT:    retq
   %mask = icmp slt <4 x i32> %a, zeroinitializer
   %1 = uitofp <4 x i1> %mask to <4 x double>
   ret <4 x double> %1
@@ -2168,16 +2234,16 @@ define <2 x float> @ubto2f32(<2 x i32> %
 ; VLNODQ-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; VLNODQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: ubto2f32:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512DQ-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; AVX512DQ-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT:    vpsrld $31, %xmm0, %xmm0
-; AVX512DQ-NEXT:    vcvtdq2ps %xmm0, %xmm0
-; AVX512DQ-NEXT:    vzeroupper
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: ubto2f32:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; DQNOVL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; DQNOVL-NEXT:    vptestmq %zmm0, %zmm0, %k0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm0
+; DQNOVL-NEXT:    vpsrld $31, %xmm0, %xmm0
+; DQNOVL-NEXT:    vcvtdq2ps %xmm0, %xmm0
+; DQNOVL-NEXT:    vzeroupper
+; DQNOVL-NEXT:    retq
   %mask = icmp ne <2 x i32> %a, zeroinitializer
   %1 = uitofp <2 x i1> %mask to <2 x float>
   ret <2 x float> %1
@@ -2217,17 +2283,17 @@ define <2 x double> @ubto2f64(<2 x i32>
 ; VLNODQ-NEXT:    vcvtudq2pd %xmm0, %xmm0
 ; VLNODQ-NEXT:    retq
 ;
-; AVX512DQ-LABEL: ubto2f64:
-; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512DQ-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; AVX512DQ-NEXT:    vptestmq %zmm0, %zmm0, %k0
-; AVX512DQ-NEXT:    vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT:    vpsrld $31, %xmm0, %xmm0
-; AVX512DQ-NEXT:    vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512DQ-NEXT:    vzeroupper
-; AVX512DQ-NEXT:    retq
+; DQNOVL-LABEL: ubto2f64:
+; DQNOVL:       # %bb.0:
+; DQNOVL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; DQNOVL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; DQNOVL-NEXT:    vptestmq %zmm0, %zmm0, %k0
+; DQNOVL-NEXT:    vpmovm2d %k0, %zmm0
+; DQNOVL-NEXT:    vpsrld $31, %xmm0, %xmm0
+; DQNOVL-NEXT:    vcvtudq2pd %ymm0, %zmm0
+; DQNOVL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; DQNOVL-NEXT:    vzeroupper
+; DQNOVL-NEXT:    retq
   %mask = icmp ne <2 x i32> %a, zeroinitializer
   %1 = uitofp <2 x i1> %mask to <2 x double>
   ret <2 x double> %1




More information about the llvm-commits mailing list