[llvm] c926d96 - [X86] Make the AVX1 check lines in vec-strict-inttofp-256.ll test 'avx' instead of 'avx2'. Add AVX2 checks. NFC
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Sun Dec 29 11:15:28 PST 2019
Author: Craig Topper
Date: 2019-12-29T11:11:26-08:00
New Revision: c926d96fcab1e965ada6e045f762a73928d58c82
URL: https://github.com/llvm/llvm-project/commit/c926d96fcab1e965ada6e045f762a73928d58c82
DIFF: https://github.com/llvm/llvm-project/commit/c926d96fcab1e965ada6e045f762a73928d58c82.diff
LOG: [X86] Make the AVX1 check lines in vec-strict-inttofp-256.ll test 'avx' instead of 'avx2'. Add AVX2 checks. NFC
Added:
Modified:
llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
Removed:
################################################################################
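Context on how the checks below are produced: the test's NOTE line says its assertions are autogenerated, so after editing the RUN lines the per-prefix CHECK blocks are regenerated with utils/update_llc_test_checks.py rather than written by hand. A minimal sketch of that regeneration, assuming a locally built llc (the --llc-binary path shown is illustrative):

    python llvm/utils/update_llc_test_checks.py \
        --llc-binary=build/bin/llc \
        llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll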
diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
index d61325e527ea..fe04449d2f54 100644
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 -O3 -disable-strictnode-mutation | FileCheck %s --check-prefixes=CHECK,AVX1,AVX-32,AVX1-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 -O3 -disable-strictnode-mutation | FileCheck %s --check-prefixes=CHECK,AVX1,AVX-64,AVX1-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 -disable-strictnode-mutation | FileCheck %s --check-prefixes=CHECK,AVX1,AVX-32,AVX1-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 -disable-strictnode-mutation | FileCheck %s --check-prefixes=CHECK,AVX1,AVX-64,AVX1-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 -O3 -disable-strictnode-mutation | FileCheck %s --check-prefixes=CHECK,AVX2,AVX-32,AVX2-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 -O3 -disable-strictnode-mutation | FileCheck %s --check-prefixes=CHECK,AVX2,AVX-64,AVX2-64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 -disable-strictnode-mutation | FileCheck %s --check-prefixes=CHECK,AVX512F,AVX-32,AVX512F-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 -disable-strictnode-mutation | FileCheck %s --check-prefixes=CHECK,AVX512F,AVX-64,AVX512F-64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512vl -O3 -disable-strictnode-mutation | FileCheck %s --check-prefixes=CHECK,AVX512VL,AVX-32,AVX512VL-32
@@ -32,13 +34,57 @@ declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64>,
declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
define <8 x float> @sitofp_v8i1_v8f32(<8 x i1> %x) #0 {
-; CHECK-LABEL: sitofp_v8i1_v8f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; CHECK-NEXT: vpslld $31, %ymm0, %ymm0
-; CHECK-NEXT: vpsrad $31, %ymm0, %ymm0
-; CHECK-NEXT: vcvtdq2ps %ymm0, %ymm0
-; CHECK-NEXT: ret{{[l|q]}}
+; AVX1-LABEL: sitofp_v8i1_v8f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX1-NEXT: ret{{[l|q]}}
+;
+; AVX2-LABEL: sitofp_v8i1_v8f32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0
+; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX2-NEXT: ret{{[l|q]}}
+;
+; AVX512F-LABEL: sitofp_v8i1_v8f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512F-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrad $31, %ymm0, %ymm0
+; AVX512F-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512F-NEXT: ret{{[l|q]}}
+;
+; AVX512VL-LABEL: sitofp_v8i1_v8f32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512VL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrad $31, %ymm0, %ymm0
+; AVX512VL-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512VL-NEXT: ret{{[l|q]}}
+;
+; AVX512DQ-LABEL: sitofp_v8i1_v8f32:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512DQ-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrad $31, %ymm0, %ymm0
+; AVX512DQ-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512DQ-NEXT: ret{{[l|q]}}
+;
+; AVX512DQVL-LABEL: sitofp_v8i1_v8f32:
+; AVX512DQVL: # %bb.0:
+; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512DQVL-NEXT: vpslld $31, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpsrad $31, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512DQVL-NEXT: ret{{[l|q]}}
%result = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i1(<8 x i1> %x,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
@@ -46,19 +92,67 @@ define <8 x float> @sitofp_v8i1_v8f32(<8 x i1> %x) #0 {
}
define <8 x float> @uitofp_v8i1_v8f32(<8 x i1> %x) #0 {
-; AVX-32-LABEL: uitofp_v8i1_v8f32:
-; AVX-32: # %bb.0:
-; AVX-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
-; AVX-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX-32-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX-32-NEXT: retl
+; AVX1-32-LABEL: uitofp_v8i1_v8f32:
+; AVX1-32: # %bb.0:
+; AVX1-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX1-32-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-32-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-32-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-32-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX1-32-NEXT: retl
+;
+; AVX1-64-LABEL: uitofp_v8i1_v8f32:
+; AVX1-64: # %bb.0:
+; AVX1-64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-64-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-64-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-64-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-64-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX1-64-NEXT: retq
+;
+; AVX2-32-LABEL: uitofp_v8i1_v8f32:
+; AVX2-32: # %bb.0:
+; AVX2-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX2-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-32-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX2-32-NEXT: retl
+;
+; AVX2-64-LABEL: uitofp_v8i1_v8f32:
+; AVX2-64: # %bb.0:
+; AVX2-64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-64-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX2-64-NEXT: retq
+;
+; AVX512F-32-LABEL: uitofp_v8i1_v8f32:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512F-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512F-32-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512F-32-NEXT: retl
+;
+; AVX512F-64-LABEL: uitofp_v8i1_v8f32:
+; AVX512F-64: # %bb.0:
+; AVX512F-64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512F-64-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512F-64-NEXT: retq
+;
+; AVX512VL-32-LABEL: uitofp_v8i1_v8f32:
+; AVX512VL-32: # %bb.0:
+; AVX512VL-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512VL-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512VL-32-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512VL-32-NEXT: retl
;
-; AVX-64-LABEL: uitofp_v8i1_v8f32:
-; AVX-64: # %bb.0:
-; AVX-64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX-64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX-64-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX-64-NEXT: retq
+; AVX512VL-64-LABEL: uitofp_v8i1_v8f32:
+; AVX512VL-64: # %bb.0:
+; AVX512VL-64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512VL-64-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512VL-64-NEXT: retq
;
; AVX512DQ-32-LABEL: uitofp_v8i1_v8f32:
; AVX512DQ-32: # %bb.0:
@@ -94,11 +188,44 @@ define <8 x float> @uitofp_v8i1_v8f32(<8 x i1> %x) #0 {
}
define <8 x float> @sitofp_v8i8_v8f32(<8 x i8> %x) #0 {
-; CHECK-LABEL: sitofp_v8i8_v8f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vpmovsxbd %xmm0, %ymm0
-; CHECK-NEXT: vcvtdq2ps %ymm0, %ymm0
-; CHECK-NEXT: ret{{[l|q]}}
+; AVX1-LABEL: sitofp_v8i8_v8f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovsxbd %xmm0, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX1-NEXT: ret{{[l|q]}}
+;
+; AVX2-LABEL: sitofp_v8i8_v8f32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
+; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX2-NEXT: ret{{[l|q]}}
+;
+; AVX512F-LABEL: sitofp_v8i8_v8f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpmovsxbd %xmm0, %ymm0
+; AVX512F-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512F-NEXT: ret{{[l|q]}}
+;
+; AVX512VL-LABEL: sitofp_v8i8_v8f32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpmovsxbd %xmm0, %ymm0
+; AVX512VL-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512VL-NEXT: ret{{[l|q]}}
+;
+; AVX512DQ-LABEL: sitofp_v8i8_v8f32:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpmovsxbd %xmm0, %ymm0
+; AVX512DQ-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512DQ-NEXT: ret{{[l|q]}}
+;
+; AVX512DQVL-LABEL: sitofp_v8i8_v8f32:
+; AVX512DQVL: # %bb.0:
+; AVX512DQVL-NEXT: vpmovsxbd %xmm0, %ymm0
+; AVX512DQVL-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512DQVL-NEXT: ret{{[l|q]}}
%result = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i8(<8 x i8> %x,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
@@ -106,11 +233,44 @@ define <8 x float> @sitofp_v8i8_v8f32(<8 x i8> %x) #0 {
}
define <8 x float> @uitofp_v8i8_v8f32(<8 x i8> %x) #0 {
-; CHECK-LABEL: uitofp_v8i8_v8f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; CHECK-NEXT: vcvtdq2ps %ymm0, %ymm0
-; CHECK-NEXT: ret{{[l|q]}}
+; AVX1-LABEL: uitofp_v8i8_v8f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX1-NEXT: ret{{[l|q]}}
+;
+; AVX2-LABEL: uitofp_v8i8_v8f32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX2-NEXT: ret{{[l|q]}}
+;
+; AVX512F-LABEL: uitofp_v8i8_v8f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX512F-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512F-NEXT: ret{{[l|q]}}
+;
+; AVX512VL-LABEL: uitofp_v8i8_v8f32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX512VL-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512VL-NEXT: ret{{[l|q]}}
+;
+; AVX512DQ-LABEL: uitofp_v8i8_v8f32:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX512DQ-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512DQ-NEXT: ret{{[l|q]}}
+;
+; AVX512DQVL-LABEL: uitofp_v8i8_v8f32:
+; AVX512DQVL: # %bb.0:
+; AVX512DQVL-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
+; AVX512DQVL-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512DQVL-NEXT: ret{{[l|q]}}
%result = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i8(<8 x i8> %x,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
@@ -118,11 +278,44 @@ define <8 x float> @uitofp_v8i8_v8f32(<8 x i8> %x) #0 {
}
define <8 x float> @sitofp_v8i16_v8f32(<8 x i16> %x) #0 {
-; CHECK-LABEL: sitofp_v8i16_v8f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vpmovsxwd %xmm0, %ymm0
-; CHECK-NEXT: vcvtdq2ps %ymm0, %ymm0
-; CHECK-NEXT: ret{{[l|q]}}
+; AVX1-LABEL: sitofp_v8i16_v8f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX1-NEXT: ret{{[l|q]}}
+;
+; AVX2-LABEL: sitofp_v8i16_v8f32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX2-NEXT: ret{{[l|q]}}
+;
+; AVX512F-LABEL: sitofp_v8i16_v8f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX512F-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512F-NEXT: ret{{[l|q]}}
+;
+; AVX512VL-LABEL: sitofp_v8i16_v8f32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX512VL-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512VL-NEXT: ret{{[l|q]}}
+;
+; AVX512DQ-LABEL: sitofp_v8i16_v8f32:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX512DQ-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512DQ-NEXT: ret{{[l|q]}}
+;
+; AVX512DQVL-LABEL: sitofp_v8i16_v8f32:
+; AVX512DQVL: # %bb.0:
+; AVX512DQVL-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX512DQVL-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512DQVL-NEXT: ret{{[l|q]}}
%result = call <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i16(<8 x i16> %x,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
@@ -130,11 +323,44 @@ define <8 x float> @sitofp_v8i16_v8f32(<8 x i16> %x) #0 {
}
define <8 x float> @uitofp_v8i16_v8f32(<8 x i16> %x) #0 {
-; CHECK-LABEL: uitofp_v8i16_v8f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; CHECK-NEXT: vcvtdq2ps %ymm0, %ymm0
-; CHECK-NEXT: ret{{[l|q]}}
+; AVX1-LABEL: uitofp_v8i16_v8f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX1-NEXT: ret{{[l|q]}}
+;
+; AVX2-LABEL: uitofp_v8i16_v8f32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX2-NEXT: ret{{[l|q]}}
+;
+; AVX512F-LABEL: uitofp_v8i16_v8f32:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512F-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512F-NEXT: ret{{[l|q]}}
+;
+; AVX512VL-LABEL: uitofp_v8i16_v8f32:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512VL-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512VL-NEXT: ret{{[l|q]}}
+;
+; AVX512DQ-LABEL: uitofp_v8i16_v8f32:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512DQ-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512DQ-NEXT: ret{{[l|q]}}
+;
+; AVX512DQVL-LABEL: uitofp_v8i16_v8f32:
+; AVX512DQVL: # %bb.0:
+; AVX512DQVL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512DQVL-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX512DQVL-NEXT: ret{{[l|q]}}
%result = call <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i16(<8 x i16> %x,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
@@ -153,17 +379,43 @@ define <8 x float> @sitofp_v8i32_v8f32(<8 x i32> %x) #0 {
}
define <8 x float> @uitofp_v8i32_v8f32(<8 x i32> %x) #0 {
-; AVX1-LABEL: uitofp_v8i32_v8f32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200]
-; AVX1-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
-; AVX1-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX1-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928]
-; AVX1-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
-; AVX1-NEXT: vbroadcastss {{.*#+}} ymm2 = [-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11]
-; AVX1-NEXT: vaddps %ymm2, %ymm0, %ymm0
-; AVX1-NEXT: vaddps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: ret{{[l|q]}}
+; AVX1-32-LABEL: uitofp_v8i32_v8f32:
+; AVX1-32: # %bb.0:
+; AVX1-32-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-32-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-32-NEXT: vpsrld $16, %xmm2, %xmm2
+; AVX1-32-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-32-NEXT: vcvtdq2ps %ymm1, %ymm1
+; AVX1-32-NEXT: vmulps {{\.LCPI.*}}, %ymm1, %ymm1
+; AVX1-32-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX1-32-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX1-32-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX1-32-NEXT: retl
+;
+; AVX1-64-LABEL: uitofp_v8i32_v8f32:
+; AVX1-64: # %bb.0:
+; AVX1-64-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-64-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-64-NEXT: vpsrld $16, %xmm2, %xmm2
+; AVX1-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-64-NEXT: vcvtdq2ps %ymm1, %ymm1
+; AVX1-64-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
+; AVX1-64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-64-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX1-64-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX1-64-NEXT: retq
+;
+; AVX2-LABEL: uitofp_v8i32_v8f32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
+; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm2 = [-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11]
+; AVX2-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: ret{{[l|q]}}
;
; AVX512F-LABEL: uitofp_v8i32_v8f32:
; AVX512F: # %bb.0:
@@ -208,12 +460,24 @@ define <4 x double> @sitofp_v4i1_v4f64(<4 x i1> %x) #0 {
}
define <4 x double> @uitofp_v4i1_v4f64(<4 x i1> %x) #0 {
-; AVX1-LABEL: uitofp_v4i1_v4f64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT: ret{{[l|q]}}
+; AVX1-32-LABEL: uitofp_v4i1_v4f64:
+; AVX1-32: # %bb.0:
+; AVX1-32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX1-32-NEXT: vcvtdq2pd %xmm0, %ymm0
+; AVX1-32-NEXT: retl
+;
+; AVX1-64-LABEL: uitofp_v4i1_v4f64:
+; AVX1-64: # %bb.0:
+; AVX1-64-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-64-NEXT: vcvtdq2pd %xmm0, %ymm0
+; AVX1-64-NEXT: retq
+;
+; AVX2-LABEL: uitofp_v4i1_v4f64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
+; AVX2-NEXT: vandps %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
+; AVX2-NEXT: ret{{[l|q]}}
;
; AVX512F-LABEL: uitofp_v4i1_v4f64:
; AVX512F: # %bb.0:
@@ -318,17 +582,39 @@ define <4 x double> @sitofp_v4i32_v4f64(<4 x i32> %x) #0 {
}
define <4 x double> @uitofp_v4i32_v4f64(<4 x i32> %x) #0 {
-; AVX1-LABEL: uitofp_v4i32_v4f64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1
-; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4]
-; AVX1-NEXT: vmulpd %ymm2, %ymm1, %ymm1
-; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
-; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT: vaddpd %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: ret{{[l|q]}}
+; AVX1-32-LABEL: uitofp_v4i32_v4f64:
+; AVX1-32: # %bb.0:
+; AVX1-32-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; AVX1-32-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX1-32-NEXT: vcvtdq2pd %xmm0, %ymm0
+; AVX1-32-NEXT: vmulpd {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX1-32-NEXT: vcvtdq2pd %xmm1, %ymm1
+; AVX1-32-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX1-32-NEXT: retl
+;
+; AVX1-64-LABEL: uitofp_v4i32_v4f64:
+; AVX1-64: # %bb.0:
+; AVX1-64-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-64-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; AVX1-64-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX1-64-NEXT: vcvtdq2pd %xmm0, %ymm0
+; AVX1-64-NEXT: vmulpd {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-64-NEXT: vcvtdq2pd %xmm1, %ymm1
+; AVX1-64-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX1-64-NEXT: retq
+;
+; AVX2-LABEL: uitofp_v4i32_v4f64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX2-NEXT: vcvtdq2pd %xmm1, %ymm1
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4]
+; AVX2-NEXT: vmulpd %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
+; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
+; AVX2-NEXT: vaddpd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: ret{{[l|q]}}
;
; AVX512F-LABEL: uitofp_v4i32_v4f64:
; AVX512F: # %bb.0:
@@ -394,21 +680,69 @@ define <4 x double> @sitofp_v4i64_v4f64(<4 x i64> %x) #0 {
; AVX-32-NEXT: .cfi_def_cfa %esp, 4
; AVX-32-NEXT: retl
;
-; AVX-64-LABEL: sitofp_v4i64_v4f64:
-; AVX-64: # %bb.0:
-; AVX-64-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX-64-NEXT: vpextrq $1, %xmm1, %rax
-; AVX-64-NEXT: vcvtsi2sd %rax, %xmm2, %xmm2
-; AVX-64-NEXT: vmovq %xmm1, %rax
-; AVX-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm1
-; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX-64-NEXT: vpextrq $1, %xmm0, %rax
-; AVX-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm2
-; AVX-64-NEXT: vmovq %xmm0, %rax
-; AVX-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm0
-; AVX-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-64-NEXT: retq
+; AVX1-64-LABEL: sitofp_v4i64_v4f64:
+; AVX1-64: # %bb.0:
+; AVX1-64-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-64-NEXT: vpextrq $1, %xmm1, %rax
+; AVX1-64-NEXT: vcvtsi2sd %rax, %xmm2, %xmm2
+; AVX1-64-NEXT: vmovq %xmm1, %rax
+; AVX1-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm1
+; AVX1-64-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX1-64-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm2
+; AVX1-64-NEXT: vmovq %xmm0, %rax
+; AVX1-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm0
+; AVX1-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX1-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-64-NEXT: retq
+;
+; AVX2-64-LABEL: sitofp_v4i64_v4f64:
+; AVX2-64: # %bb.0:
+; AVX2-64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-64-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-64-NEXT: vcvtsi2sd %rax, %xmm2, %xmm2
+; AVX2-64-NEXT: vmovq %xmm1, %rax
+; AVX2-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm1
+; AVX2-64-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-64-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm2
+; AVX2-64-NEXT: vmovq %xmm0, %rax
+; AVX2-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm0
+; AVX2-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-64-NEXT: retq
+;
+; AVX512F-64-LABEL: sitofp_v4i64_v4f64:
+; AVX512F-64: # %bb.0:
+; AVX512F-64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512F-64-NEXT: vpextrq $1, %xmm1, %rax
+; AVX512F-64-NEXT: vcvtsi2sd %rax, %xmm2, %xmm2
+; AVX512F-64-NEXT: vmovq %xmm1, %rax
+; AVX512F-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm1
+; AVX512F-64-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX512F-64-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512F-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm2
+; AVX512F-64-NEXT: vmovq %xmm0, %rax
+; AVX512F-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm0
+; AVX512F-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX512F-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-64-NEXT: retq
+;
+; AVX512VL-64-LABEL: sitofp_v4i64_v4f64:
+; AVX512VL-64: # %bb.0:
+; AVX512VL-64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512VL-64-NEXT: vpextrq $1, %xmm1, %rax
+; AVX512VL-64-NEXT: vcvtsi2sd %rax, %xmm2, %xmm2
+; AVX512VL-64-NEXT: vmovq %xmm1, %rax
+; AVX512VL-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm1
+; AVX512VL-64-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX512VL-64-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512VL-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm2
+; AVX512VL-64-NEXT: vmovq %xmm0, %rax
+; AVX512VL-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm0
+; AVX512VL-64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX512VL-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-64-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_v4i64_v4f64:
; AVX512DQ: # %bb.0:
@@ -430,30 +764,58 @@ define <4 x double> @sitofp_v4i64_v4f64(<4 x i64> %x) #0 {
define <4 x double> @uitofp_v4i64_v4f64(<4 x i64> %x) #0 {
; AVX1-32-LABEL: uitofp_v4i64_v4f64:
; AVX1-32: # %bb.0:
-; AVX1-32-NEXT: vpsrlq $32, %ymm0, %ymm1
-; AVX1-32-NEXT: vpor {{\.LCPI.*}}, %ymm1, %ymm1
-; AVX1-32-NEXT: vbroadcastsd {{.*#+}} ymm2 = [1.9342813118337666E+25,1.9342813118337666E+25,1.9342813118337666E+25,1.9342813118337666E+25]
-; AVX1-32-NEXT: vsubpd %ymm2, %ymm1, %ymm1
-; AVX1-32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-32-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
-; AVX1-32-NEXT: vpor {{\.LCPI.*}}, %ymm0, %ymm0
-; AVX1-32-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX1-32-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-32-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX1-32-NEXT: vorps {{\.LCPI.*}}, %ymm1, %ymm1
+; AVX1-32-NEXT: vpsrlq $32, %xmm0, %xmm2
+; AVX1-32-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-32-NEXT: vpsrlq $32, %xmm0, %xmm0
+; AVX1-32-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-32-NEXT: vorpd {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX1-32-NEXT: vsubpd {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX1-32-NEXT: vaddpd %ymm0, %ymm1, %ymm0
; AVX1-32-NEXT: retl
;
; AVX1-64-LABEL: uitofp_v4i64_v4f64:
; AVX1-64: # %bb.0:
-; AVX1-64-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-64-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
-; AVX1-64-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4841369599423283200,4841369599423283200,4841369599423283200,4841369599423283200]
-; AVX1-64-NEXT: vpor %ymm2, %ymm1, %ymm1
-; AVX1-64-NEXT: vpsrlq $32, %ymm0, %ymm0
-; AVX1-64-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4985484787499139072,4985484787499139072,4985484787499139072,4985484787499139072]
-; AVX1-64-NEXT: vpor %ymm2, %ymm0, %ymm0
-; AVX1-64-NEXT: vbroadcastsd {{.*#+}} ymm2 = [1.9342813118337666E+25,1.9342813118337666E+25,1.9342813118337666E+25,1.9342813118337666E+25]
-; AVX1-64-NEXT: vsubpd %ymm2, %ymm0, %ymm0
+; AVX1-64-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-64-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX1-64-NEXT: vorps {{.*}}(%rip), %ymm1, %ymm1
+; AVX1-64-NEXT: vpsrlq $32, %xmm0, %xmm2
+; AVX1-64-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-64-NEXT: vpsrlq $32, %xmm0, %xmm0
+; AVX1-64-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-64-NEXT: vorpd {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-64-NEXT: vsubpd {{.*}}(%rip), %ymm0, %ymm0
; AVX1-64-NEXT: vaddpd %ymm0, %ymm1, %ymm0
; AVX1-64-NEXT: retq
;
+; AVX2-32-LABEL: uitofp_v4i64_v4f64:
+; AVX2-32: # %bb.0:
+; AVX2-32-NEXT: vpsrlq $32, %ymm0, %ymm1
+; AVX2-32-NEXT: vpor {{\.LCPI.*}}, %ymm1, %ymm1
+; AVX2-32-NEXT: vbroadcastsd {{.*#+}} ymm2 = [1.9342813118337666E+25,1.9342813118337666E+25,1.9342813118337666E+25,1.9342813118337666E+25]
+; AVX2-32-NEXT: vsubpd %ymm2, %ymm1, %ymm1
+; AVX2-32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; AVX2-32-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
+; AVX2-32-NEXT: vpor {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX2-32-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX2-32-NEXT: retl
+;
+; AVX2-64-LABEL: uitofp_v4i64_v4f64:
+; AVX2-64: # %bb.0:
+; AVX2-64-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-64-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX2-64-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4841369599423283200,4841369599423283200,4841369599423283200,4841369599423283200]
+; AVX2-64-NEXT: vpor %ymm2, %ymm1, %ymm1
+; AVX2-64-NEXT: vpsrlq $32, %ymm0, %ymm0
+; AVX2-64-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4985484787499139072,4985484787499139072,4985484787499139072,4985484787499139072]
+; AVX2-64-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vbroadcastsd {{.*#+}} ymm2 = [1.9342813118337666E+25,1.9342813118337666E+25,1.9342813118337666E+25,1.9342813118337666E+25]
+; AVX2-64-NEXT: vsubpd %ymm2, %ymm0, %ymm0
+; AVX2-64-NEXT: vaddpd %ymm0, %ymm1, %ymm0
+; AVX2-64-NEXT: retq
+;
; AVX512F-32-LABEL: uitofp_v4i64_v4f64:
; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: vpsrlq $32, %ymm0, %ymm1
@@ -552,22 +914,73 @@ define <4 x float> @sitofp_v4i64_v4f32(<4 x i64> %x) #0 {
; AVX-32-NEXT: vzeroupper
; AVX-32-NEXT: retl
;
-; AVX-64-LABEL: sitofp_v4i64_v4f32:
-; AVX-64: # %bb.0:
-; AVX-64-NEXT: vpextrq $1, %xmm0, %rax
-; AVX-64-NEXT: vcvtsi2ss %rax, %xmm1, %xmm1
-; AVX-64-NEXT: vmovq %xmm0, %rax
-; AVX-64-NEXT: vcvtsi2ss %rax, %xmm2, %xmm2
-; AVX-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
-; AVX-64-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX-64-NEXT: vmovq %xmm0, %rax
-; AVX-64-NEXT: vcvtsi2ss %rax, %xmm3, %xmm2
-; AVX-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; AVX-64-NEXT: vpextrq $1, %xmm0, %rax
-; AVX-64-NEXT: vcvtsi2ss %rax, %xmm3, %xmm0
-; AVX-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX-64-NEXT: vzeroupper
-; AVX-64-NEXT: retq
+; AVX1-64-LABEL: sitofp_v4i64_v4f32:
+; AVX1-64: # %bb.0:
+; AVX1-64-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-64-NEXT: vcvtsi2ss %rax, %xmm1, %xmm1
+; AVX1-64-NEXT: vmovq %xmm0, %rax
+; AVX1-64-NEXT: vcvtsi2ss %rax, %xmm2, %xmm2
+; AVX1-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVX1-64-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-64-NEXT: vmovq %xmm0, %rax
+; AVX1-64-NEXT: vcvtsi2ss %rax, %xmm3, %xmm2
+; AVX1-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX1-64-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-64-NEXT: vcvtsi2ss %rax, %xmm3, %xmm0
+; AVX1-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX1-64-NEXT: vzeroupper
+; AVX1-64-NEXT: retq
+;
+; AVX2-64-LABEL: sitofp_v4i64_v4f32:
+; AVX2-64: # %bb.0:
+; AVX2-64-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-64-NEXT: vcvtsi2ss %rax, %xmm1, %xmm1
+; AVX2-64-NEXT: vmovq %xmm0, %rax
+; AVX2-64-NEXT: vcvtsi2ss %rax, %xmm2, %xmm2
+; AVX2-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVX2-64-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-64-NEXT: vmovq %xmm0, %rax
+; AVX2-64-NEXT: vcvtsi2ss %rax, %xmm3, %xmm2
+; AVX2-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX2-64-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-64-NEXT: vcvtsi2ss %rax, %xmm3, %xmm0
+; AVX2-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX2-64-NEXT: vzeroupper
+; AVX2-64-NEXT: retq
+;
+; AVX512F-64-LABEL: sitofp_v4i64_v4f32:
+; AVX512F-64: # %bb.0:
+; AVX512F-64-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512F-64-NEXT: vcvtsi2ss %rax, %xmm1, %xmm1
+; AVX512F-64-NEXT: vmovq %xmm0, %rax
+; AVX512F-64-NEXT: vcvtsi2ss %rax, %xmm2, %xmm2
+; AVX512F-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVX512F-64-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512F-64-NEXT: vmovq %xmm0, %rax
+; AVX512F-64-NEXT: vcvtsi2ss %rax, %xmm3, %xmm2
+; AVX512F-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512F-64-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512F-64-NEXT: vcvtsi2ss %rax, %xmm3, %xmm0
+; AVX512F-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512F-64-NEXT: vzeroupper
+; AVX512F-64-NEXT: retq
+;
+; AVX512VL-64-LABEL: sitofp_v4i64_v4f32:
+; AVX512VL-64: # %bb.0:
+; AVX512VL-64-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512VL-64-NEXT: vcvtsi2ss %rax, %xmm1, %xmm1
+; AVX512VL-64-NEXT: vmovq %xmm0, %rax
+; AVX512VL-64-NEXT: vcvtsi2ss %rax, %xmm2, %xmm2
+; AVX512VL-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVX512VL-64-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512VL-64-NEXT: vmovq %xmm0, %rax
+; AVX512VL-64-NEXT: vcvtsi2ss %rax, %xmm3, %xmm2
+; AVX512VL-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512VL-64-NEXT: vpextrq $1, %xmm0, %rax
+; AVX512VL-64-NEXT: vcvtsi2ss %rax, %xmm3, %xmm0
+; AVX512VL-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512VL-64-NEXT: vzeroupper
+; AVX512VL-64-NEXT: retq
;
; AVX512DQ-LABEL: sitofp_v4i64_v4f32:
; AVX512DQ: # %bb.0:
@@ -672,7 +1085,7 @@ define <4 x float> @uitofp_v4i64_v4f32(<4 x i64> %x) #0 {
; AVX1-64-NEXT: vaddss %xmm2, %xmm2, %xmm2
; AVX1-64-NEXT: .LBB19_4:
; AVX1-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
-; AVX1-64-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX1-64-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-64-NEXT: vmovq %xmm0, %rax
; AVX1-64-NEXT: movq %rax, %rcx
; AVX1-64-NEXT: shrq %rcx
@@ -704,6 +1117,67 @@ define <4 x float> @uitofp_v4i64_v4f32(<4 x i64> %x) #0 {
; AVX1-64-NEXT: vzeroupper
; AVX1-64-NEXT: retq
;
+; AVX2-64-LABEL: uitofp_v4i64_v4f32:
+; AVX2-64: # %bb.0:
+; AVX2-64-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-64-NEXT: movq %rax, %rcx
+; AVX2-64-NEXT: shrq %rcx
+; AVX2-64-NEXT: movl %eax, %edx
+; AVX2-64-NEXT: andl $1, %edx
+; AVX2-64-NEXT: orq %rcx, %rdx
+; AVX2-64-NEXT: testq %rax, %rax
+; AVX2-64-NEXT: cmovnsq %rax, %rdx
+; AVX2-64-NEXT: vcvtsi2ss %rdx, %xmm1, %xmm1
+; AVX2-64-NEXT: jns .LBB19_2
+; AVX2-64-NEXT: # %bb.1:
+; AVX2-64-NEXT: vaddss %xmm1, %xmm1, %xmm1
+; AVX2-64-NEXT: .LBB19_2:
+; AVX2-64-NEXT: vmovq %xmm0, %rax
+; AVX2-64-NEXT: movq %rax, %rcx
+; AVX2-64-NEXT: shrq %rcx
+; AVX2-64-NEXT: movl %eax, %edx
+; AVX2-64-NEXT: andl $1, %edx
+; AVX2-64-NEXT: orq %rcx, %rdx
+; AVX2-64-NEXT: testq %rax, %rax
+; AVX2-64-NEXT: cmovnsq %rax, %rdx
+; AVX2-64-NEXT: vcvtsi2ss %rdx, %xmm2, %xmm2
+; AVX2-64-NEXT: jns .LBB19_4
+; AVX2-64-NEXT: # %bb.3:
+; AVX2-64-NEXT: vaddss %xmm2, %xmm2, %xmm2
+; AVX2-64-NEXT: .LBB19_4:
+; AVX2-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVX2-64-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX2-64-NEXT: vmovq %xmm0, %rax
+; AVX2-64-NEXT: movq %rax, %rcx
+; AVX2-64-NEXT: shrq %rcx
+; AVX2-64-NEXT: movl %eax, %edx
+; AVX2-64-NEXT: andl $1, %edx
+; AVX2-64-NEXT: orq %rcx, %rdx
+; AVX2-64-NEXT: testq %rax, %rax
+; AVX2-64-NEXT: cmovnsq %rax, %rdx
+; AVX2-64-NEXT: vcvtsi2ss %rdx, %xmm3, %xmm2
+; AVX2-64-NEXT: jns .LBB19_6
+; AVX2-64-NEXT: # %bb.5:
+; AVX2-64-NEXT: vaddss %xmm2, %xmm2, %xmm2
+; AVX2-64-NEXT: .LBB19_6:
+; AVX2-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX2-64-NEXT: vpextrq $1, %xmm0, %rax
+; AVX2-64-NEXT: movq %rax, %rcx
+; AVX2-64-NEXT: shrq %rcx
+; AVX2-64-NEXT: movl %eax, %edx
+; AVX2-64-NEXT: andl $1, %edx
+; AVX2-64-NEXT: orq %rcx, %rdx
+; AVX2-64-NEXT: testq %rax, %rax
+; AVX2-64-NEXT: cmovnsq %rax, %rdx
+; AVX2-64-NEXT: vcvtsi2ss %rdx, %xmm3, %xmm0
+; AVX2-64-NEXT: jns .LBB19_8
+; AVX2-64-NEXT: # %bb.7:
+; AVX2-64-NEXT: vaddss %xmm0, %xmm0, %xmm0
+; AVX2-64-NEXT: .LBB19_8:
+; AVX2-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX2-64-NEXT: vzeroupper
+; AVX2-64-NEXT: retq
+;
; AVX512F-64-LABEL: uitofp_v4i64_v4f32:
; AVX512F-64: # %bb.0:
; AVX512F-64-NEXT: vpextrq $1, %xmm0, %rax