[llvm] bd46e29 - [X86] Re-enable lowerUINT_TO_FP_vXi32 under fast-math by using an FSUB instead of an FADD.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Thu Jan 2 22:00:44 PST 2020
Author: Craig Topper
Date: 2020-01-02T21:46:53-08:00
New Revision: bd46e29742759dd0d57feb0f33affa9adc7d2fbf
URL: https://github.com/llvm/llvm-project/commit/bd46e29742759dd0d57feb0f33affa9adc7d2fbf
DIFF: https://github.com/llvm/llvm-project/commit/bd46e29742759dd0d57feb0f33affa9adc7d2fbf.diff
LOG: [X86] Re-enable lowerUINT_TO_FP_vXi32 under fast-math by using an FSUB instead of an FADD.
Summary:
We previously disabled this under fast-math due to aggressive
reassociation by the machine combiner. But I think we can work
around this by using an FSUB instead of an FADD for the first
operation.
This matches the similar algorithm we use for uint_to_fp i64->f64
in TargetLowering::expandUINT_TO_FP. If reassociation hasn't
been a problem for that, hopefully it's not a problem here.
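For reference, here is a scalar C sketch of the bit trick this lowering
implements (the helper name and driver are illustrative, not part of the
patch). Each 16-bit half is ORed into a float with a fixed exponent so it
is represented exactly, and a single FSUB of the positive magic constant
0x1.0001p39f (bit pattern 0x53000080, i.e. 0x1.0p39f + 0x1.0p23f) cancels
both biases:

#include <stdint.h>
#include <stdio.h>

/* Scalar model of the vXi32 uint_to_fp lowering; assumes IEEE-754 floats. */
static float uint32_to_float_magic(uint32_t x) {
  union { uint32_t u; float f; } lo, hi;
  lo.u = (x & 0xFFFFu) | 0x4B000000u; /* == 0x1.0p23f + low16, exact */
  hi.u = (x >> 16)     | 0x53000000u; /* == 0x1.0p39f + high16 * 2^16, exact */
  /* FSUB of the positive constant (0x1.0p39f + 0x1.0p23f). This
   * subtraction is exact; only the final FADD rounds, which is the
   * rounding that uint32->float conversion requires anyway. */
  float fhi = hi.f - 0x1.0001p39f;
  return lo.f + fhi;
}

int main(void) {
  printf("%f\n", uint32_to_float_magic(4000000000u)); /* 4000000000.000000 */
  return 0;
}

The equivalent FADD of -0x1.0001p39f computes the same value, but leaves two
FADDs that the MachineCombiner may reassociate under unsafe-fp-math: folding
the constant into lo first gives -0x1.0p39f + low16, which needs about 40
bits of precision and rounds the low half away (PR24512). An FSUB is not a
reassociation candidate, so the chain stays in order.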
Reviewers: RKSimon, spatel, scanon
Reviewed By: spatel
Subscribers: hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D71968
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/ftrunc.ll
llvm/test/CodeGen/X86/known-bits-vector.ll
llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
llvm/test/CodeGen/X86/vec_int_to_fp.ll
llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
llvm/test/CodeGen/X86/vec_uint_to_fp.ll
llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 6c3f2f934bd1..d58c7e95caaa 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -19083,15 +19083,6 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
// float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
// return (float4) lo + fhi;
- // We shouldn't use it when unsafe-fp-math is enabled though: we might later
- // reassociate the two FADDs, and if we do that, the algorithm fails
- // spectacularly (PR24512).
- // FIXME: If we ever have some kind of Machine FMF, this should be marked
- // as non-fast and always be enabled. Why isn't SDAG FMF enough? Because
- // there's also the MachineCombiner reassociations happening on Machine IR.
- if (DAG.getTarget().Options.UnsafeFPMath)
- return SDValue();
-
bool Is128 = VecIntVT == MVT::v4i32;
MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
// If we convert to something else than the supported type, e.g., to v4f64,
@@ -19143,25 +19134,28 @@ static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
}
- // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
- SDValue VecCstFAdd = DAG.getConstantFP(
- APFloat(APFloat::IEEEsingle(), APInt(32, 0xD3000080)), DL, VecFloatVT);
+ // Create the vector constant for (0x1.0p39f + 0x1.0p23f).
+ SDValue VecCstFSub = DAG.getConstantFP(
+ APFloat(APFloat::IEEEsingle(), APInt(32, 0x53000080)), DL, VecFloatVT);
// float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
+ // NOTE: By using fsub of a positive constant instead of fadd of a negative
+ // constant, we avoid reassociation in MachineCombiner when unsafe-fp-math is
+ // enabled. See PR24512.
SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
// TODO: Are there any fast-math-flags to propagate here?
// (float4) lo;
SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
// return (float4) lo + fhi;
if (IsStrict) {
- SDValue FHigh = DAG.getNode(ISD::STRICT_FADD, DL, {VecFloatVT, MVT::Other},
- {Op.getOperand(0), HighBitcast, VecCstFAdd});
+ SDValue FHigh = DAG.getNode(ISD::STRICT_FSUB, DL, {VecFloatVT, MVT::Other},
+ {Op.getOperand(0), HighBitcast, VecCstFSub});
return DAG.getNode(ISD::STRICT_FADD, DL, {VecFloatVT, MVT::Other},
{FHigh.getValue(1), LowBitcast, FHigh});
}
SDValue FHigh =
- DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
+ DAG.getNode(ISD::FSUB, DL, VecFloatVT, HighBitcast, VecCstFSub);
return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
}
diff --git a/llvm/test/CodeGen/X86/ftrunc.ll b/llvm/test/CodeGen/X86/ftrunc.ll
index 92118100bba8..0a1c1e2a851e 100644
--- a/llvm/test/CodeGen/X86/ftrunc.ll
+++ b/llvm/test/CodeGen/X86/ftrunc.ll
@@ -78,7 +78,7 @@ define <4 x float> @trunc_unsigned_v4f32(<4 x float> %x) #0 {
; SSE2-NEXT: orps {{.*}}(%rip), %xmm0
; SSE2-NEXT: psrld $16, %xmm1
; SSE2-NEXT: por {{.*}}(%rip), %xmm1
-; SSE2-NEXT: addps {{.*}}(%rip), %xmm1
+; SSE2-NEXT: subps {{.*}}(%rip), %xmm1
; SSE2-NEXT: addps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/known-bits-vector.ll b/llvm/test/CodeGen/X86/known-bits-vector.ll
index 067ac9a6f7ef..a918ab34042f 100644
--- a/llvm/test/CodeGen/X86/known-bits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-bits-vector.ll
@@ -438,7 +438,7 @@ define <4 x float> @knownbits_smax_smin_shuffle_uitofp(<4 x i32> %a0) {
; X32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; X32-NEXT: vpsrld $16, %xmm0, %xmm0
; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X32-NEXT: vaddps {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vsubps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vaddps %xmm0, %xmm1, %xmm0
; X32-NEXT: retl
;
@@ -450,7 +450,7 @@ define <4 x float> @knownbits_smax_smin_shuffle_uitofp(<4 x i32> %a0) {
; X64-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; X64-NEXT: vpsrld $16, %xmm0, %xmm0
; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X64-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vaddps %xmm0, %xmm1, %xmm0
; X64-NEXT: retq
%1 = call <4 x i32> @llvm.x86.sse41.pminsd(<4 x i32> %a0, <4 x i32> <i32 0, i32 -65535, i32 -65535, i32 0>)
@@ -548,7 +548,7 @@ define <4 x float> @knownbits_abs_uitofp(<4 x i32> %a0) {
; X32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; X32-NEXT: vpsrld $16, %xmm0, %xmm0
; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X32-NEXT: vaddps {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vsubps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vaddps %xmm0, %xmm1, %xmm0
; X32-NEXT: retl
;
@@ -558,7 +558,7 @@ define <4 x float> @knownbits_abs_uitofp(<4 x i32> %a0) {
; X64-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; X64-NEXT: vpsrld $16, %xmm0, %xmm0
; X64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X64-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vaddps %xmm0, %xmm1, %xmm0
; X64-NEXT: retq
%1 = sub <4 x i32> zeroinitializer, %a0
diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
index 81f146fd6349..1deefd17903a 100644
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
@@ -577,7 +577,7 @@ define <4 x float> @uitofp_v4i32_v4f32(<4 x i32> %x) #0 {
; SSE-32-NEXT: por {{\.LCPI.*}}, %xmm1
; SSE-32-NEXT: psrld $16, %xmm0
; SSE-32-NEXT: por {{\.LCPI.*}}, %xmm0
-; SSE-32-NEXT: addps {{\.LCPI.*}}, %xmm0
+; SSE-32-NEXT: subps {{\.LCPI.*}}, %xmm0
; SSE-32-NEXT: addps %xmm1, %xmm0
; SSE-32-NEXT: retl
;
@@ -588,7 +588,7 @@ define <4 x float> @uitofp_v4i32_v4f32(<4 x i32> %x) #0 {
; SSE-64-NEXT: por {{.*}}(%rip), %xmm1
; SSE-64-NEXT: psrld $16, %xmm0
; SSE-64-NEXT: por {{.*}}(%rip), %xmm0
-; SSE-64-NEXT: addps {{.*}}(%rip), %xmm0
+; SSE-64-NEXT: subps {{.*}}(%rip), %xmm0
; SSE-64-NEXT: addps %xmm1, %xmm0
; SSE-64-NEXT: retq
;
@@ -597,7 +597,7 @@ define <4 x float> @uitofp_v4i32_v4f32(<4 x i32> %x) #0 {
; AVX1-32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-32-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; AVX1-32-NEXT: vaddps {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX1-32-NEXT: vsubps {{\.LCPI.*}}, %xmm0, %xmm0
; AVX1-32-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX1-32-NEXT: retl
;
@@ -606,7 +606,7 @@ define <4 x float> @uitofp_v4i32_v4f32(<4 x i32> %x) #0 {
; AVX1-64-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-64-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-64-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; AVX1-64-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-64-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
; AVX1-64-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX1-64-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
index 80a08f8aa53b..ffeaf3b85ac7 100644
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
@@ -412,8 +412,8 @@ define <8 x float> @uitofp_v8i32_v8f32(<8 x i32> %x) #0 {
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928]
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
-; AVX2-NEXT: vbroadcastss {{.*#+}} ymm2 = [-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11]
-; AVX2-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm2 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; AVX2-NEXT: vsubps %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vaddps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: ret{{[l|q]}}
;
diff --git a/llvm/test/CodeGen/X86/vec_int_to_fp.ll b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
index d78914eb6940..ebda9494e8b6 100644
--- a/llvm/test/CodeGen/X86/vec_int_to_fp.ll
+++ b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
@@ -2319,7 +2319,7 @@ define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
; SSE2-NEXT: por {{.*}}(%rip), %xmm1
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: por {{.*}}(%rip), %xmm0
-; SSE2-NEXT: addps {{.*}}(%rip), %xmm0
+; SSE2-NEXT: subps {{.*}}(%rip), %xmm0
; SSE2-NEXT: addps %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -2329,7 +2329,7 @@ define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; SSE41-NEXT: psrld $16, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; SSE41-NEXT: addps {{.*}}(%rip), %xmm0
+; SSE41-NEXT: subps {{.*}}(%rip), %xmm0
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -2338,7 +2338,7 @@ define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; AVX1-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
@@ -2349,8 +2349,8 @@ define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1392508928,1392508928,1392508928,1392508928]
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
-; AVX2-NEXT: vbroadcastss {{.*#+}} xmm2 = [-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11]
-; AVX2-NEXT: vaddps %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm2 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; AVX2-NEXT: vsubps %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
@@ -2857,14 +2857,14 @@ define <8 x float> @uitofp_8i32_to_8f32(<8 x i32> %a) {
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [1392508928,1392508928,1392508928,1392508928]
; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: movaps {{.*#+}} xmm6 = [-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11]
-; SSE2-NEXT: addps %xmm6, %xmm0
+; SSE2-NEXT: movaps {{.*#+}} xmm6 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; SSE2-NEXT: subps %xmm6, %xmm0
; SSE2-NEXT: addps %xmm3, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psrld $16, %xmm1
; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: addps %xmm6, %xmm1
+; SSE2-NEXT: subps %xmm6, %xmm1
; SSE2-NEXT: addps %xmm2, %xmm1
; SSE2-NEXT: retq
;
@@ -2876,13 +2876,13 @@ define <8 x float> @uitofp_8i32_to_8f32(<8 x i32> %a) {
; SSE41-NEXT: psrld $16, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [1392508928,1392508928,1392508928,1392508928]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
-; SSE41-NEXT: movaps {{.*#+}} xmm5 = [-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11]
-; SSE41-NEXT: addps %xmm5, %xmm0
+; SSE41-NEXT: movaps {{.*#+}} xmm5 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; SSE41-NEXT: subps %xmm5, %xmm0
; SSE41-NEXT: addps %xmm3, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
; SSE41-NEXT: psrld $16, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
-; SSE41-NEXT: addps %xmm5, %xmm1
+; SSE41-NEXT: subps %xmm5, %xmm1
; SSE41-NEXT: addps %xmm2, %xmm1
; SSE41-NEXT: retq
;
@@ -2906,8 +2906,8 @@ define <8 x float> @uitofp_8i32_to_8f32(<8 x i32> %a) {
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928]
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
-; AVX2-NEXT: vbroadcastss {{.*#+}} ymm2 = [-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11]
-; AVX2-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm2 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; AVX2-NEXT: vsubps %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vaddps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
@@ -4842,7 +4842,7 @@ define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; SSE2-NEXT: por {{.*}}(%rip), %xmm1
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: por {{.*}}(%rip), %xmm0
-; SSE2-NEXT: addps {{.*}}(%rip), %xmm0
+; SSE2-NEXT: subps {{.*}}(%rip), %xmm0
; SSE2-NEXT: addps %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -4853,7 +4853,7 @@ define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
; SSE41-NEXT: psrld $16, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; SSE41-NEXT: addps {{.*}}(%rip), %xmm0
+; SSE41-NEXT: subps {{.*}}(%rip), %xmm0
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: retq
;
@@ -4863,7 +4863,7 @@ define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; AVX1-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
@@ -4875,8 +4875,8 @@ define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1392508928,1392508928,1392508928,1392508928]
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
-; AVX2-NEXT: vbroadcastss {{.*#+}} xmm2 = [-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11]
-; AVX2-NEXT: vaddps %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm2 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; AVX2-NEXT: vsubps %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
@@ -5452,14 +5452,14 @@ define <8 x float> @uitofp_load_8i32_to_8f32(<8 x i32> *%a) {
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [1392508928,1392508928,1392508928,1392508928]
; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: movaps {{.*#+}} xmm6 = [-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11]
-; SSE2-NEXT: addps %xmm6, %xmm0
+; SSE2-NEXT: movaps {{.*#+}} xmm6 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; SSE2-NEXT: subps %xmm6, %xmm0
; SSE2-NEXT: addps %xmm3, %xmm0
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: psrld $16, %xmm1
; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: addps %xmm6, %xmm1
+; SSE2-NEXT: subps %xmm6, %xmm1
; SSE2-NEXT: addps %xmm2, %xmm1
; SSE2-NEXT: retq
;
@@ -5473,13 +5473,13 @@ define <8 x float> @uitofp_load_8i32_to_8f32(<8 x i32> *%a) {
; SSE41-NEXT: psrld $16, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [1392508928,1392508928,1392508928,1392508928]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
-; SSE41-NEXT: movaps {{.*#+}} xmm5 = [-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11]
-; SSE41-NEXT: addps %xmm5, %xmm0
+; SSE41-NEXT: movaps {{.*#+}} xmm5 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; SSE41-NEXT: subps %xmm5, %xmm0
; SSE41-NEXT: addps %xmm3, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
; SSE41-NEXT: psrld $16, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
-; SSE41-NEXT: addps %xmm5, %xmm1
+; SSE41-NEXT: subps %xmm5, %xmm1
; SSE41-NEXT: addps %xmm2, %xmm1
; SSE41-NEXT: retq
;
@@ -5506,8 +5506,8 @@ define <8 x float> @uitofp_load_8i32_to_8f32(<8 x i32> *%a) {
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928]
; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
-; AVX2-NEXT: vbroadcastss {{.*#+}} ymm2 = [-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11]
-; AVX2-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm2 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; AVX2-NEXT: vsubps %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vaddps %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll b/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
index 5f4489c5ed25..cccd1d739f83 100644
--- a/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
+++ b/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
@@ -11,72 +11,90 @@
; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx512vl \
; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL
+; Check that the constants used in the vectors are the right ones.
; SSE2: [[MASKCSTADDR:.LCPI[0-9_]+]]:
; SSE2-NEXT: .long 65535 # 0xffff
; SSE2-NEXT: .long 65535 # 0xffff
; SSE2-NEXT: .long 65535 # 0xffff
; SSE2-NEXT: .long 65535 # 0xffff
-; CST: [[FPMASKCSTADDR:.LCPI[0-9_]+]]:
-; CST-NEXT: .long 1199570944 # float 65536
-; CST-NEXT: .long 1199570944 # float 65536
-; CST-NEXT: .long 1199570944 # float 65536
-; CST-NEXT: .long 1199570944 # float 65536
+; CST: [[LOWCSTADDR:.LCPI[0-9_]+]]:
+; CST-NEXT: .long 1258291200 # 0x4b000000
+; CST-NEXT: .long 1258291200 # 0x4b000000
+; CST-NEXT: .long 1258291200 # 0x4b000000
+; CST-NEXT: .long 1258291200 # 0x4b000000
-; AVX2: [[FPMASKCSTADDR:.LCPI[0-9_]+]]:
-; AVX2-NEXT: .long 1199570944 # float 65536
+; CST: [[HIGHCSTADDR:.LCPI[0-9_]+]]:
+; CST-NEXT: .long 1392508928 # 0x53000000
+; CST-NEXT: .long 1392508928 # 0x53000000
+; CST-NEXT: .long 1392508928 # 0x53000000
+; CST-NEXT: .long 1392508928 # 0x53000000
+
+; CST: [[MAGICCSTADDR:.LCPI[0-9_]+]]:
+; CST-NEXT: .long 1392509056 # float 5.49764202E+11
+; CST-NEXT: .long 1392509056 # float 5.49764202E+11
+; CST-NEXT: .long 1392509056 # float 5.49764202E+11
+; CST-NEXT: .long 1392509056 # float 5.49764202E+11
+
+; AVX2: [[LOWCSTADDR:.LCPI[0-9_]+]]:
+; AVX2-NEXT: .long 1258291200 # 0x4b000000
+
+; AVX2: [[HIGHCSTADDR:.LCPI[0-9_]+]]:
+; AVX2-NEXT: .long 1392508928 # 0x53000000
+
+; AVX2: [[MAGICCSTADDR:.LCPI[0-9_]+]]:
+; AVX2-NEXT: .long 1392509056 # float 5.49764202E+11
define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
; SSE2-LABEL: test_uitofp_v4i32_to_v4f32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movaps {{.*#+}} xmm1 = [65535,65535,65535,65535]
-; SSE2-NEXT: andps %xmm0, %xmm1
-; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1
-; SSE2-NEXT: psrld $16, %xmm0
-; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
-; SSE2-NEXT: mulps [[FPMASKCSTADDR]](%rip), %xmm0
-; SSE2-NEXT: addps %xmm1, %xmm0
-; SSE2-NEXT: retq
+; SSE2: movdqa [[MASKCSTADDR]](%rip), [[MASK:%xmm[0-9]+]]
+; SSE2-NEXT: pand %xmm0, [[MASK]]
+; After this instruction, MASK will have the value of the low parts
+; of the vector.
+; SSE2-NEXT: por [[LOWCSTADDR]](%rip), [[MASK]]
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: por [[HIGHCSTADDR]](%rip), %xmm0
+; SSE2-NEXT: subps [[MAGICCSTADDR]](%rip), %xmm0
+; SSE2-NEXT: addps [[MASK]], %xmm0
+; SSE2-NEXT: retq
;
+; Currently we commute the arguments of the first blend, but this could be
+; improved to match the lowering of the second blend.
; SSE41-LABEL: test_uitofp_v4i32_to_v4f32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pxor %xmm1, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
-; SSE41-NEXT: cvtdq2ps %xmm1, %xmm1
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
-; SSE41-NEXT: mulps [[FPMASKCSTADDR]](%rip), %xmm0
-; SSE41-NEXT: addps %xmm1, %xmm0
-; SSE41-NEXT: retq
+; SSE41: movdqa [[LOWCSTADDR]](%rip), [[LOWVEC:%xmm[0-9]+]]
+; SSE41-NEXT: pblendw $85, %xmm0, [[LOWVEC]]
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: pblendw $170, [[HIGHCSTADDR]](%rip), %xmm0
+; SSE41-NEXT: subps [[MAGICCSTADDR]](%rip), %xmm0
+; SSE41-NEXT: addps [[LOWVEC]], %xmm0
+; SSE41-NEXT: retq
;
; AVX-LABEL: test_uitofp_v4i32_to_v4f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
-; AVX-NEXT: vcvtdq2ps %xmm1, %xmm1
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
-; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
-; AVX-NEXT: vmulps [[FPMASKCSTADDR]](%rip), %xmm0, %xmm0
-; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX: vpblendw $170, [[LOWCSTADDR]](%rip), %xmm0, [[LOWVEC:%xmm[0-9]+]]
+; AVX-NEXT: vpsrld $16, %xmm0, [[SHIFTVEC:%xmm[0-9]+]]
+; AVX-NEXT: vpblendw $170, [[HIGHCSTADDR]](%rip), [[SHIFTVEC]], [[HIGHVEC:%xmm[0-9]+]]
+; AVX-NEXT: vsubps [[MAGICCSTADDR]](%rip), [[HIGHVEC]], [[TMP:%xmm[0-9]+]]
+; AVX-NEXT: vaddps [[TMP]], [[LOWVEC]], %xmm0
+; AVX-NEXT: retq
;
+; The lowering for AVX2 is a bit messy because we select broadcast
+; instructions instead of folding the constant loads.
; AVX2-LABEL: test_uitofp_v4i32_to_v4f32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX2-NEXT: vcvtdq2ps %xmm1, %xmm1
-; AVX2-NEXT: vbroadcastss [[FPMASKCSTADDR]](%rip), %xmm2
-; AVX2-NEXT: vmulps %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
-; AVX2-NEXT: vcvtdq2ps %xmm0, %xmm0
-; AVX2-NEXT: vaddps %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: retq
+; AVX2: vpbroadcastd [[LOWCSTADDR]](%rip), [[LOWCST:%xmm[0-9]+]]
+; AVX2-NEXT: vpblendw $170, [[LOWCST]], %xmm0, [[LOWVEC:%xmm[0-9]+]]
+; AVX2-NEXT: vpsrld $16, %xmm0, [[SHIFTVEC:%xmm[0-9]+]]
+; AVX2-NEXT: vpbroadcastd [[HIGHCSTADDR]](%rip), [[HIGHCST:%xmm[0-9]+]]
+; AVX2-NEXT: vpblendw $170, [[HIGHCST]], [[SHIFTVEC]], [[HIGHVEC:%xmm[0-9]+]]
+; AVX2-NEXT: vbroadcastss [[MAGICCSTADDR]](%rip), [[MAGICCST:%xmm[0-9]+]]
+; AVX2-NEXT: vsubps [[MAGICCST]], [[HIGHVEC]], [[TMP:%xmm[0-9]+]]
+; AVX2-NEXT: vaddps [[TMP]], [[LOWVEC]], %xmm0
+; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_uitofp_v4i32_to_v4f32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -88,86 +106,76 @@ define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
ret <4 x float> %tmp
}
-; AVX: [[FPMASKCSTADDR_v8:.LCPI[0-9_]+]]:
-; AVX-NEXT: .long 1199570944 # float 65536
-; AVX-NEXT: .long 1199570944 # float 65536
-; AVX-NEXT: .long 1199570944 # float 65536
-; AVX-NEXT: .long 1199570944 # float 65536
+; Match the AVX2 constants used in the next function
+; AVX2: [[LOWCSTADDR:.LCPI[0-9_]+]]:
+; AVX2-NEXT: .long 1258291200 # 0x4b000000
-; AVX: [[MASKCSTADDR_v8:.LCPI[0-9_]+]]:
-; AVX-NEXT: .long 65535 # 0xffff
-; AVX-NEXT: .long 65535 # 0xffff
-; AVX-NEXT: .long 65535 # 0xffff
-; AVX-NEXT: .long 65535 # 0xffff
+; AVX2: [[HIGHCSTADDR:.LCPI[0-9_]+]]:
+; AVX2-NEXT: .long 1392508928 # 0x53000000
-; AVX2: [[FPMASKCSTADDR_v8:.LCPI[0-9_]+]]:
-; AVX2-NEXT: .long 1199570944 # float 65536
+; AVX2: [[MAGICCSTADDR:.LCPI[0-9_]+]]:
+; AVX2-NEXT: .long 1392509056 # float 5.49764202E+11
define <8 x float> @test_uitofp_v8i32_to_v8f32(<8 x i32> %arg) {
+; Legalization will break this into 2 x <4 x i32> on anything prior to AVX.
+; The constants used in the vector instructions are shared between the
+; two sequences of instructions.
+;
; SSE2-LABEL: test_uitofp_v8i32_to_v8f32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: psrld $16, %xmm2
-; SSE2-NEXT: cvtdq2ps %xmm2, %xmm2
-; SSE2-NEXT: movaps {{.*#+}} xmm3 = [6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4]
-; SSE2-NEXT: mulps %xmm3, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535]
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
-; SSE2-NEXT: addps %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psrld $16, %xmm2
-; SSE2-NEXT: cvtdq2ps %xmm2, %xmm2
-; SSE2-NEXT: mulps %xmm3, %xmm2
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1
-; SSE2-NEXT: addps %xmm2, %xmm1
-; SSE2-NEXT: retq
+; SSE2: movdqa {{.*#+}} [[MASK:xmm[0-9]+]] = [65535,65535,65535,65535]
+; SSE2-NEXT: movdqa %xmm0, [[VECLOW:%xmm[0-9]+]]
+; SSE2-NEXT: pand %[[MASK]], [[VECLOW]]
+; SSE2-NEXT: movdqa {{.*#+}} [[LOWCST:xmm[0-9]+]] = [1258291200,1258291200,1258291200,1258291200]
+; SSE2-NEXT: por %[[LOWCST]], [[VECLOW]]
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} [[HIGHCST:xmm[0-9]+]] = [1392508928,1392508928,1392508928,1392508928]
+; SSE2-NEXT: por %[[HIGHCST]], %xmm0
+; SSE2-NEXT: movaps {{.*#+}} [[MAGICCST:xmm[0-9]+]] = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; SSE2-NEXT: subps %[[MAGICCST]], %xmm0
+; SSE2-NEXT: addps [[VECLOW]], %xmm0
+; MASK is the low vector of the second part after this point.
+; SSE2-NEXT: pand %xmm1, %[[MASK]]
+; SSE2-NEXT: por %[[LOWCST]], %[[MASK]]
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: por %[[HIGHCST]], %xmm1
+; SSE2-NEXT: subps %[[MAGICCST]], %xmm1
+; SSE2-NEXT: addps %[[MASK]], %xmm1
+; SSE2-NEXT: retq
;
; SSE41-LABEL: test_uitofp_v8i32_to_v8f32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psrld $16, %xmm2
-; SSE41-NEXT: cvtdq2ps %xmm2, %xmm2
-; SSE41-NEXT: movaps {{.*#+}} xmm3 = [6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4]
-; SSE41-NEXT: mulps %xmm3, %xmm2
-; SSE41-NEXT: pxor %xmm4, %xmm4
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
-; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
-; SSE41-NEXT: addps %xmm2, %xmm0
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrld $16, %xmm2
-; SSE41-NEXT: cvtdq2ps %xmm2, %xmm2
-; SSE41-NEXT: mulps %xmm3, %xmm2
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
-; SSE41-NEXT: cvtdq2ps %xmm1, %xmm1
-; SSE41-NEXT: addps %xmm2, %xmm1
-; SSE41-NEXT: retq
+; SSE41: movdqa {{.*#+}} [[LOWCST:xmm[0-9]+]] = [1258291200,1258291200,1258291200,1258291200]
+; SSE41-NEXT: movdqa %xmm0, [[VECLOW:%xmm[0-9]+]]
+; SSE41-NEXT: pblendw $170, %[[LOWCST]], [[VECLOW]]
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} [[HIGHCST:xmm[0-9]+]] = [1392508928,1392508928,1392508928,1392508928]
+; SSE41-NEXT: pblendw $170, %[[HIGHCST]], %xmm0
+; SSE41-NEXT: movaps {{.*#+}} [[MAGICCST:xmm[0-9]+]] = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; SSE41-NEXT: subps %[[MAGICCST]], %xmm0
+; SSE41-NEXT: addps [[VECLOW]], %xmm0
+; LOWCST is the low vector of the second part after this point.
+; The operands of the blend are inverted because we reuse xmm1
+; in the next shift.
+; SSE41-NEXT: pblendw $85, %xmm1, %[[LOWCST]]
+; SSE41-NEXT: psrld $16, %xmm1
+; SSE41-NEXT: pblendw $170, %[[HIGHCST]], %xmm1
+; SSE41-NEXT: subps %[[MAGICCST]], %xmm1
+; SSE41-NEXT: addps %[[LOWCST]], %xmm1
+; SSE41-NEXT: retq
;
-; AVX-LABEL: test_uitofp_v8i32_to_v8f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT: vpsrld $16, %xmm2, %xmm2
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX-NEXT: vcvtdq2ps %ymm1, %ymm1
-; AVX-NEXT: vmulps [[FPMASKCSTADDR_v8]](%rip), %ymm1, %ymm1
-; AVX-NEXT: vandps [[MASKCSTADDR_v8]](%rip), %ymm0, %ymm0
-; AVX-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX-NEXT: vaddps %ymm0, %ymm1, %ymm0
-; AVX-NEXT: retq
+; Test that we are not lowering uitofp to scalars
+; AVX-NOT: cvtsd2ss
+; AVX: retq
;
; AVX2-LABEL: test_uitofp_v8i32_to_v8f32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpsrld $16, %ymm0, %ymm1
-; AVX2-NEXT: vcvtdq2ps %ymm1, %ymm1
-; AVX2-NEXT: vbroadcastss [[FPMASKCSTADDR_v8]](%rip), %ymm2
-; AVX2-NEXT: vmulps %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
-; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: vaddps %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: retq
+; AVX2: vpbroadcastd [[LOWCSTADDR]](%rip), [[LOWCST:%ymm[0-9]+]]
+; AVX2-NEXT: vpblendw $170, [[LOWCST]], %ymm0, [[LOWVEC:%ymm[0-9]+]]
+; AVX2-NEXT: vpsrld $16, %ymm0, [[SHIFTVEC:%ymm[0-9]+]]
+; AVX2-NEXT: vpbroadcastd [[HIGHCSTADDR]](%rip), [[HIGHCST:%ymm[0-9]+]]
+; AVX2-NEXT: vpblendw $170, [[HIGHCST]], [[SHIFTVEC]], [[HIGHVEC:%ymm[0-9]+]]
+; AVX2-NEXT: vbroadcastss [[MAGICCSTADDR]](%rip), [[MAGICCST:%ymm[0-9]+]]
+; AVX2-NEXT: vsubps [[MAGICCST]], [[HIGHVEC]], [[TMP:%ymm[0-9]+]]
+; AVX2-NEXT: vaddps [[TMP]], [[LOWVEC]], %ymm0
+; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_uitofp_v8i32_to_v8f32:
; AVX512F: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/vec_uint_to_fp.ll b/llvm/test/CodeGen/X86/vec_uint_to_fp.ll
index c7871d86c6ea..a123a63a3bb8 100644
--- a/llvm/test/CodeGen/X86/vec_uint_to_fp.ll
+++ b/llvm/test/CodeGen/X86/vec_uint_to_fp.ll
@@ -23,10 +23,10 @@
; CST-NEXT: .long 1392508928 ## 0x53000000
; CST: [[MAGICCSTADDR:LCPI0_[0-9]+]]:
-; CST-NEXT: .long 3539992704 ## float -5.49764202E+11
-; CST-NEXT: .long 3539992704 ## float -5.49764202E+11
-; CST-NEXT: .long 3539992704 ## float -5.49764202E+11
-; CST-NEXT: .long 3539992704 ## float -5.49764202E+11
+; CST-NEXT: .long 1392509056 ## float 5.49764202E+11
+; CST-NEXT: .long 1392509056 ## float 5.49764202E+11
+; CST-NEXT: .long 1392509056 ## float 5.49764202E+11
+; CST-NEXT: .long 1392509056 ## float 5.49764202E+11
; AVX2: [[LOWCSTADDR:LCPI0_[0-9]+]]:
; AVX2-NEXT: .long 1258291200 ## 0x4b000000
@@ -35,7 +35,7 @@
; AVX2-NEXT: .long 1392508928 ## 0x53000000
; AVX2: [[MAGICCSTADDR:LCPI0_[0-9]+]]:
-; AVX2-NEXT: .long 3539992704 ## float -5.49764202E+11
+; AVX2-NEXT: .long 1392509056 ## float 5.49764202E+11
define <4 x float> @test1(<4 x i32> %A) nounwind {
; CHECK-LABEL: test1:
@@ -47,7 +47,7 @@ define <4 x float> @test1(<4 x i32> %A) nounwind {
; SSE-NEXT: por [[LOWCSTADDR]](%rip), [[MASK]]
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: por [[HIGHCSTADDR]](%rip), %xmm0
-; SSE-NEXT: addps [[MAGICCSTADDR]](%rip), %xmm0
+; SSE-NEXT: subps [[MAGICCSTADDR]](%rip), %xmm0
; SSE-NEXT: addps [[MASK]], %xmm0
; SSE-NEXT: retq
;
@@ -57,14 +57,14 @@ define <4 x float> @test1(<4 x i32> %A) nounwind {
; SSE41-NEXT: pblendw $85, %xmm0, [[LOWVEC]]
; SSE41-NEXT: psrld $16, %xmm0
; SSE41-NEXT: pblendw $170, [[HIGHCSTADDR]](%rip), %xmm0
-; SSE41-NEXT: addps [[MAGICCSTADDR]](%rip), %xmm0
+; SSE41-NEXT: subps [[MAGICCSTADDR]](%rip), %xmm0
; SSE41-NEXT: addps [[LOWVEC]], %xmm0
; SSE41-NEXT: retq
;
; AVX: vpblendw $170, [[LOWCSTADDR]](%rip), %xmm0, [[LOWVEC:%xmm[0-9]+]]
; AVX-NEXT: vpsrld $16, %xmm0, [[SHIFTVEC:%xmm[0-9]+]]
; AVX-NEXT: vpblendw $170, [[HIGHCSTADDR]](%rip), [[SHIFTVEC]], [[HIGHVEC:%xmm[0-9]+]]
-; AVX-NEXT: vaddps [[MAGICCSTADDR]](%rip), [[HIGHVEC]], [[TMP:%xmm[0-9]+]]
+; AVX-NEXT: vsubps [[MAGICCSTADDR]](%rip), [[HIGHVEC]], [[TMP:%xmm[0-9]+]]
; AVX-NEXT: vaddps [[TMP]], [[LOWVEC]], %xmm0
; AVX-NEXT: retq
;
@@ -76,7 +76,7 @@ define <4 x float> @test1(<4 x i32> %A) nounwind {
; AVX2-NEXT: vpbroadcastd [[HIGHCSTADDR]](%rip), [[HIGHCST:%xmm[0-9]+]]
; AVX2-NEXT: vpblendw $170, [[HIGHCST]], [[SHIFTVEC]], [[HIGHVEC:%xmm[0-9]+]]
; AVX2-NEXT: vbroadcastss [[MAGICCSTADDR]](%rip), [[MAGICCST:%xmm[0-9]+]]
-; AVX2-NEXT: vaddps [[MAGICCST]], [[HIGHVEC]], [[TMP:%xmm[0-9]+]]
+; AVX2-NEXT: vsubps [[MAGICCST]], [[HIGHVEC]], [[TMP:%xmm[0-9]+]]
; AVX2-NEXT: vaddps [[TMP]], [[LOWVEC]], %xmm0
; AVX2-NEXT: retq
%C = uitofp <4 x i32> %A to <4 x float>
@@ -91,7 +91,7 @@ define <4 x float> @test1(<4 x i32> %A) nounwind {
; AVX2-NEXT: .long 1392508928 ## 0x53000000
; AVX2: [[MAGICCSTADDR:LCPI1_[0-9]+]]:
-; AVX2-NEXT: .long 3539992704 ## float -5.49764202E+11
+; AVX2-NEXT: .long 1392509056 ## float 5.49764202E+11
define <8 x float> @test2(<8 x i32> %A) nounwind {
; CHECK-LABEL: test2:
@@ -107,15 +107,15 @@ define <8 x float> @test2(<8 x i32> %A) nounwind {
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: movdqa {{.*#+}} [[HIGHCST:xmm[0-9]+]] = [1392508928,1392508928,1392508928,1392508928]
; SSE-NEXT: por %[[HIGHCST]], %xmm0
-; SSE-NEXT: movaps {{.*#+}} [[MAGICCST:xmm[0-9]+]] = [-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11]
-; SSE-NEXT: addps %[[MAGICCST]], %xmm0
+; SSE-NEXT: movaps {{.*#+}} [[MAGICCST:xmm[0-9]+]] = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; SSE-NEXT: subps %[[MAGICCST]], %xmm0
; SSE-NEXT: addps [[VECLOW]], %xmm0
; MASK is the low vector of the second part after this point.
; SSE-NEXT: pand %xmm1, %[[MASK]]
; SSE-NEXT: por %[[LOWCST]], %[[MASK]]
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: por %[[HIGHCST]], %xmm1
-; SSE-NEXT: addps %[[MAGICCST]], %xmm1
+; SSE-NEXT: subps %[[MAGICCST]], %xmm1
; SSE-NEXT: addps %[[MASK]], %xmm1
; SSE-NEXT: retq
;
@@ -125,8 +125,8 @@ define <8 x float> @test2(<8 x i32> %A) nounwind {
; SSE41-NEXT: psrld $16, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} [[HIGHCST:xmm[0-9]+]] = [1392508928,1392508928,1392508928,1392508928]
; SSE41-NEXT: pblendw $170, %[[HIGHCST]], %xmm0
-; SSE41-NEXT: movaps {{.*#+}} [[MAGICCST:xmm[0-9]+]] = [-5.49764202E+11,-5.49764202E+11,-5.49764202E+11,-5.49764202E+11]
-; SSE41-NEXT: addps %[[MAGICCST]], %xmm0
+; SSE41-NEXT: movaps {{.*#+}} [[MAGICCST:xmm[0-9]+]] = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; SSE41-NEXT: subps %[[MAGICCST]], %xmm0
; SSE41-NEXT: addps [[VECLOW]], %xmm0
; LOWCST is the low vector of the second part after this point.
; The operands of the blend are inverted because we reuse xmm1
@@ -134,7 +134,7 @@ define <8 x float> @test2(<8 x i32> %A) nounwind {
; SSE41-NEXT: pblendw $85, %xmm1, %[[LOWCST]]
; SSE41-NEXT: psrld $16, %xmm1
; SSE41-NEXT: pblendw $170, %[[HIGHCST]], %xmm1
-; SSE41-NEXT: addps %[[MAGICCST]], %xmm1
+; SSE41-NEXT: subps %[[MAGICCST]], %xmm1
; SSE41-NEXT: addps %[[LOWCST]], %xmm1
; SSE41-NEXT: retq
;
@@ -148,7 +148,7 @@ define <8 x float> @test2(<8 x i32> %A) nounwind {
; AVX2-NEXT: vpbroadcastd [[HIGHCSTADDR]](%rip), [[HIGHCST:%ymm[0-9]+]]
; AVX2-NEXT: vpblendw $170, [[HIGHCST]], [[SHIFTVEC]], [[HIGHVEC:%ymm[0-9]+]]
; AVX2-NEXT: vbroadcastss [[MAGICCSTADDR]](%rip), [[MAGICCST:%ymm[0-9]+]]
-; AVX2-NEXT: vaddps [[MAGICCST]], [[HIGHVEC]], [[TMP:%ymm[0-9]+]]
+; AVX2-NEXT: vsubps [[MAGICCST]], [[HIGHVEC]], [[TMP:%ymm[0-9]+]]
; AVX2-NEXT: vaddps [[TMP]], [[LOWVEC]], %ymm0
; AVX2-NEXT: retq
%C = uitofp <8 x i32> %A to <8 x float>
diff --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
index 631e826621fa..7b80dae0b656 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
@@ -7317,7 +7317,7 @@ define <4 x float> @constrained_vector_uitofp_v4f32_v4i32(<4 x i32> %x) #0 {
; CHECK-NEXT: por {{.*}}(%rip), %xmm1
; CHECK-NEXT: psrld $16, %xmm0
; CHECK-NEXT: por {{.*}}(%rip), %xmm0
-; CHECK-NEXT: addps {{.*}}(%rip), %xmm0
+; CHECK-NEXT: subps {{.*}}(%rip), %xmm0
; CHECK-NEXT: addps %xmm1, %xmm0
; CHECK-NEXT: retq
;
@@ -7326,7 +7326,7 @@ define <4 x float> @constrained_vector_uitofp_v4f32_v4i32(<4 x i32> %x) #0 {
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; AVX1-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vsubps {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;