[llvm] 92be640 - [FPEnv][AMDGPU] Disable FSUB(-0,X)->FNEG(X) DAGCombine when subnormals are flushed

Cameron McInally via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 4 12:44:22 PST 2021


Author: Cameron McInally
Date: 2021-01-04T14:44:10-06:00
New Revision: 92be640bd7d4fbc8e032a0aa81381a0246efa0be

URL: https://github.com/llvm/llvm-project/commit/92be640bd7d4fbc8e032a0aa81381a0246efa0be
DIFF: https://github.com/llvm/llvm-project/commit/92be640bd7d4fbc8e032a0aa81381a0246efa0be.diff

LOG: [FPEnv][AMDGPU] Disable FSUB(-0,X)->FNEG(X) DAGCombine when subnormals are flushed

This patch disables the FSUB(-0,X)->FNEG(X) DAG combine when the target flushes subnormals. This requires updating the existing AMDGPU tests to use the fneg IR instruction in place of the old fsub(-0,X) canonical form, since AMDGPU is the only backend that currently checks the DenormalMode flags.

Note that this will require follow-up optimizations to make sure the FSUB(-0,X) form is still handled appropriately.
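
As a minimal illustration (not part of the commit; the function names are
hypothetical), consider both IR forms on a target that flushes denormal
inputs to zero. For a denormal %x, the fsub form sees +0.0 and produces
-0.0, while fneg is a pure sign-bit flip and still produces the negated
denormal, so the two forms are no longer interchangeable:

  define float @fsub_form(float %x) {
    ; With input denormals flushed (DAZ), a denormal %x is treated as
    ; +0.0 here, so the result is -0.0 rather than the negated denormal.
    %r = fsub float -0.0, %x
    ret float %r
  }

  define float @fneg_form(float %x) {
    ; fneg only flips the sign bit and is unaffected by denormal
    ; flushing; the negated denormal bit pattern is preserved.
    %r = fneg float %x
    ret float %r
  }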

Differential Revision: https://reviews.llvm.org/D93243

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/AMDGPU/clamp-modifier.ll
    llvm/test/CodeGen/AMDGPU/clamp.ll
    llvm/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll
    llvm/test/CodeGen/AMDGPU/fma-combine.ll
    llvm/test/CodeGen/AMDGPU/fneg-combines.ll
    llvm/test/CodeGen/AMDGPU/fpext-free.ll
    llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
    llvm/test/CodeGen/AMDGPU/known-never-snan.ll
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fmad.ftz.ll
    llvm/test/CodeGen/AMDGPU/mad-combine.ll
    llvm/test/CodeGen/AMDGPU/mad-mix.ll
    llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
    llvm/test/CodeGen/AMDGPU/rsq.ll
    llvm/test/CodeGen/AMDGPU/v_mac.ll
    llvm/test/CodeGen/AMDGPU/v_mac_f16.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 92b23df9e3af..6b1bd721a993 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -13367,18 +13367,21 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
   }
 
   // (fsub -0.0, N1) -> -N1
-  // NOTE: It is safe to transform an FSUB(-0.0,X) into an FNEG(X), since the
-  //       FSUB does not specify the sign bit of a NaN. Also note that for
-  //       the same reason, the inverse transform is not safe, unless fast math
-  //       flags are in play.
   if (N0CFP && N0CFP->isZero()) {
     if (N0CFP->isNegative() ||
         (Options.NoSignedZerosFPMath || Flags.hasNoSignedZeros())) {
-      if (SDValue NegN1 =
-              TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize))
-        return NegN1;
-      if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
-        return DAG.getNode(ISD::FNEG, DL, VT, N1);
+      // We cannot replace an FSUB(+-0.0,X) with FNEG(X) when denormals are
+      // flushed to zero, unless all users treat denorms as zero (DAZ).
+      // FIXME: This transform will change the sign of a NaN and the behavior
+      // of a signaling NaN. It is only valid when a NoNaN flag is present.
+      DenormalMode DenormMode = DAG.getDenormalMode(VT);
+      if (DenormMode == DenormalMode::getIEEE()) {
+        if (SDValue NegN1 =
+                TLI.getNegatedExpression(N1, DAG, LegalOperations, ForCodeSize))
+          return NegN1;
+        if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
+          return DAG.getNode(ISD::FNEG, DL, VT, N1);
+      }
     }
   }
 

diff --git a/llvm/test/CodeGen/AMDGPU/clamp-modifier.ll b/llvm/test/CodeGen/AMDGPU/clamp-modifier.ll
index 5a56a1a264af..4f3d6442da44 100644
--- a/llvm/test/CodeGen/AMDGPU/clamp-modifier.ll
+++ b/llvm/test/CodeGen/AMDGPU/clamp-modifier.ll
@@ -62,7 +62,7 @@ define amdgpu_kernel void @v_clamp_add_neg_src_f32(float addrspace(1)* %out, flo
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
   %a = load float, float addrspace(1)* %gep0
   %floor = call float @llvm.floor.f32(float %a)
-  %neg.floor = fsub float -0.0, %floor
+  %neg.floor = fneg float %floor
   %max = call float @llvm.maxnum.f32(float %neg.floor, float 0.0)
   %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
   store float %clamp, float addrspace(1)* %out.gep

diff --git a/llvm/test/CodeGen/AMDGPU/clamp.ll b/llvm/test/CodeGen/AMDGPU/clamp.ll
index 1e18b2fa1c1b..256bea7fb7fb 100644
--- a/llvm/test/CodeGen/AMDGPU/clamp.ll
+++ b/llvm/test/CodeGen/AMDGPU/clamp.ll
@@ -25,7 +25,7 @@ define amdgpu_kernel void @v_clamp_neg_f32(float addrspace(1)* %out, float addrs
   %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
   %a = load float, float addrspace(1)* %gep0
-  %fneg.a = fsub float -0.0, %a
+  %fneg.a = fneg float %a
   %max = call float @llvm.maxnum.f32(float %fneg.a, float 0.0)
   %med = call float @llvm.minnum.f32(float %max, float 1.0)
 
@@ -42,7 +42,7 @@ define amdgpu_kernel void @v_clamp_negabs_f32(float addrspace(1)* %out, float ad
   %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
   %a = load float, float addrspace(1)* %gep0
   %fabs.a = call float @llvm.fabs.f32(float %a)
-  %fneg.fabs.a = fsub float -0.0, %fabs.a
+  %fneg.fabs.a = fneg float %fabs.a
 
   %max = call float @llvm.maxnum.f32(float %fneg.fabs.a, float 0.0)
   %med = call float @llvm.minnum.f32(float %max, float 1.0)

diff --git a/llvm/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll b/llvm/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll
index 1a005731d3c7..15b751791d0a 100644
--- a/llvm/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll
@@ -56,7 +56,7 @@ define amdgpu_kernel void @div_minus_1_by_x_25ulp(float addrspace(1)* %arg) {
 ; GCN:            global_store_dword v{{[0-9]+}}, [[OUT]], s{{\[[0-9]+:[0-9]+\]}}
 define amdgpu_kernel void @div_1_by_minus_x_25ulp(float addrspace(1)* %arg) {
   %load = load float, float addrspace(1)* %arg, align 4
-  %neg = fsub float -0.000000e+00, %load
+  %neg = fneg float %load
   %div = fdiv float 1.000000e+00, %neg, !fpmath !0
   store float %div, float addrspace(1)* %arg, align 4
   ret void
@@ -188,7 +188,7 @@ define amdgpu_kernel void @div_v4_minus_1_by_x_25ulp(<4 x float> addrspace(1)* %
 ; GCN-FLUSH:      global_store_dwordx4 v{{[0-9]+}}, v{{\[}}[[OUT0]]:[[OUT3]]], s{{\[[0-9]+:[0-9]+\]}}
 define amdgpu_kernel void @div_v4_1_by_minus_x_25ulp(<4 x float> addrspace(1)* %arg) {
   %load = load <4 x float>, <4 x float> addrspace(1)* %arg, align 16
-  %neg = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %load
+  %neg = fneg <4 x float> %load
   %div = fdiv <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, %neg, !fpmath !0
   store <4 x float> %div, <4 x float> addrspace(1)* %arg, align 16
   ret void
@@ -226,7 +226,7 @@ define amdgpu_kernel void @div_v4_1_by_minus_x_25ulp(<4 x float> addrspace(1)* %
 ; GCN-FLUSH:      global_store_dwordx4 v{{[0-9]+}}, v{{\[}}[[OUT0]]:[[OUT3]]], s{{\[[0-9]+:[0-9]+\]}}
 define amdgpu_kernel void @div_v4_minus_1_by_minus_x_25ulp(<4 x float> addrspace(1)* %arg) {
   %load = load <4 x float>, <4 x float> addrspace(1)* %arg, align 16
-  %neg = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %load
+  %neg = fneg <4 x float> %load
   %div = fdiv <4 x float> <float -1.000000e+00, float -1.000000e+00, float -1.000000e+00, float -1.000000e+00>, %neg, !fpmath !0
   store <4 x float> %div, <4 x float> addrspace(1)* %arg, align 16
   ret void
@@ -372,7 +372,7 @@ define amdgpu_kernel void @div_minus_1_by_x_fast(float addrspace(1)* %arg) {
 ; GCN: global_store_dword v{{[0-9]+}}, [[RCP]], s{{\[[0-9]+:[0-9]+\]}}
 define amdgpu_kernel void @div_1_by_minus_x_fast(float addrspace(1)* %arg) {
   %load = load float, float addrspace(1)* %arg, align 4
-  %neg = fsub float -0.000000e+00, %load, !fpmath !0
+  %neg = fneg float %load, !fpmath !0
   %div = fdiv fast float 1.000000e+00, %neg
   store float %div, float addrspace(1)* %arg, align 4
   ret void

diff --git a/llvm/test/CodeGen/AMDGPU/fma-combine.ll b/llvm/test/CodeGen/AMDGPU/fma-combine.ll
index b624ddf7a6fd..8677ed547568 100644
--- a/llvm/test/CodeGen/AMDGPU/fma-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/fma-combine.ll
@@ -647,7 +647,7 @@ define amdgpu_kernel void @fma_neg_2.0_neg_a_b_f32(float addrspace(1)* %out, flo
   %r1 = load volatile float, float addrspace(1)* %gep.0
   %r2 = load volatile float, float addrspace(1)* %gep.1
 
-  %r1.fneg = fsub float -0.000000e+00, %r1
+  %r1.fneg = fneg float %r1
 
   %r3 = tail call float @llvm.fma.f32(float -2.0, float %r1.fneg, float %r2)
   store float %r3, float addrspace(1)* %gep.out
@@ -669,7 +669,7 @@ define amdgpu_kernel void @fma_2.0_neg_a_b_f32(float addrspace(1)* %out, float a
   %r1 = load volatile float, float addrspace(1)* %gep.0
   %r2 = load volatile float, float addrspace(1)* %gep.1
 
-  %r1.fneg = fsub float -0.000000e+00, %r1
+  %r1.fneg = fneg float %r1
 
   %r3 = tail call float @llvm.fma.f32(float 2.0, float %r1.fneg, float %r2)
   store float %r3, float addrspace(1)* %gep.out

diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
index 01b0e6d17d1a..a111a343866e 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
@@ -482,7 +482,7 @@ define amdgpu_kernel void @v_fneg_self_minnum_f32_ieee(float addrspace(1)* %out,
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
   %a = load volatile float, float addrspace(1)* %a.gep
   %min = call float @llvm.minnum.f32(float %a, float %a)
-  %min.fneg = fsub float -0.0, %min
+  %min.fneg = fneg float %min
   store float %min.fneg, float addrspace(1)* %out.gep
   ret void
 }
@@ -493,7 +493,7 @@ define amdgpu_kernel void @v_fneg_self_minnum_f32_ieee(float addrspace(1)* %out,
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_self_minnum_f32_no_ieee(float %a) #0 {
   %min = call float @llvm.minnum.f32(float %a, float %a)
-  %min.fneg = fsub float -0.0, %min
+  %min.fneg = fneg float %min
   ret float %min.fneg
 }
 
@@ -887,7 +887,7 @@ define amdgpu_kernel void @v_fneg_self_maxnum_f32_ieee(float addrspace(1)* %out,
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
   %a = load volatile float, float addrspace(1)* %a.gep
   %max = call float @llvm.maxnum.f32(float %a, float %a)
-  %max.fneg = fsub float -0.0, %max
+  %max.fneg = fneg float %max
   store float %max.fneg, float addrspace(1)* %out.gep
   ret void
 }
@@ -898,7 +898,7 @@ define amdgpu_kernel void @v_fneg_self_maxnum_f32_ieee(float addrspace(1)* %out,
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_self_maxnum_f32_no_ieee(float %a) #0 {
   %max = call float @llvm.maxnum.f32(float %a, float %a)
-  %max.fneg = fsub float -0.0, %max
+  %max.fneg = fneg float %max
   ret float %max.fneg
 }
 
@@ -2039,7 +2039,7 @@ define amdgpu_kernel void @v_fneg_amdgcn_sin_f32(float addrspace(1)* %out, float
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
   %a = load volatile float, float addrspace(1)* %a.gep
   %sin = call float @llvm.amdgcn.sin.f32(float %a)
-  %fneg = fsub float -0.0, %sin
+  %fneg = fneg float %sin
   store float %fneg, float addrspace(1)* %out.gep
   ret void
 }
@@ -2059,7 +2059,7 @@ define amdgpu_kernel void @v_fneg_trunc_f32(float addrspace(1)* %out, float addr
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
   %a = load volatile float, float addrspace(1)* %a.gep
   %trunc = call float @llvm.trunc.f32(float %a)
-  %fneg = fsub float -0.0, %trunc
+  %fneg = fneg float %trunc
   store float %fneg, float addrspace(1)* %out.gep
   ret void
 }
@@ -2086,7 +2086,7 @@ define amdgpu_kernel void @v_fneg_round_f32(float addrspace(1)* %out, float addr
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
   %a = load volatile float, float addrspace(1)* %a.gep
   %round = call float @llvm.round.f32(float %a)
-  %fneg = fsub float -0.0, %round
+  %fneg = fneg float %round
   store float %fneg, float addrspace(1)* %out.gep
   ret void
 }
@@ -2106,7 +2106,7 @@ define amdgpu_kernel void @v_fneg_rint_f32(float addrspace(1)* %out, float addrs
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
   %a = load volatile float, float addrspace(1)* %a.gep
   %rint = call float @llvm.rint.f32(float %a)
-  %fneg = fsub float -0.0, %rint
+  %fneg = fneg float %rint
   store float %fneg, float addrspace(1)* %out.gep
   ret void
 }
@@ -2126,7 +2126,7 @@ define amdgpu_kernel void @v_fneg_nearbyint_f32(float addrspace(1)* %out, float
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
   %a = load volatile float, float addrspace(1)* %a.gep
   %nearbyint = call float @llvm.nearbyint.f32(float %a)
-  %fneg = fsub float -0.0, %nearbyint
+  %fneg = fneg float %nearbyint
   store float %fneg, float addrspace(1)* %out.gep
   ret void
 }
@@ -2146,7 +2146,7 @@ define amdgpu_kernel void @v_fneg_canonicalize_f32(float addrspace(1)* %out, flo
   %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
   %a = load volatile float, float addrspace(1)* %a.gep
   %trunc = call float @llvm.canonicalize.f32(float %a)
-  %fneg = fsub float -0.0, %trunc
+  %fneg = fneg float %trunc
   store float %fneg, float addrspace(1)* %out.gep
   ret void
 }
@@ -2170,7 +2170,7 @@ define amdgpu_kernel void @v_fneg_interp_p1_f32(float addrspace(1)* %out, float
   %a = load volatile float, float addrspace(1)* %a.gep
   %b = load volatile float, float addrspace(1)* %b.gep
   %mul = fmul float %a, %b
-  %fneg = fsub float -0.0, %mul
+  %fneg = fneg float %mul
   %intrp0 = call float @llvm.amdgcn.interp.p1(float %fneg, i32 0, i32 0, i32 0)
   %intrp1 = call float @llvm.amdgcn.interp.p1(float %fneg, i32 1, i32 0, i32 0)
   store volatile float %intrp0, float addrspace(1)* %out.gep
@@ -2193,7 +2193,7 @@ define amdgpu_kernel void @v_fneg_interp_p2_f32(float addrspace(1)* %out, float
   %a = load volatile float, float addrspace(1)* %a.gep
   %b = load volatile float, float addrspace(1)* %b.gep
   %mul = fmul float %a, %b
-  %fneg = fsub float -0.0, %mul
+  %fneg = fneg float %mul
   %intrp0 = call float @llvm.amdgcn.interp.p2(float 4.0, float %fneg, i32 0, i32 0, i32 0)
   %intrp1 = call float @llvm.amdgcn.interp.p2(float 4.0, float %fneg, i32 1, i32 0, i32 0)
   store volatile float %intrp0, float addrspace(1)* %out.gep
@@ -2230,7 +2230,7 @@ define amdgpu_kernel void @v_fneg_copytoreg_f32(float addrspace(1)* %out, float
   %b = load volatile float, float addrspace(1)* %b.gep
   %c = load volatile float, float addrspace(1)* %c.gep
   %mul = fmul float %a, %b
-  %fneg = fsub float -0.0, %mul
+  %fneg = fneg float %mul
   %cmp0 = icmp eq i32 %d, 0
   br i1 %cmp0, label %if, label %endif
 
@@ -2266,7 +2266,7 @@ define amdgpu_kernel void @v_fneg_inlineasm_f32(float addrspace(1)* %out, float
   %b = load volatile float, float addrspace(1)* %b.gep
   %c = load volatile float, float addrspace(1)* %c.gep
   %mul = fmul float %a, %b
-  %fneg = fsub float -0.0, %mul
+  %fneg = fneg float %mul
   call void asm sideeffect "; use $0", "v"(float %fneg) #0
   store volatile float %fneg, float addrspace(1)* %out.gep
   ret void
@@ -2295,7 +2295,7 @@ define amdgpu_kernel void @v_fneg_inlineasm_multi_use_src_f32(float addrspace(1)
   %b = load volatile float, float addrspace(1)* %b.gep
   %c = load volatile float, float addrspace(1)* %c.gep
   %mul = fmul float %a, %b
-  %fneg = fsub float -0.0, %mul
+  %fneg = fneg float %mul
   call void asm sideeffect "; use $0", "v"(float %fneg) #0
   store volatile float %mul, float addrspace(1)* %out.gep
   ret void
@@ -2328,7 +2328,7 @@ define amdgpu_kernel void @multiuse_fneg_2_vop3_users_f32(float addrspace(1)* %o
   %b = load volatile float, float addrspace(1)* %b.gep
   %c = load volatile float, float addrspace(1)* %c.gep
 
-  %fneg.a = fsub float -0.0, %a
+  %fneg.a = fneg float %a
   %fma0 = call float @llvm.fma.f32(float %fneg.a, float %b, float %c)
   %fma1 = call float @llvm.fma.f32(float %fneg.a, float %c, float 2.0)
 
@@ -2360,7 +2360,7 @@ define amdgpu_kernel void @multiuse_fneg_2_vop2_users_f32(float addrspace(1)* %o
   %b = load volatile float, float addrspace(1)* %b.gep
   %c = load volatile float, float addrspace(1)* %c.gep
 
-  %fneg.a = fsub float -0.0, %a
+  %fneg.a = fneg float %a
   %mul0 = fmul float %fneg.a, %b
   %mul1 = fmul float %fneg.a, %c
 
@@ -2391,7 +2391,7 @@ define amdgpu_kernel void @multiuse_fneg_vop2_vop3_users_f32(float addrspace(1)*
   %b = load volatile float, float addrspace(1)* %b.gep
   %c = load volatile float, float addrspace(1)* %c.gep
 
-  %fneg.a = fsub float -0.0, %a
+  %fneg.a = fneg float %a
   %fma0 = call float @llvm.fma.f32(float %fneg.a, float %b, float 2.0)
   %mul1 = fmul float %fneg.a, %c
 
@@ -2433,7 +2433,7 @@ define amdgpu_kernel void @free_fold_src_code_size_cost_use_f32(float addrspace(
   %d = load volatile float, float addrspace(1)* %d.gep
 
   %fma0 = call float @llvm.fma.f32(float %a, float %b, float 2.0)
-  %fneg.fma0 = fsub float -0.0, %fma0
+  %fneg.fma0 = fneg float %fma0
   %mul1 = fmul float %fneg.fma0, %c
   %mul2 = fmul float %fneg.fma0, %d
 
@@ -2501,7 +2501,7 @@ define amdgpu_kernel void @one_use_cost_to_fold_into_src_f32(float addrspace(1)*
   %d = load volatile float, float addrspace(1)* %d.gep
 
   %trunc.a = call float @llvm.trunc.f32(float %a)
-  %trunc.fneg.a = fsub float -0.0, %trunc.a
+  %trunc.fneg.a = fneg float %trunc.a
   %fma0 = call float @llvm.fma.f32(float %trunc.fneg.a, float %b, float %c)
   store volatile float %fma0, float addrspace(1)* %out
   ret void
@@ -2531,7 +2531,7 @@ define amdgpu_kernel void @multi_use_cost_to_fold_into_src(float addrspace(1)* %
   %d = load volatile float, float addrspace(1)* %d.gep
 
   %trunc.a = call float @llvm.trunc.f32(float %a)
-  %trunc.fneg.a = fsub float -0.0, %trunc.a
+  %trunc.fneg.a = fneg float %trunc.a
   %fma0 = call float @llvm.fma.f32(float %trunc.fneg.a, float %b, float %c)
   %mul1 = fmul float %trunc.a, %d
   store volatile float %fma0, float addrspace(1)* %out

diff --git a/llvm/test/CodeGen/AMDGPU/fpext-free.ll b/llvm/test/CodeGen/AMDGPU/fpext-free.ll
index fd9843400ba5..5dbcc6ca6095 100644
--- a/llvm/test/CodeGen/AMDGPU/fpext-free.ll
+++ b/llvm/test/CodeGen/AMDGPU/fpext-free.ll
@@ -288,7 +288,7 @@ define float @fsub_fneg_fpext_fmul_f16_to_f32(half %x, half %y, float %z) #0 {
 entry:
   %mul = fmul half %x, %y
   %mul.ext = fpext half %mul to float
-  %neg.mul.ext = fsub float -0.0, %mul.ext
+  %neg.mul.ext = fneg float %mul.ext
   %add = fsub float %neg.mul.ext, %z
   ret float %add
 }

diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
index d9d39fab1731..a722042d7c50 100644
--- a/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
@@ -99,7 +99,7 @@ define amdgpu_kernel void @fneg_fptrunc_f32_to_f16(
     float addrspace(1)* %a) {
 entry:
   %a.val = load float, float addrspace(1)* %a
-  %a.fneg = fsub float -0.0, %a.val
+  %a.fneg = fneg float %a.val
   %r.val = fptrunc float %a.fneg to half
   store half %r.val, half addrspace(1)* %r
   ret void
@@ -132,7 +132,7 @@ define amdgpu_kernel void @fneg_fabs_fptrunc_f32_to_f16(
 entry:
   %a.val = load float, float addrspace(1)* %a
   %a.fabs = call float @llvm.fabs.f32(float %a.val)
-  %a.fneg.fabs = fsub float -0.0, %a.fabs
+  %a.fneg.fabs = fneg float %a.fabs
   %r.val = fptrunc float %a.fneg.fabs to half
   store half %r.val, half addrspace(1)* %r
   ret void

diff --git a/llvm/test/CodeGen/AMDGPU/known-never-snan.ll b/llvm/test/CodeGen/AMDGPU/known-never-snan.ll
index ecd71887f8e3..5ee6eda85f0a 100644
--- a/llvm/test/CodeGen/AMDGPU/known-never-snan.ll
+++ b/llvm/test/CodeGen/AMDGPU/known-never-snan.ll
@@ -26,7 +26,7 @@ define float @v_test_known_not_snan_fneg_input_fmed3_r_i_i_f32(float %a) #0 {
 ; GCN-NEXT:    v_med3_f32 v0, -v0, 2.0, 4.0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %a.nnan.add = fdiv nnan float 1.0, %a, !fpmath !0
-  %known.not.snan = fsub float -0.0, %a.nnan.add
+  %known.not.snan = fneg float %a.nnan.add
   %max = call float @llvm.maxnum.f32(float %known.not.snan, float 2.0)
   %med = call float @llvm.minnum.f32(float %max, float 4.0)
   ret float %med

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fmad.ftz.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fmad.ftz.ll
index 38d1212a70cc..8a56c334d9a7 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fmad.ftz.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fmad.ftz.ll
@@ -74,7 +74,7 @@ define amdgpu_kernel void @mad_f32_neg_b(
   %a.val = load float, float addrspace(1)* %a
   %b.val = load float, float addrspace(1)* %b
   %c.val = load float, float addrspace(1)* %c
-  %neg.b = fsub float -0.0, %b.val
+  %neg.b = fneg float %b.val
   %r.val = call float @llvm.amdgcn.fmad.ftz.f32(float %a.val, float %neg.b, float %c.val)
   store float %r.val, float addrspace(1)* %r
   ret void
@@ -107,7 +107,7 @@ define amdgpu_kernel void @mad_f32_neg_abs_b(
   %b.val = load float, float addrspace(1)* %b
   %c.val = load float, float addrspace(1)* %c
   %abs.b = call float @llvm.fabs.f32(float %b.val)
-  %neg.abs.b = fsub float -0.0, %abs.b
+  %neg.abs.b = fneg float %abs.b
   %r.val = call float @llvm.amdgcn.fmad.ftz.f32(float %a.val, float %neg.abs.b, float %c.val)
   store float %r.val, float addrspace(1)* %r
   ret void

diff --git a/llvm/test/CodeGen/AMDGPU/mad-combine.ll b/llvm/test/CodeGen/AMDGPU/mad-combine.ll
index c90970a93f9f..a4a89659aecd 100644
--- a/llvm/test/CodeGen/AMDGPU/mad-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad-combine.ll
@@ -296,7 +296,7 @@ define amdgpu_kernel void @combine_to_mad_fsub_2_f32(float addrspace(1)* noalias
   %c = load volatile float, float addrspace(1)* %gep.2
 
   %mul = fmul float %a, %b
-  %mul.neg = fsub float -0.0, %mul
+  %mul.neg = fneg float %mul
   %fma = fsub float %mul.neg, %c
 
   store float %fma, float addrspace(1)* %gep.out
@@ -338,7 +338,7 @@ define amdgpu_kernel void @combine_to_mad_fsub_2_f32_2uses_neg(float addrspace(1
   %d = load volatile float, float addrspace(1)* %gep.3
 
   %mul = fmul float %a, %b
-  %mul.neg = fsub float -0.0, %mul
+  %mul.neg = fneg float %mul
   %fma0 = fsub float %mul.neg, %c
   %fma1 = fsub float %mul.neg, %d
 
@@ -382,7 +382,7 @@ define amdgpu_kernel void @combine_to_mad_fsub_2_f32_2uses_mul(float addrspace(1
   %d = load volatile float, float addrspace(1)* %gep.3
 
   %mul = fmul float %a, %b
-  %mul.neg = fsub float -0.0, %mul
+  %mul.neg = fneg float %mul
   %fma0 = fsub float %mul.neg, %c
   %fma1 = fsub float %mul, %d
 

diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix.ll b/llvm/test/CodeGen/AMDGPU/mad-mix.ll
index a7126121f09f..fa3df0262400 100644
--- a/llvm/test/CodeGen/AMDGPU/mad-mix.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad-mix.ll
@@ -109,7 +109,7 @@ define float @v_mad_mix_f32_negf16lo_f16lo_f16lo(half %src0, half %src1, half %s
   %src0.ext = fpext half %src0 to float
   %src1.ext = fpext half %src1 to float
   %src2.ext = fpext half %src2 to float
-  %src0.ext.neg = fsub float -0.0, %src0.ext
+  %src0.ext.neg = fneg float %src0.ext
   %result = tail call float @llvm.fmuladd.f32(float %src0.ext.neg, float %src1.ext, float %src2.ext)
   ret float %result
 }
@@ -143,7 +143,7 @@ define float @v_mad_mix_f32_negabsf16lo_f16lo_f16lo(half %src0, half %src1, half
   %src1.ext = fpext half %src1 to float
   %src2.ext = fpext half %src2 to float
   %src0.ext.abs = call float @llvm.fabs.f32(float %src0.ext)
-  %src0.ext.neg.abs = fsub float -0.0, %src0.ext.abs
+  %src0.ext.neg.abs = fneg float %src0.ext.abs
   %result = tail call float @llvm.fmuladd.f32(float %src0.ext.neg.abs, float %src1.ext, float %src2.ext)
   ret float %result
 }
@@ -172,7 +172,7 @@ define float @v_mad_mix_f32_f16lo_f16lo_f32(half %src0, half %src1, float %src2)
 define float @v_mad_mix_f32_f16lo_f16lo_negf32(half %src0, half %src1, float %src2) #0 {
   %src0.ext = fpext half %src0 to float
   %src1.ext = fpext half %src1 to float
-  %src2.neg = fsub float -0.0, %src2
+  %src2.neg = fneg float %src2
   %result = tail call float @llvm.fmuladd.f32(float %src0.ext, float %src1.ext, float %src2.neg)
   ret float %result
 }
@@ -203,7 +203,7 @@ define float @v_mad_mix_f32_f16lo_f16lo_negabsf32(half %src0, half %src1, float
   %src0.ext = fpext half %src0 to float
   %src1.ext = fpext half %src1 to float
   %src2.abs = call float @llvm.fabs.f32(float %src2)
-  %src2.neg.abs = fsub float -0.0, %src2.abs
+  %src2.neg.abs = fneg float %src2.abs
   %result = tail call float @llvm.fmuladd.f32(float %src0.ext, float %src1.ext, float %src2.neg.abs)
   ret float %result
 }

diff --git a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
index 4702cc7000a6..d68cee3ca7e9 100644
--- a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
+++ b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
@@ -94,7 +94,7 @@ define amdgpu_kernel void @neg_rcp_pat_f32(float addrspace(1)* %out, float %src)
 ; GCN: buffer_store_dword [[RCP]]
 define amdgpu_kernel void @rcp_fabs_fneg_pat_f32(float addrspace(1)* %out, float %src) #0 {
   %src.fabs = call float @llvm.fabs.f32(float %src)
-  %src.fabs.fneg = fsub float -0.0, %src.fabs
+  %src.fabs.fneg = fneg float %src.fabs
   %rcp = fdiv float 1.0, %src.fabs.fneg, !fpmath !0
   store float %rcp, float addrspace(1)* %out, align 4
   ret void
@@ -108,7 +108,7 @@ define amdgpu_kernel void @rcp_fabs_fneg_pat_f32(float addrspace(1)* %out, float
 ; GCN: buffer_store_dword [[MUL]]
 define amdgpu_kernel void @rcp_fabs_fneg_pat_multi_use_f32(float addrspace(1)* %out, float %src) #0 {
   %src.fabs = call float @llvm.fabs.f32(float %src)
-  %src.fabs.fneg = fsub float -0.0, %src.fabs
+  %src.fabs.fneg = fneg float %src.fabs
   %rcp = fdiv float 1.0, %src.fabs.fneg, !fpmath !0
   store volatile float %rcp, float addrspace(1)* %out, align 4
 

diff --git a/llvm/test/CodeGen/AMDGPU/rsq.ll b/llvm/test/CodeGen/AMDGPU/rsq.ll
index 4dd5b5517074..88c73f17ceb2 100644
--- a/llvm/test/CodeGen/AMDGPU/rsq.ll
+++ b/llvm/test/CodeGen/AMDGPU/rsq.ll
@@ -116,7 +116,7 @@ define amdgpu_kernel void @neg_rsq_f64(double addrspace(1)* noalias %out, double
 ; SI-UNSAFE: buffer_store_dword [[RSQ]]
 define amdgpu_kernel void @neg_rsq_neg_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #0 {
   %val = load float, float addrspace(1)* %in, align 4
-  %val.fneg = fsub float -0.0, %val
+  %val.fneg = fneg float %val
   %sqrt = call float @llvm.sqrt.f32(float %val.fneg)
   %div = fdiv float -1.0, %sqrt, !fpmath !0
   store float %div, float addrspace(1)* %out, align 4

diff --git a/llvm/test/CodeGen/AMDGPU/v_mac.ll b/llvm/test/CodeGen/AMDGPU/v_mac.ll
index 1c4cf37ab080..9827377fc368 100644
--- a/llvm/test/CodeGen/AMDGPU/v_mac.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_mac.ll
@@ -105,7 +105,7 @@ entry:
   %b = load float, float addrspace(1)* %b_ptr
   %c = load float, float addrspace(1)* %c_ptr
 
-  %neg_a = fsub float -0.0, %a
+  %neg_a = fneg float %a
   %tmp0 = fmul float %neg_a, %b
   %tmp1 = fadd float %tmp0, %c
 
@@ -165,7 +165,7 @@ entry:
   %b = load float, float addrspace(1)* %b_ptr
   %c = load float, float addrspace(1)* %c_ptr
 
-  %neg_b = fsub float -0.0, %b
+  %neg_b = fneg float %b
   %tmp0 = fmul float %a, %neg_b
   %tmp1 = fadd float %tmp0, %c
 
@@ -205,7 +205,7 @@ entry:
   %b = load float, float addrspace(1)* %b_ptr
   %c = load float, float addrspace(1)* %c_ptr
 
-  %neg_c = fsub float -0.0, %c
+  %neg_c = fneg float %c
   %tmp0 = fmul float %a, %b
   %tmp1 = fadd float %tmp0, %neg_c
 

diff --git a/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll b/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll
index e10d2d389c69..86f67e085cd1 100644
--- a/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll
@@ -83,7 +83,7 @@ entry:
   %b.val = load half, half addrspace(1)* %b
   %c.val = load half, half addrspace(1)* %c
 
-  %a.neg = fsub half -0.0, %a.val
+  %a.neg = fneg half %a.val
   %t.val = fmul half %a.neg, %b.val
   %r.val = fadd half %t.val, %c.val
 
@@ -110,7 +110,7 @@ entry:
   %b.val = load half, half addrspace(1)* %b
   %c.val = load half, half addrspace(1)* %c
 
-  %b.neg = fsub half -0.0, %b.val
+  %b.neg = fneg half %b.val
   %t.val = fmul half %a.val, %b.neg
   %r.val = fadd half %t.val, %c.val
 
@@ -137,7 +137,7 @@ entry:
   %b.val = load half, half addrspace(1)* %b
   %c.val = load half, half addrspace(1)* %c
 
-  %c.neg = fsub half -0.0, %c.val
+  %c.neg = fneg half %c.val
   %t.val = fmul half %a.val, %b.val
   %r.val = fadd half %t.val, %c.neg
 
@@ -410,7 +410,7 @@ entry:
   %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
   %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
 
-  %a.neg = fsub <2 x half> <half -0.0, half -0.0>, %a.val
+  %a.neg = fneg <2 x half> %a.val
   %t.val = fmul <2 x half> %a.neg, %b.val
   %r.val = fadd <2 x half> %t.val, %c.val
 
@@ -439,7 +439,7 @@ entry:
   %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
   %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
 
-  %b.neg = fsub <2 x half> <half -0.0, half -0.0>, %b.val
+  %b.neg = fneg <2 x half> %b.val
   %t.val = fmul <2 x half> %a.val, %b.neg
   %r.val = fadd <2 x half> %t.val, %c.val
 
@@ -472,7 +472,7 @@ entry:
   %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
   %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
 
-  %c.neg = fsub <2 x half> <half -0.0, half -0.0>, %c.val
+  %c.neg = fneg <2 x half> %c.val
   %t.val = fmul <2 x half> %a.val, %b.val
   %r.val = fadd <2 x half> %t.val, %c.neg
 


        

