[llvm] 2449931 - AMDGPU: Don't use old form of fneg in some tests

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 23 06:11:14 PDT 2023


Author: Matt Arsenault
Date: 2023-06-23T09:11:06-04:00
New Revision: 2449931b0176456fd6139a358befafb90c8ae3f6

URL: https://github.com/llvm/llvm-project/commit/2449931b0176456fd6139a358befafb90c8ae3f6
DIFF: https://github.com/llvm/llvm-project/commit/2449931b0176456fd6139a358befafb90c8ae3f6.diff

LOG: AMDGPU: Don't use old form of fneg in some tests
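
For context, these tests previously spelled floating-point negation as a
subtraction from negative zero; the patch switches them to the dedicated
fneg instruction, which per the LangRef simply returns the operand with its
sign bit flipped. A minimal before/after sketch, reusing an operand name
from the patch:

    ; old idiom: negation written as a subtraction from -0.0
    %fneg.a = fsub half -0.0, %a

    ; current form: the dedicated fneg instruction
    %fneg.a = fneg half %a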

Added: 
    

Modified: 
    llvm/test/CodeGen/AMDGPU/clamp.ll
    llvm/test/CodeGen/AMDGPU/fract.f64.ll
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.ll
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.ll
    llvm/test/CodeGen/AMDGPU/rsq.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/clamp.ll b/llvm/test/CodeGen/AMDGPU/clamp.ll
index 0585de22320be..45946ff497acb 100644
--- a/llvm/test/CodeGen/AMDGPU/clamp.ll
+++ b/llvm/test/CodeGen/AMDGPU/clamp.ll
@@ -577,7 +577,7 @@ define amdgpu_kernel void @v_clamp_neg_f16(ptr addrspace(1) %out, ptr addrspace(
   %gep0 = getelementptr half, ptr addrspace(1) %aptr, i32 %tid
   %out.gep = getelementptr half, ptr addrspace(1) %out, i32 %tid
   %a = load half, ptr addrspace(1) %gep0
-  %fneg.a = fsub half -0.0, %a
+  %fneg.a = fneg half %a
   %max = call half @llvm.maxnum.f16(half %fneg.a, half 0.0)
   %med = call half @llvm.minnum.f16(half %max, half 1.0)
 
@@ -647,7 +647,7 @@ define amdgpu_kernel void @v_clamp_negabs_f16(ptr addrspace(1) %out, ptr addrspa
   %out.gep = getelementptr half, ptr addrspace(1) %out, i32 %tid
   %a = load half, ptr addrspace(1) %gep0
   %fabs.a = call half @llvm.fabs.f16(half %a)
-  %fneg.fabs.a = fsub half -0.0, %fabs.a
+  %fneg.fabs.a = fneg half %fabs.a
 
   %max = call half @llvm.maxnum.f16(half %fneg.fabs.a, half 0.0)
   %med = call half @llvm.minnum.f16(half %max, half 1.0)
@@ -783,7 +783,7 @@ define amdgpu_kernel void @v_clamp_neg_f64(ptr addrspace(1) %out, ptr addrspace(
   %gep0 = getelementptr double, ptr addrspace(1) %aptr, i32 %tid
   %out.gep = getelementptr double, ptr addrspace(1) %out, i32 %tid
   %a = load double, ptr addrspace(1) %gep0
-  %fneg.a = fsub double -0.0, %a
+  %fneg.a = fneg double %a
   %max = call double @llvm.maxnum.f64(double %fneg.a, double 0.0)
   %med = call double @llvm.minnum.f64(double %max, double 1.0)
 
@@ -852,7 +852,7 @@ define amdgpu_kernel void @v_clamp_negabs_f64(ptr addrspace(1) %out, ptr addrspa
   %out.gep = getelementptr double, ptr addrspace(1) %out, i32 %tid
   %a = load double, ptr addrspace(1) %gep0
   %fabs.a = call double @llvm.fabs.f64(double %a)
-  %fneg.fabs.a = fsub double -0.0, %fabs.a
+  %fneg.fabs.a = fneg double %fabs.a
 
   %max = call double @llvm.maxnum.f64(double %fneg.fabs.a, double 0.0)
   %med = call double @llvm.minnum.f64(double %max, double 1.0)
@@ -2787,7 +2787,7 @@ define amdgpu_kernel void @v_clamp_neg_v2f16(ptr addrspace(1) %out, ptr addrspac
   %gep0 = getelementptr <2 x half>, ptr addrspace(1) %aptr, i32 %tid
   %out.gep = getelementptr <2 x half>, ptr addrspace(1) %out, i32 %tid
   %a = load <2 x half>, ptr addrspace(1) %gep0
-  %fneg.a = fsub <2 x half> <half -0.0, half -0.0>, %a
+  %fneg.a = fneg <2 x half> %a
   %max = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %fneg.a, <2 x half> zeroinitializer)
   %med = call <2 x half> @llvm.minnum.v2f16(<2 x half> %max, <2 x half> <half 1.0, half 1.0>)
 
@@ -2868,7 +2868,7 @@ define amdgpu_kernel void @v_clamp_negabs_v2f16(ptr addrspace(1) %out, ptr addrs
   %out.gep = getelementptr <2 x half>, ptr addrspace(1) %out, i32 %tid
   %a = load <2 x half>, ptr addrspace(1) %gep0
   %fabs.a = call <2 x half> @llvm.fabs.v2f16(<2 x half> %a)
-  %fneg.fabs.a = fsub <2 x half> <half -0.0, half -0.0>, %fabs.a
+  %fneg.fabs.a = fneg <2 x half> %fabs.a
 
   %max = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %fneg.fabs.a, <2 x half> zeroinitializer)
   %med = call <2 x half> @llvm.minnum.v2f16(<2 x half> %max, <2 x half> <half 1.0, half 1.0>)
@@ -2947,7 +2947,7 @@ define amdgpu_kernel void @v_clamp_neglo_v2f16(ptr addrspace(1) %out, ptr addrsp
   %out.gep = getelementptr <2 x half>, ptr addrspace(1) %out, i32 %tid
   %a = load <2 x half>, ptr addrspace(1) %gep0
   %lo = extractelement <2 x half> %a, i32 0
-  %neg.lo = fsub half -0.0, %lo
+  %neg.lo = fneg half %lo
   %neg.lo.vec = insertelement <2 x half> %a, half %neg.lo, i32 0
   %max = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %neg.lo.vec, <2 x half> zeroinitializer)
   %med = call <2 x half> @llvm.minnum.v2f16(<2 x half> %max, <2 x half> <half 1.0, half 1.0>)
@@ -3025,7 +3025,7 @@ define amdgpu_kernel void @v_clamp_neghi_v2f16(ptr addrspace(1) %out, ptr addrsp
   %out.gep = getelementptr <2 x half>, ptr addrspace(1) %out, i32 %tid
   %a = load <2 x half>, ptr addrspace(1) %gep0
   %hi = extractelement <2 x half> %a, i32 1
-  %neg.hi = fsub half -0.0, %hi
+  %neg.hi = fneg half %hi
   %neg.hi.vec = insertelement <2 x half> %a, half %neg.hi, i32 1
   %max = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %neg.hi.vec, <2 x half> zeroinitializer)
   %med = call <2 x half> @llvm.minnum.v2f16(<2 x half> %max, <2 x half> <half 1.0, half 1.0>)

diff --git a/llvm/test/CodeGen/AMDGPU/fract.f64.ll b/llvm/test/CodeGen/AMDGPU/fract.f64.ll
index 522fd2e946a01..e89db2f89e53f 100644
--- a/llvm/test/CodeGen/AMDGPU/fract.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/fract.f64.ll
@@ -50,7 +50,7 @@ define amdgpu_kernel void @fract_f64(ptr addrspace(1) %out, ptr addrspace(1) %sr
 ; GCN: buffer_store_dwordx2 [[FRACT]]
 define amdgpu_kernel void @fract_f64_neg(ptr addrspace(1) %out, ptr addrspace(1) %src) #1 {
   %x = load double, ptr addrspace(1) %src
-  %neg.x = fsub double -0.0, %x
+  %neg.x = fneg double %x
   %floor.neg.x = call double @llvm.floor.f64(double %neg.x)
   %fract = fsub double %neg.x, %floor.neg.x
   store double %fract, ptr addrspace(1) %out
@@ -76,7 +76,7 @@ define amdgpu_kernel void @fract_f64_neg(ptr addrspace(1) %out, ptr addrspace(1)
 define amdgpu_kernel void @fract_f64_neg_abs(ptr addrspace(1) %out, ptr addrspace(1) %src) #1 {
   %x = load double, ptr addrspace(1) %src
   %abs.x = call double @llvm.fabs.f64(double %x)
-  %neg.abs.x = fsub double -0.0, %abs.x
+  %neg.abs.x = fneg double %abs.x
   %floor.neg.abs.x = call double @llvm.floor.f64(double %neg.abs.x)
   %fract = fsub double %neg.abs.x, %floor.neg.abs.x
   store double %fract, ptr addrspace(1) %out

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.ll
index d821def2fc985..fc4b75ba1bf9f 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.ll
@@ -27,7 +27,7 @@ define amdgpu_kernel void @s_test_fabs_frexp_exp_f32(ptr addrspace(1) %out, floa
 ; GCN: v_frexp_exp_i32_f32_e64 {{v[0-9]+}}, -|{{s[0-9]+}}|
 define amdgpu_kernel void @s_test_fneg_fabs_frexp_exp_f32(ptr addrspace(1) %out, float %src) #1 {
   %fabs.src = call float @llvm.fabs.f32(float %src)
-  %fneg.fabs.src = fsub float -0.0, %fabs.src
+  %fneg.fabs.src = fneg float %fabs.src
   %frexp.exp = call i32 @llvm.amdgcn.frexp.exp.i32.f32(float %fneg.fabs.src)
   store i32 %frexp.exp, ptr addrspace(1) %out
   ret void
@@ -54,7 +54,7 @@ define amdgpu_kernel void @s_test_fabs_frexp_exp_f64(ptr addrspace(1) %out, doub
 ; GCN: v_frexp_exp_i32_f64_e64 {{v[0-9]+}}, -|{{s\[[0-9]+:[0-9]+\]}}|
 define amdgpu_kernel void @s_test_fneg_fabs_frexp_exp_f64(ptr addrspace(1) %out, double %src) #1 {
   %fabs.src = call double @llvm.fabs.f64(double %src)
-  %fneg.fabs.src = fsub double -0.0, %fabs.src
+  %fneg.fabs.src = fneg double %fabs.src
   %frexp.exp = call i32 @llvm.amdgcn.frexp.exp.i32.f64(double %fneg.fabs.src)
   store i32 %frexp.exp, ptr addrspace(1) %out
   ret void

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.ll
index 0bc50b8a1d94a..a9b5ef2e8df59 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.ll
@@ -27,7 +27,7 @@ define amdgpu_kernel void @s_test_fabs_frexp_mant_f32(ptr addrspace(1) %out, flo
 ; GCN: v_frexp_mant_f32_e64 {{v[0-9]+}}, -|{{s[0-9]+}}|
 define amdgpu_kernel void @s_test_fneg_fabs_frexp_mant_f32(ptr addrspace(1) %out, float %src) #1 {
   %fabs.src = call float @llvm.fabs.f32(float %src)
-  %fneg.fabs.src = fsub float -0.0, %fabs.src
+  %fneg.fabs.src = fneg float %fabs.src
   %frexp.mant = call float @llvm.amdgcn.frexp.mant.f32(float %fneg.fabs.src)
   store float %frexp.mant, ptr addrspace(1) %out
   ret void
@@ -54,7 +54,7 @@ define amdgpu_kernel void @s_test_fabs_frexp_mant_f64(ptr addrspace(1) %out, dou
 ; GCN: v_frexp_mant_f64_e64 {{v\[[0-9]+:[0-9]+\]}}, -|{{s\[[0-9]+:[0-9]+\]}}|
 define amdgpu_kernel void @s_test_fneg_fabs_frexp_mant_f64(ptr addrspace(1) %out, double %src) #1 {
   %fabs.src = call double @llvm.fabs.f64(double %src)
-  %fneg.fabs.src = fsub double -0.0, %fabs.src
+  %fneg.fabs.src = fneg double %fabs.src
   %frexp.mant = call double @llvm.amdgcn.frexp.mant.f64(double %fneg.fabs.src)
   store double %frexp.mant, ptr addrspace(1) %out
   ret void

diff --git a/llvm/test/CodeGen/AMDGPU/rsq.ll b/llvm/test/CodeGen/AMDGPU/rsq.ll
index 5f4a888e1a0ee..a8ae0b9920f32 100644
--- a/llvm/test/CodeGen/AMDGPU/rsq.ll
+++ b/llvm/test/CodeGen/AMDGPU/rsq.ll
@@ -144,7 +144,7 @@ define amdgpu_kernel void @neg_rsq_neg_f32(ptr addrspace(1) noalias %out, ptr ad
 ; SI-UNSAFE: v_fma_f64
 define amdgpu_kernel void @neg_rsq_neg_f64(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) #0 {
   %val = load double, ptr addrspace(1) %in, align 4
-  %val.fneg = fsub double -0.0, %val
+  %val.fneg = fneg double %val
   %sqrt = call double @llvm.sqrt.f64(double %val.fneg)
   %div = fdiv double -1.0, %sqrt
   store double %div, ptr addrspace(1) %out, align 4
