[llvm] r277535 - AMDGPU: fdiv -1, x -> rcp -x

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 2 15:25:04 PDT 2016


Author: arsenm
Date: Tue Aug  2 17:25:04 2016
New Revision: 277535

URL: http://llvm.org/viewvc/llvm-project?rev=277535&view=rev
Log:
AMDGPU: fdiv -1, x -> rcp -x
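For reference: negating a float only flips the sign bit, so -1.0 / x and
1.0 / (-x) produce the same result, and the existing rcp path can be reused by
pushing the sign onto the operand. A minimal IR sketch of the input pattern
(mirroring the new neg_rcp_pat_f32 test below; the function and value names
here are only illustrative):

    define void @neg_rcp(float addrspace(1)* %out, float %x) nounwind {
      ; -1.0 / x: under unsafe-fp-math, or for f32 with denormals disabled,
      ; this now selects v_rcp_f32 with the source-negation modifier instead
      ; of going through the full division expansion.
      %rcp = fdiv float -1.0, %x
      store float %rcp, float addrspace(1)* %out, align 4
      ret void
    }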

Modified:
    llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/trunk/test/CodeGen/AMDGPU/rcp-pattern.ll
    llvm/trunk/test/CodeGen/AMDGPU/rsq.ll

Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=277535&r1=277534&r2=277535&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Tue Aug  2 17:25:04 2016
@@ -2464,22 +2464,31 @@ SDValue SITargetLowering::lowerFastUnsaf
   bool Unsafe = DAG.getTarget().Options.UnsafeFPMath;
 
   if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
-    if ((Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals())) &&
-        CLHS->isExactlyValue(1.0)) {
-      // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
-      // the CI documentation has a worst case error of 1 ulp.
-      // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
-      // use it as long as we aren't trying to use denormals.
-
-      // 1.0 / sqrt(x) -> rsq(x)
-      //
-      // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
-      // error seems really high at 2^29 ULP.
-      if (RHS.getOpcode() == ISD::FSQRT)
-        return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
+    if ((Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals()))) {
 
-      // 1.0 / x -> rcp(x)
-      return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
+      if (CLHS->isExactlyValue(1.0)) {
+        // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
+        // the CI documentation has a worst case error of 1 ulp.
+        // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
+        // use it as long as we aren't trying to use denormals.
+
+        // 1.0 / sqrt(x) -> rsq(x)
+        //
+        // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
+        // error seems really high at 2^29 ULP.
+        if (RHS.getOpcode() == ISD::FSQRT)
+          return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
+
+        // 1.0 / x -> rcp(x)
+        return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
+      }
+
+      // Same as for 1.0, but expand the sign out of the constant.
+      if (CLHS->isExactlyValue(-1.0)) {
+        // -1.0 / x -> rcp (fneg x)
+        SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
+        return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
+      }
     }
   }
 
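Note that the new case emits rcp (fneg x) rather than folding the sign into a
constant: the sign is expanded out of the -1.0 so that instruction selection
can absorb the fneg as a VOP3 source modifier, which is what the -[[SRC]]
operand in the new neg_rcp_pat_f32 checks below looks for. A small sketch of a
roughly equivalent hand-written form, assuming the llvm.amdgcn.rcp intrinsic
(which should lower to the same RCP node; names are illustrative):

    declare float @llvm.amdgcn.rcp.f32(float)

    define void @neg_rcp_by_hand(float addrspace(1)* %out, float %x) nounwind {
      ; Explicit fneg feeding rcp; the negation is expected to be folded into
      ; the v_rcp_f32 source modifier rather than emitted as a separate v_xor.
      %neg = fsub float -0.0, %x
      %rcp = call float @llvm.amdgcn.rcp.f32(float %neg)
      store float %rcp, float addrspace(1)* %out, align 4
      ret void
    }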

Modified: llvm/trunk/test/CodeGen/AMDGPU/rcp-pattern.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/rcp-pattern.ll?rev=277535&r1=277534&r2=277535&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/rcp-pattern.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/rcp-pattern.ll Tue Aug  2 17:25:04 2016
@@ -76,8 +76,22 @@ define void @rcp_fabs_pat_f32(float addr
   ret void
 }
 
-; FIXME: fneg folded into constant 1
+; FUNC-LABEL: {{^}}neg_rcp_pat_f32:
+; GCN: s_load_dword [[SRC:s[0-9]+]]
+; GCN: v_rcp_f32_e64 [[RCP:v[0-9]+]], -[[SRC]]
+; GCN: buffer_store_dword [[RCP]]
+
+; EG: RECIP_IEEE
+define void @neg_rcp_pat_f32(float addrspace(1)* %out, float %src) #0 {
+  %rcp = fdiv float -1.0, %src
+  store float %rcp, float addrspace(1)* %out, align 4
+  ret void
+}
+
 ; FUNC-LABEL: {{^}}rcp_fabs_fneg_pat_f32:
+; GCN: s_load_dword [[SRC:s[0-9]+]]
+; GCN: v_rcp_f32_e64 [[RCP:v[0-9]+]], -|[[SRC]]|
+; GCN: buffer_store_dword [[RCP]]
 define void @rcp_fabs_fneg_pat_f32(float addrspace(1)* %out, float %src) #0 {
   %src.fabs = call float @llvm.fabs.f32(float %src)
   %src.fabs.fneg = fsub float -0.0, %src.fabs
@@ -86,8 +100,27 @@ define void @rcp_fabs_fneg_pat_f32(float
   ret void
 }
 
+; FUNC-LABEL: {{^}}rcp_fabs_fneg_pat_multi_use_f32:
+; GCN: s_load_dword [[SRC:s[0-9]+]]
+; GCN: v_rcp_f32_e64 [[RCP:v[0-9]+]], -|[[SRC]]|
+; GCN: buffer_store_dword [[RCP]]
+
+; GCN: v_mul_f32_e64 [[MUL:v[0-9]+]], [[SRC]], -|[[SRC]]|
+; GCN: buffer_store_dword [[MUL]]
+define void @rcp_fabs_fneg_pat_multi_use_f32(float addrspace(1)* %out, float %src) #0 {
+  %src.fabs = call float @llvm.fabs.f32(float %src)
+  %src.fabs.fneg = fsub float -0.0, %src.fabs
+  %rcp = fdiv float 1.0, %src.fabs.fneg
+  store volatile float %rcp, float addrspace(1)* %out, align 4
+
+  %other = fmul float %src, %src.fabs.fneg
+  store volatile float %other, float addrspace(1)* %out, align 4
+  ret void
+}
+
 
 declare float @llvm.fabs.f32(float) #1
+declare float @llvm.sqrt.f32(float) #1
 
 attributes #0 = { nounwind "unsafe-fp-math"="false" }
 attributes #1 = { nounwind readnone }

Modified: llvm/trunk/test/CodeGen/AMDGPU/rsq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/rsq.ll?rev=277535&r1=277534&r2=277535&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/rsq.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/rsq.ll Tue Aug  2 17:25:04 2016
@@ -72,3 +72,67 @@ define void @rsqrt_fmul(float addrspace(
   store float %z, float addrspace(1)* %out.gep
   ret void
 }
+
+; SI-LABEL: {{^}}neg_rsq_f32:
+; SI-SAFE: v_sqrt_f32_e32 [[SQRT:v[0-9]+]], v{{[0-9]+}}
+; SI-SAFE: v_rcp_f32_e64 [[RSQ:v[0-9]+]], -[[SQRT]]
+; SI-SAFE: buffer_store_dword [[RSQ]]
+
+; SI-UNSAFE: v_rsq_f32_e32 [[RSQ:v[0-9]+]], v{{[0-9]+}}
+; SI-UNSAFE: v_xor_b32_e32 [[NEG_RSQ:v[0-9]+]], 0x80000000, [[RSQ]]
+; SI-UNSAFE: buffer_store_dword [[NEG_RSQ]]
+define void @neg_rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+  %val = load float, float addrspace(1)* %in, align 4
+  %sqrt = call float @llvm.sqrt.f32(float %val)
+  %div = fdiv float -1.0, %sqrt
+  store float %div, float addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}neg_rsq_f64:
+; SI-SAFE: v_sqrt_f64_e32
+; SI-SAFE: v_div_scale_f64
+
+; SI-UNSAFE: v_sqrt_f64_e32 [[SQRT:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}
+; SI-UNSAFE: v_rcp_f64_e64 [[RCP:v\[[0-9]+:[0-9]+\]]], -[[SQRT]]
+; SI-UNSAFE: buffer_store_dwordx2 [[RCP]]
+define void @neg_rsq_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
+  %val = load double, double addrspace(1)* %in, align 4
+  %sqrt = call double @llvm.sqrt.f64(double %val)
+  %div = fdiv double -1.0, %sqrt
+  store double %div, double addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}neg_rsq_neg_f32:
+; SI-SAFE: v_sqrt_f32_e64 [[SQRT:v[0-9]+]], -v{{[0-9]+}}
+; SI-SAFE: v_rcp_f32_e64 [[RSQ:v[0-9]+]], -[[SQRT]]
+; SI-SAFE: buffer_store_dword [[RSQ]]
+
+; SI-UNSAFE: v_rsq_f32_e64 [[RSQ:v[0-9]+]], -v{{[0-9]+}}
+; SI-UNSAFE: v_xor_b32_e32 [[NEG_RSQ:v[0-9]+]], 0x80000000, [[RSQ]]
+; SI-UNSAFE: buffer_store_dword [[NEG_RSQ]]
+define void @neg_rsq_neg_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
+  %val = load float, float addrspace(1)* %in, align 4
+  %val.fneg = fsub float -0.0, %val
+  %sqrt = call float @llvm.sqrt.f32(float %val.fneg)
+  %div = fdiv float -1.0, %sqrt
+  store float %div, float addrspace(1)* %out, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}neg_rsq_neg_f64:
+; SI-SAFE: v_sqrt_f64_e64 v{{\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+:[0-9]+\]}}
+; SI-SAFE: v_div_scale_f64
+
+; SI-UNSAFE: v_sqrt_f64_e64 [[SQRT:v\[[0-9]+:[0-9]+\]]], -v{{\[[0-9]+:[0-9]+\]}}
+; SI-UNSAFE: v_rcp_f64_e64 [[RCP:v\[[0-9]+:[0-9]+\]]], -[[SQRT]]
+; SI-UNSAFE: buffer_store_dwordx2 [[RCP]]
+define void @neg_rsq_neg_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
+  %val = load double, double addrspace(1)* %in, align 4
+  %val.fneg = fsub double -0.0, %val
+  %sqrt = call double @llvm.sqrt.f64(double %val.fneg)
+  %div = fdiv double -1.0, %sqrt
+  store double %div, double addrspace(1)* %out, align 4
+  ret void
+}
