[llvm] r334142 - [AMDGPU] Improve reciprocal handling

Stanislav Mekhanoshin via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 6 15:22:32 PDT 2018


Author: rampitec
Date: Wed Jun  6 15:22:32 2018
New Revision: 334142

URL: http://llvm.org/viewvc/llvm-project?rev=334142&view=rev
Log:
[AMDGPU] Improve reciprocal handling

When denormals are supported we are producing a full division for
1.0f / x. That still can be replaced by the faster version:

    bool c = fabs(x) > 0x1.0p+96f;
    float s = c ? 0x1.0p-32f : 1.0f;
    x *= s;
    return s * v_rcp_f32(x)

in case the requested accuracy is 2.5 ulp or less. The same version
is used when denormals are not supported and the numerator is not 1.0;
for a 1.0 numerator just v_rcp_f32 is used.

The optimization of 1/x is extended to the case -1/x, which is the
same except for the resulting sign bit.

OpenCL conformance passed with both enabled and disabled denorms.

Differential Revision: https://reviews.llvm.org/D47805

Added:
    llvm/trunk/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp?rev=334142&r1=334141&r2=334142&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp Wed Jun  6 15:22:32 2018
@@ -372,13 +372,18 @@ bool AMDGPUCodeGenPrepare::promoteUnifor
   return true;
 }
 
-static bool shouldKeepFDivF32(Value *Num, bool UnsafeDiv) {
+static bool shouldKeepFDivF32(Value *Num, bool UnsafeDiv, bool HasDenormals) {
   const ConstantFP *CNum = dyn_cast<ConstantFP>(Num);
   if (!CNum)
-    return false;
+    return HasDenormals;
+
+  if (UnsafeDiv)
+    return true;
+
+  bool IsOne = CNum->isExactlyValue(+1.0) || CNum->isExactlyValue(-1.0);
 
   // Reciprocal f32 is handled separately without denormals.
-  return UnsafeDiv || CNum->isExactlyValue(+1.0);
+  return HasDenormals ^ IsOne;
 }
 
 // Insert an intrinsic for fast fdiv for safe math situations where we can
@@ -404,7 +409,7 @@ bool AMDGPUCodeGenPrepare::visitFDiv(Bin
                                       FMF.allowReciprocal();
 
   // With UnsafeDiv node will be optimized to just rcp and mul.
-  if (ST->hasFP32Denormals() || UnsafeDiv)
+  if (UnsafeDiv)
     return false;
 
   IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()), FPMath);
@@ -418,6 +423,7 @@ bool AMDGPUCodeGenPrepare::visitFDiv(Bin
 
   Value *NewFDiv = nullptr;
 
+  bool HasDenormals = ST->hasFP32Denormals();
   if (VectorType *VT = dyn_cast<VectorType>(Ty)) {
     NewFDiv = UndefValue::get(VT);
 
@@ -428,7 +434,7 @@ bool AMDGPUCodeGenPrepare::visitFDiv(Bin
       Value *DenEltI = Builder.CreateExtractElement(Den, I);
       Value *NewElt;
 
-      if (shouldKeepFDivF32(NumEltI, UnsafeDiv)) {
+      if (shouldKeepFDivF32(NumEltI, UnsafeDiv, HasDenormals)) {
         NewElt = Builder.CreateFDiv(NumEltI, DenEltI);
       } else {
         NewElt = Builder.CreateCall(Decl, { NumEltI, DenEltI });
@@ -437,7 +443,7 @@ bool AMDGPUCodeGenPrepare::visitFDiv(Bin
       NewFDiv = Builder.CreateInsertElement(NewFDiv, NewElt, I);
     }
   } else {
-    if (!shouldKeepFDivF32(Num, UnsafeDiv))
+    if (!shouldKeepFDivF32(Num, UnsafeDiv, HasDenormals))
       NewFDiv = Builder.CreateCall(Decl, { Num, Den });
   }
 
@@ -447,7 +453,7 @@ bool AMDGPUCodeGenPrepare::visitFDiv(Bin
     FDiv.eraseFromParent();
   }
 
-  return true;
+  return !!NewFDiv;
 }
 
 static bool hasUnsafeFPMath(const Function &F) {

Added: llvm/trunk/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll?rev=334142&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/fdiv32-to-rcp-folding.ll Wed Jun  6 15:22:32 2018
@@ -0,0 +1,459 @@
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=+fp32-denormals < %s | FileCheck --check-prefixes=GCN,GCN-DENORM %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-fp32-denormals < %s | FileCheck --check-prefixes=GCN,GCN-FLUSH %s
+
+; GCN-LABEL: {{^}}div_1_by_x_25ulp:
+; GCN-DENORM-DAG: v_mov_b32_e32 [[L:v[0-9]+]], 0x6f800000
+; GCN-DENORM-DAG: v_mov_b32_e32 [[S:v[0-9]+]], 0x2f800000
+; GCN-DAG:        s_load_dword [[VAL:s[0-9]+]], s[{{[0-9:]+}}], 0x0{{$}}
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |[[VAL]]|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 [[SCALE:v[0-9]+]], 1.0, [[S]], vcc
+; GCN-DENORM:     v_mul_f32_e32 [[PRESCALED:v[0-9]+]], [[VAL]], [[SCALE]]
+; GCN-DENORM:     v_rcp_f32_e32 [[RCP:v[0-9]+]], [[PRESCALED]]
+; GCN-DENORM:     v_mul_f32_e32 [[OUT:v[0-9]+]], [[SCALE]], [[RCP]]
+
+; GCN-FLUSH:      v_rcp_f32_e32 [[OUT:v[0-9]+]], [[VAL]]
+
+; GCN:            global_store_dword v[{{[0-9:]+}}], [[OUT]], off
+define amdgpu_kernel void @div_1_by_x_25ulp(float addrspace(1)* %arg) {
+  %load = load float, float addrspace(1)* %arg, align 4
+  %div = fdiv float 1.000000e+00, %load, !fpmath !0
+  store float %div, float addrspace(1)* %arg, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_minus_1_by_x_25ulp:
+; GCN-DENORM-DAG: v_mov_b32_e32 [[L:v[0-9]+]], 0x6f800000
+; GCN-DENORM-DAG: v_mov_b32_e32 [[S:v[0-9]+]], 0x2f800000
+; GCN-DAG:        s_load_dword [[VAL:s[0-9]+]], s[{{[0-9:]+}}], 0x0{{$}}
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |[[VAL]]|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 [[SCALE:v[0-9]+]], 1.0, [[S]], vcc
+; GCN-DENORM:     v_mul_f32_e64 [[PRESCALED:v[0-9]+]], [[VAL]], -[[SCALE]]
+; GCN-DENORM:     v_rcp_f32_e32 [[RCP:v[0-9]+]], [[PRESCALED]]
+; GCN-DENORM:     v_mul_f32_e32 [[OUT:v[0-9]+]], [[SCALE]], [[RCP]]
+
+; GCN-FLUSH:      v_rcp_f32_e64 [[OUT:v[0-9]+]], -[[VAL]]
+
+; GCN:            global_store_dword v[{{[0-9:]+}}], [[OUT]], off
+define amdgpu_kernel void @div_minus_1_by_x_25ulp(float addrspace(1)* %arg) {
+  %load = load float, float addrspace(1)* %arg, align 4
+  %div = fdiv float -1.000000e+00, %load, !fpmath !0
+  store float %div, float addrspace(1)* %arg, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_1_by_minus_x_25ulp:
+; GCN-DENORM-DAG: v_mov_b32_e32 [[L:v[0-9]+]], 0x6f800000
+; GCN-DENORM-DAG: v_mov_b32_e32 [[S:v[0-9]+]], 0x2f800000
+; GCN-DAG:        s_load_dword [[VAL:s[0-9]+]], s[{{[0-9:]+}}], 0x0{{$}}
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |[[VAL]]|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 [[SCALE:v[0-9]+]], 1.0, [[S]], vcc
+; GCN-DENORM:     v_mul_f32_e64 [[PRESCALED:v[0-9]+]], -[[VAL]], [[SCALE]]
+; GCN-DENORM:     v_rcp_f32_e32 [[RCP:v[0-9]+]], [[PRESCALED]]
+; GCN-DENORM:     v_mul_f32_e32 [[OUT:v[0-9]+]], [[SCALE]], [[RCP]]
+
+; GCN-FLUSH:      v_rcp_f32_e64 [[OUT:v[0-9]+]], -[[VAL]]
+
+; GCN:            global_store_dword v[{{[0-9:]+}}], [[OUT]], off
+define amdgpu_kernel void @div_1_by_minus_x_25ulp(float addrspace(1)* %arg) {
+  %load = load float, float addrspace(1)* %arg, align 4
+  %neg = fsub float -0.000000e+00, %load
+  %div = fdiv float 1.000000e+00, %neg, !fpmath !0
+  store float %div, float addrspace(1)* %arg, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_minus_1_by_minus_x_25ulp:
+; GCN-DENORM-DAG: v_mov_b32_e32 [[L:v[0-9]+]], 0x6f800000
+; GCN-DENORM-DAG: v_mov_b32_e32 [[S:v[0-9]+]], 0x2f800000
+; GCN-DAG:        s_load_dword [[VAL:s[0-9]+]], s[{{[0-9:]+}}], 0x0{{$}}
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |[[VAL]]|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 [[SCALE:v[0-9]+]], 1.0, [[S]], vcc
+; GCN-DENORM:     v_mul_f32_e32 [[PRESCALED:v[0-9]+]], [[VAL]], [[SCALE]]
+; GCN-DENORM:     v_rcp_f32_e32 [[RCP:v[0-9]+]], [[PRESCALED]]
+; GCN-DENORM:     v_mul_f32_e32 [[OUT:v[0-9]+]], [[SCALE]], [[RCP]]
+
+; GCN-FLUSH:      v_rcp_f32_e32 [[OUT:v[0-9]+]], [[VAL]]
+
+; GCN:            global_store_dword v[{{[0-9:]+}}], [[OUT]], off
+define amdgpu_kernel void @div_minus_1_by_minus_x_25ulp(float addrspace(1)* %arg) {
+  %load = load float, float addrspace(1)* %arg, align 4
+  %neg = fsub float -0.000000e+00, %load
+  %div = fdiv float -1.000000e+00, %neg, !fpmath !0
+  store float %div, float addrspace(1)* %arg, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_v4_1_by_x_25ulp:
+; GCN-DAG:        s_load_dwordx4 s{{\[}}[[VAL0:[0-9]+]]:[[VAL3:[0-9]+]]], s[{{[0-9:]+}}], 0x0{{$}}
+; GCN-DENORM-DAG: v_mov_b32_e32 [[L:v[0-9]+]], 0x6f800000
+; GCN-DENORM-DAG: v_mov_b32_e32 [[S:v[0-9]+]], 0x2f800000
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_mul_f32_e32
+; GCN-DENORM-DAG: v_mul_f32_e32
+; GCN-DENORM-DAG: v_mul_f32_e32
+; GCN-DENORM-DAG: v_mul_f32_e32
+
+; GCN-FLUSH:      v_rcp_f32_e32 v[[OUT0:[0-9]+]], s[[VAL0]]
+; GCN-FLUSH:      v_rcp_f32_e32
+; GCN-FLUSH:      v_rcp_f32_e32
+; GCN-FLUSH:      v_rcp_f32_e32 v[[OUT3:[0-9]+]], s[[VAL3]]
+; GCN-FLUSH:      global_store_dwordx4 v[{{[0-9:]+}}], v{{\[}}[[OUT0]]:[[OUT3]]], off
+define amdgpu_kernel void @div_v4_1_by_x_25ulp(<4 x float> addrspace(1)* %arg) {
+  %load = load <4 x float>, <4 x float> addrspace(1)* %arg, align 16
+  %div = fdiv <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, %load, !fpmath !0
+  store <4 x float> %div, <4 x float> addrspace(1)* %arg, align 16
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_v4_minus_1_by_x_25ulp:
+; GCN-DENORM-DAG: v_mov_b32_e32 [[L:v[0-9]+]], 0x6f800000
+; GCN-DENORM-DAG: v_mov_b32_e32 [[S:v[0-9]+]], 0x2f800000
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, -v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, -v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, -v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, -v{{[0-9]+}}
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_mul_f32_e32
+; GCN-DENORM-DAG: v_mul_f32_e32
+; GCN-DENORM-DAG: v_mul_f32_e32
+; GCN-DENORM-DAG: v_mul_f32_e32
+
+; GCN-FLUSH:      v_rcp_f32_e64 v[[OUT0:[0-9]+]], -s[[VAL0]]
+; GCN-FLUSH:      v_rcp_f32_e64
+; GCN-FLUSH:      v_rcp_f32_e64
+; GCN-FLUSH:      v_rcp_f32_e64 v[[OUT3:[0-9]+]], -s[[VAL3]]
+define amdgpu_kernel void @div_v4_minus_1_by_x_25ulp(<4 x float> addrspace(1)* %arg) {
+  %load = load <4 x float>, <4 x float> addrspace(1)* %arg, align 16
+  %div = fdiv <4 x float> <float -1.000000e+00, float -1.000000e+00, float -1.000000e+00, float -1.000000e+00>, %load, !fpmath !0
+  store <4 x float> %div, <4 x float> addrspace(1)* %arg, align 16
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_v4_1_by_minus_x_25ulp:
+; GCN-DENORM-DAG: v_mov_b32_e32 [[L:v[0-9]+]], 0x6f800000
+; GCN-DENORM-DAG: v_mov_b32_e32 [[S:v[0-9]+]], 0x2f800000
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, -s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, -s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, -s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, -s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_mul_f32_e32
+; GCN-DENORM-DAG: v_mul_f32_e32
+; GCN-DENORM-DAG: v_mul_f32_e32
+; GCN-DENORM-DAG: v_mul_f32_e32
+
+; GCN-FLUSH:      v_rcp_f32_e64 v[[OUT0:[0-9]+]], -s[[VAL0]]
+; GCN-FLUSH:      v_rcp_f32_e64
+; GCN-FLUSH:      v_rcp_f32_e64
+; GCN-FLUSH:      v_rcp_f32_e64 v[[OUT3:[0-9]+]], -s[[VAL3]]
+; GCN-FLUSH:      global_store_dwordx4 v[{{[0-9:]+}}], v{{\[}}[[OUT0]]:[[OUT3]]], off
+define amdgpu_kernel void @div_v4_1_by_minus_x_25ulp(<4 x float> addrspace(1)* %arg) {
+  %load = load <4 x float>, <4 x float> addrspace(1)* %arg, align 16
+  %neg = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %load
+  %div = fdiv <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, %neg, !fpmath !0
+  store <4 x float> %div, <4 x float> addrspace(1)* %arg, align 16
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_v4_minus_1_by_minus_x_25ulp:
+; GCN-DAG:        s_load_dwordx4 s{{\[}}[[VAL0:[0-9]+]]:[[VAL3:[0-9]+]]], s[{{[0-9:]+}}], 0x0{{$}}
+; GCN-DENORM-DAG: v_mov_b32_e32 [[L:v[0-9]+]], 0x6f800000
+; GCN-DENORM-DAG: v_mov_b32_e32 [[S:v[0-9]+]], 0x2f800000
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DENORM-DAG: v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DENORM-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_mul_f32_e32
+; GCN-DENORM-DAG: v_mul_f32_e32
+; GCN-DENORM-DAG: v_mul_f32_e32
+; GCN-DENORM-DAG: v_mul_f32_e32
+
+; GCN-FLUSH:      v_rcp_f32_e32 v[[OUT0:[0-9]+]], s[[VAL0]]
+; GCN-FLUSH:      v_rcp_f32_e32
+; GCN-FLUSH:      v_rcp_f32_e32
+; GCN-FLUSH:      v_rcp_f32_e32 v[[OUT3:[0-9]+]], s[[VAL3]]
+; GCN-FLUSH:      global_store_dwordx4 v[{{[0-9:]+}}], v{{\[}}[[OUT0]]:[[OUT3]]], off
+define amdgpu_kernel void @div_v4_minus_1_by_minus_x_25ulp(<4 x float> addrspace(1)* %arg) {
+  %load = load <4 x float>, <4 x float> addrspace(1)* %arg, align 16
+  %neg = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %load
+  %div = fdiv <4 x float> <float -1.000000e+00, float -1.000000e+00, float -1.000000e+00, float -1.000000e+00>, %neg, !fpmath !0
+  store <4 x float> %div, <4 x float> addrspace(1)* %arg, align 16
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_v4_c_by_x_25ulp:
+; GCN-DAG:        v_mov_b32_e32 [[L:v[0-9]+]], 0x6f800000
+; GCN-DAG:        v_mov_b32_e32 [[S:v[0-9]+]], 0x2f800000
+; GCN-DENORM-DAG: v_div_scale_f32 {{.*}}, 2.0{{$}}
+; GCN-DENORM-DAG: v_div_scale_f32 {{.*}}, 2.0{{$}}
+; GCN-DENORM-DAG: v_div_scale_f32 {{.*}}, -2.0{{$}}
+; GCN-DENORM-DAG: v_div_scale_f32 {{.*}}, -2.0{{$}}
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+
+; GCN-DAG:        v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DAG:        v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DAG:        v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DAG:        v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+
+; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, s{{[0-9]+}}, -v{{[0-9]+}}
+; GCN-DENORM-DAG: v_rcp_f32_e32 [[RCP1:v[0-9]+]], v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[RCP1]]
+; GCN-DENORM-DAG: v_rcp_f32_e32 [[RCP2:v[0-9]+]], v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[RCP2]]
+
+; GCN-DENORM-DAG: v_div_fmas_f32
+; GCN-DENORM-DAG: v_div_fmas_f32
+; GCN-DENORM-DAG: v_div_fixup_f32 {{.*}}, 2.0{{$}}
+; GCN-DENORM-DAG: v_div_fixup_f32 {{.*}}, -2.0{{$}}
+
+; GCN-FLUSH-DAG:  v_rcp_f32_e32
+; GCN-FLUSH-DAG:  v_rcp_f32_e64
+
+; GCN-NOT:        v_cmp_gt_f32_e64
+; GCN-NOT:        v_cndmask_b32_e32
+; GCN-FLUSH-NOT:  v_div
+
+; GCN:            global_store_dwordx4
+define amdgpu_kernel void @div_v4_c_by_x_25ulp(<4 x float> addrspace(1)* %arg) {
+  %load = load <4 x float>, <4 x float> addrspace(1)* %arg, align 16
+  %div = fdiv <4 x float> <float 2.000000e+00, float 1.000000e+00, float -1.000000e+00, float -2.000000e+00>, %load, !fpmath !0
+  store <4 x float> %div, <4 x float> addrspace(1)* %arg, align 16
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_v4_c_by_minus_x_25ulp:
+; GCN-DAG:        v_mov_b32_e32 [[L:v[0-9]+]], 0x6f800000
+; GCN-DAG:        v_mov_b32_e32 [[S:v[0-9]+]], 0x2f800000
+; GCN-DENORM-DAG: v_div_scale_f32 {{.*}}, -2.0{{$}}
+; GCN-DENORM-DAG: v_div_scale_f32 {{.*}}, -2.0{{$}}
+; GCN-DENORM-DAG: v_div_scale_f32 {{.*}}, -2.0{{$}}
+; GCN-DENORM-DAG: v_div_scale_f32 {{.*}}, -2.0{{$}}
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+
+; GCN-DAG:        v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DAG:        v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+; GCN-DAG:        v_cmp_gt_f32_e64 vcc, |s{{[0-9]+}}|, [[L]]
+; GCN-DAG:        v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, [[S]], vcc
+
+; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e64 v{{[0-9]+}}, -s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DENORM-DAG: v_rcp_f32_e32 [[RCP1:v[0-9]+]], v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[RCP1]]
+; GCN-DENORM-DAG: v_rcp_f32_e32 [[RCP2:v[0-9]+]], v{{[0-9]+}}
+; GCN-DENORM-DAG: v_mul_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, [[RCP2]]
+
+; GCN-DENORM-DAG: v_div_fmas_f32
+; GCN-DENORM-DAG: v_div_fmas_f32
+; GCN-DENORM-DAG: v_div_fixup_f32 {{.*}}, -2.0{{$}}
+; GCN-DENORM-DAG: v_div_fixup_f32 {{.*}}, -2.0{{$}}
+
+; GCN-FLUSH-DAG:  v_rcp_f32_e32
+; GCN-FLUSH-DAG:  v_rcp_f32_e64
+
+; GCN-NOT:        v_cmp_gt_f32_e64
+; GCN-NOT:        v_cndmask_b32_e32
+; GCN-FLUSH-NOT:  v_div
+
+; GCN:            global_store_dwordx4
+define amdgpu_kernel void @div_v4_c_by_minus_x_25ulp(<4 x float> addrspace(1)* %arg) {
+  %load = load <4 x float>, <4 x float> addrspace(1)* %arg, align 16
+  %neg = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %load
+  %div = fdiv <4 x float> <float 2.000000e+00, float 1.000000e+00, float -1.000000e+00, float -2.000000e+00>, %neg, !fpmath !0
+  store <4 x float> %div, <4 x float> addrspace(1)* %arg, align 16
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_v_by_x_25ulp:
+; GCN-DAG:        s_load_dword [[VAL:s[0-9]+]], s[{{[0-9:]+}}], 0x0{{$}}
+
+; GCN-DENORM-DAG: v_div_scale_f32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_div_scale_f32
+; GCN-DENORM:     v_div_fmas_f32
+; GCN-DENORM:     v_div_fixup_f32 [[OUT:v[0-9]+]],
+
+; GCN-FLUSH-DAG:  v_mov_b32_e32 [[L:v[0-9]+]], 0x6f800000
+; GCN-FLUSH-DAG:  v_mov_b32_e32 [[S:v[0-9]+]], 0x2f800000
+; GCN-FLUSH-DAG:  v_cmp_gt_f32_e64 vcc, |[[VAL]]|, [[L]]
+; GCN-FLUSH-DAG:  v_cndmask_b32_e32 [[SCALE:v[0-9]+]], 1.0, [[S]], vcc
+; GCN-FLUSH:      v_mul_f32_e32 [[PRESCALED:v[0-9]+]], [[VAL]], [[SCALE]]
+; GCN-FLUSH:      v_rcp_f32_e32 [[RCP:v[0-9]+]], [[PRESCALED]]
+; GCN-FLUSH:      v_mul_f32_e32 [[OUT:v[0-9]+]], [[SCALE]], [[RCP]]
+
+; GCN:            global_store_dword v[{{[0-9:]+}}], [[OUT]], off
+define amdgpu_kernel void @div_v_by_x_25ulp(float addrspace(1)* %arg, float %num) {
+  %load = load float, float addrspace(1)* %arg, align 4
+  %div = fdiv float %num, %load, !fpmath !0
+  store float %div, float addrspace(1)* %arg, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_1_by_x_fast:
+; GCN: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x0
+; GCN: v_rcp_f32_e32 [[RCP:v[0-9]+]], [[VAL]]
+; GCN: global_store_dword v[{{[0-9:]+}}], [[RCP]], off
+define amdgpu_kernel void @div_1_by_x_fast(float addrspace(1)* %arg) {
+  %load = load float, float addrspace(1)* %arg, align 4
+  %div = fdiv fast float 1.000000e+00, %load
+  store float %div, float addrspace(1)* %arg, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_minus_1_by_x_fast:
+; GCN: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x0
+; GCN: v_rcp_f32_e64 [[RCP:v[0-9]+]], -[[VAL]]
+; GCN: global_store_dword v[{{[0-9:]+}}], [[RCP]], off
+define amdgpu_kernel void @div_minus_1_by_x_fast(float addrspace(1)* %arg) {
+  %load = load float, float addrspace(1)* %arg, align 4
+  %div = fdiv fast float -1.000000e+00, %load
+  store float %div, float addrspace(1)* %arg, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_1_by_minus_x_fast:
+; GCN: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x0
+; GCN: v_rcp_f32_e64 [[RCP:v[0-9]+]], -[[VAL]]
+; GCN: global_store_dword v[{{[0-9:]+}}], [[RCP]], off
+define amdgpu_kernel void @div_1_by_minus_x_fast(float addrspace(1)* %arg) {
+  %load = load float, float addrspace(1)* %arg, align 4
+  %neg = fsub float -0.000000e+00, %load
+  %div = fdiv fast float 1.000000e+00, %neg
+  store float %div, float addrspace(1)* %arg, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_minus_1_by_minus_x_fast:
+; GCN: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x0
+; GCN: v_rcp_f32_e32 [[RCP:v[0-9]+]], [[VAL]]
+; GCN: global_store_dword v[{{[0-9:]+}}], [[RCP]], off
+define amdgpu_kernel void @div_minus_1_by_minus_x_fast(float addrspace(1)* %arg) {
+  %load = load float, float addrspace(1)* %arg, align 4
+  %neg = fsub float -0.000000e+00, %load
+  %div = fdiv fast float -1.000000e+00, %neg
+  store float %div, float addrspace(1)* %arg, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_1_by_x_correctly_rounded:
+; GCN-DENORM-DAG: v_div_scale_f32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_div_scale_f32
+; GCN-DENORM:     v_div_fmas_f32
+; GCN-DENORM:     v_div_fixup_f32
+
+; GCN-FLUSH: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x0
+; GCN-FLUSH: v_rcp_f32_e32 [[RCP:v[0-9]+]], [[VAL]]
+; GCN-FLUSH: global_store_dword v[{{[0-9:]+}}], [[RCP]], off
+define amdgpu_kernel void @div_1_by_x_correctly_rounded(float addrspace(1)* %arg) {
+  %load = load float, float addrspace(1)* %arg, align 4
+  %div = fdiv float 1.000000e+00, %load
+  store float %div, float addrspace(1)* %arg, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_minus_1_by_x_correctly_rounded:
+; GCN-DENORM-DAG: v_div_scale_f32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_div_scale_f32
+; GCN-DENORM:     v_div_fmas_f32
+; GCN-DENORM:     v_div_fixup_f32
+
+; GCN-FLUSH: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x0
+; GCN-FLUSH: v_rcp_f32_e64 [[RCP:v[0-9]+]], -[[VAL]]
+; GCN-FLUSH: global_store_dword v[{{[0-9:]+}}], [[RCP]], off
+define amdgpu_kernel void @div_minus_1_by_x_correctly_rounded(float addrspace(1)* %arg) {
+  %load = load float, float addrspace(1)* %arg, align 4
+  %div = fdiv float -1.000000e+00, %load
+  store float %div, float addrspace(1)* %arg, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_1_by_minus_x_correctly_rounded:
+; GCN-DENORM-DAG: v_div_scale_f32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_div_scale_f32
+; GCN-DENORM:     v_div_fmas_f32
+; GCN-DENORM:     v_div_fixup_f32
+
+; GCN-FLUSH: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x0
+; GCN-FLUSH: v_rcp_f32_e64 [[RCP:v[0-9]+]], -[[VAL]]
+; GCN-FLUSH: global_store_dword v[{{[0-9:]+}}], [[RCP]], off
+define amdgpu_kernel void @div_1_by_minus_x_correctly_rounded(float addrspace(1)* %arg) {
+  %load = load float, float addrspace(1)* %arg, align 4
+  %neg = fsub float -0.000000e+00, %load
+  %div = fdiv float 1.000000e+00, %neg
+  store float %div, float addrspace(1)* %arg, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}div_minus_1_by_minus_x_correctly_rounded:
+; GCN-DENORM-DAG: v_div_scale_f32
+; GCN-DENORM-DAG: v_rcp_f32_e32
+; GCN-DENORM-DAG: v_div_scale_f32
+; GCN-DENORM:     v_div_fmas_f32
+; GCN-DENORM:     v_div_fixup_f32
+
+; GCN-FLUSH: s_load_dword [[VAL:s[0-9]+]], s[0:1], 0x0
+; GCN-FLUSH: v_rcp_f32_e32 [[RCP:v[0-9]+]], [[VAL]]
+; GCN-FLUSH: global_store_dword v[{{[0-9:]+}}], [[RCP]], off
+define amdgpu_kernel void @div_minus_1_by_minus_x_correctly_rounded(float addrspace(1)* %arg) {
+  %load = load float, float addrspace(1)* %arg, align 4
+  %neg = fsub float -0.000000e+00, %load
+  %div = fdiv float -1.000000e+00, %neg
+  store float %div, float addrspace(1)* %arg, align 4
+  ret void
+}
+
+!0 = !{float 2.500000e+00}




More information about the llvm-commits mailing list