[llvm] r312213 - AMDGPU: Don't assert in TTI with fp32 denorms enabled

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 30 22:47:00 PDT 2017


Author: arsenm
Date: Wed Aug 30 22:47:00 2017
New Revision: 312213

URL: http://llvm.org/viewvc/llvm-project?rev=312213&view=rev
Log:
AMDGPU: Don't assert in TTI with fp32 denorms enabled

Also refine the cost estimates for the f16 and rcp cases.
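
For reference, the rcp case being refined is an fdiv whose numerator is the
constant 1.0. A minimal IR sketch of that pattern (the function name is
illustrative; the same pattern appears in the new rcp_* tests added below):

  ; fdiv with a 1.0 numerator is the reciprocal pattern the cost model now
  ; matches; for f32 without fp32 denormals, or f16 with 16-bit instructions,
  ; it is costed as a single quarter-rate instruction instead of the full
  ; fdiv expansion.
  define float @rcp_example(float %x) {
    %r = fdiv float 1.0, %x
    ret float %r
  }

Assuming the usual per-rate costs in this file (TCC_Basic == 1, so full-rate
= 1 and quarter-rate = 3), the updated test values work out as
7*1 + 1*3 + 2*1 = 12 for f32 fdiv without denormals (the extra 2 covers the
FP mode switches), 7*1 + 1*3 = 10 with denormals enabled, 4*1 + 2*3 = 10 for
f16 with 16-bit instructions, and a single quarter-rate cost of 3 for the
rcp pattern.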

Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
    llvm/trunk/test/Analysis/CostModel/AMDGPU/fdiv.ll

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp?rev=312213&r1=312212&r2=312213&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp Wed Aug 30 22:47:00 2017
@@ -35,6 +35,7 @@
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Module.h"
+#include "llvm/IR/PatternMatch.h"
 #include "llvm/IR/Type.h"
 #include "llvm/IR/Value.h"
 #include "llvm/MC/SubtargetFeature.h"
@@ -353,7 +354,6 @@ int AMDGPUTTIImpl::getArithmeticInstrCos
     // but the current lowering is also not entirely correct.
     if (SLT == MVT::f64) {
       int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();
-
       // Add cost of workaround.
       if (ST->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
         Cost += 3 * getFullRateInstrCost();
@@ -361,10 +361,32 @@ int AMDGPUTTIImpl::getArithmeticInstrCos
       return LT.first * Cost * NElts;
     }
 
-    // Assuming no fp32 denormals lowering.
+    if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
+      // TODO: This is more complicated, unsafe flags etc.
+      if ((SLT == MVT::f32 && !ST->hasFP32Denormals()) ||
+          (SLT == MVT::f16 && ST->has16BitInsts())) {
+        return LT.first * getQuarterRateInstrCost() * NElts;
+      }
+    }
+
+    if (SLT == MVT::f16 && ST->has16BitInsts()) {
+      // 2 x v_cvt_f32_f16
+      // f32 rcp
+      // f32 fmul
+      // v_cvt_f16_f32
+      // f16 div_fixup
+      int Cost = 4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost();
+      return LT.first * Cost * NElts;
+    }
+
     if (SLT == MVT::f32 || SLT == MVT::f16) {
-      assert(!ST->hasFP32Denormals() && "will change when supported");
       int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();
+
+      if (!ST->hasFP32Denormals()) {
+        // FP mode switches.
+        Cost += 2 * getFullRateInstrCost();
+      }
+
       return LT.first * NElts * Cost;
     }
     break;

Modified: llvm/trunk/test/Analysis/CostModel/AMDGPU/fdiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/AMDGPU/fdiv.ll?rev=312213&r1=312212&r2=312213&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/AMDGPU/fdiv.ll (original)
+++ llvm/trunk/test/Analysis/CostModel/AMDGPU/fdiv.ll Wed Aug 30 22:47:00 2017
@@ -1,10 +1,13 @@
-; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=hawaii -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=CIFASTF64 %s
-; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=CISLOWF64 %s
-; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=tahiti -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=SIFASTF64 %s
-; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=verde -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=SISLOWF64 %s
-
-; CHECK: 'fdiv_f32'
-; ALL: estimated cost of 10 for {{.*}} fdiv float
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=hawaii -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=ALL,CIFASTF64,NOFP32DENORM,NOFP16,NOFP16-NOFP32DENORM %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -mattr=-half-rate-64-ops < %s | FileCheck -check-prefixes=ALL,CISLOWF64,NOFP32DENORM,NOFP16,NOFP16-NOFP32DENORM  %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=tahiti -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=ALL,SIFASTF64,NOFP32DENORM,NOFP16,NOFP16-NOFP32DENORM  %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=verde -mattr=-half-rate-64-ops < %s | FileCheck -check-prefixes=ALL,SISLOWF64,NOFP32DENORM,NOFP16,NOFP16-NOFP32DENORM  %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=hawaii -mattr=+fp32-denormals < %s | FileCheck -check-prefixes=ALL,FP32DENORMS,SLOWFP32DENORMS,NOFP16,NOFP16-FP32DENORM %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -mattr=+fp32-denormals < %s | FileCheck -check-prefixes=ALL,FP32DENORMS,FASTFP32DENORMS,FP16 %s
+
+; ALL: 'fdiv_f32'
+; NOFP32DENORM: estimated cost of 12 for {{.*}} fdiv float
+; FP32DENORMS: estimated cost of 10 for {{.*}} fdiv float
 define amdgpu_kernel void @fdiv_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
   %vec = load float, float addrspace(1)* %vaddr
   %add = fdiv float %vec, %b
@@ -13,7 +16,8 @@ define amdgpu_kernel void @fdiv_f32(floa
 }
 
 ; ALL: 'fdiv_v2f32'
-; ALL: estimated cost of 20 for {{.*}} fdiv <2 x float>
+; NOFP32DENORM: estimated cost of 24 for {{.*}} fdiv <2 x float>
+; FP32DENORMS: estimated cost of 20 for {{.*}} fdiv <2 x float>
 define amdgpu_kernel void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
   %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
   %add = fdiv <2 x float> %vec, %b
@@ -22,7 +26,8 @@ define amdgpu_kernel void @fdiv_v2f32(<2
 }
 
 ; ALL: 'fdiv_v3f32'
-; ALL: estimated cost of 30 for {{.*}} fdiv <3 x float>
+; NOFP32DENORM: estimated cost of 36 for {{.*}} fdiv <3 x float>
+; FP32DENORMS: estimated cost of 30 for {{.*}} fdiv <3 x float>
 define amdgpu_kernel void @fdiv_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
   %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
   %add = fdiv <3 x float> %vec, %b
@@ -67,7 +72,9 @@ define amdgpu_kernel void @fdiv_v3f64(<3
 }
 
 ; ALL: 'fdiv_f16'
-; ALL: estimated cost of 10 for {{.*}} fdiv half
+; NOFP16-NOFP32DENORM: estimated cost of 12 for {{.*}} fdiv half
+; NOFP16-FP32DENORM: estimated cost of 10 for {{.*}} fdiv half
+; FP16: estimated cost of 10 for {{.*}} fdiv half
 define amdgpu_kernel void @fdiv_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
   %vec = load half, half addrspace(1)* %vaddr
   %add = fdiv half %vec, %b
@@ -76,7 +83,9 @@ define amdgpu_kernel void @fdiv_f16(half
 }
 
 ; ALL: 'fdiv_v2f16'
-; ALL: estimated cost of 20 for {{.*}} fdiv <2 x half>
+; NOFP16-NOFP32DENORM: estimated cost of 24 for {{.*}} fdiv <2 x half>
+; NOFP16-FP32DENORM: estimated cost of 20 for {{.*}} fdiv <2 x half>
+; FP16: estimated cost of 20 for {{.*}} fdiv <2 x half>
 define amdgpu_kernel void @fdiv_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
   %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
   %add = fdiv <2 x half> %vec, %b
@@ -85,7 +94,9 @@ define amdgpu_kernel void @fdiv_v2f16(<2
 }
 
 ; ALL: 'fdiv_v4f16'
-; ALL: estimated cost of 40 for {{.*}} fdiv <4 x half>
+; NOFP16-NOFP32DENORM: estimated cost of 48 for {{.*}} fdiv <4 x half>
+; NOFP16-FP32DENORM: estimated cost of 40 for {{.*}} fdiv <4 x half>
+; FP16: estimated cost of 40 for {{.*}} fdiv <4 x half>
 define amdgpu_kernel void @fdiv_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
   %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
   %add = fdiv <4 x half> %vec, %b
@@ -93,4 +104,60 @@ define amdgpu_kernel void @fdiv_v4f16(<4
   ret void
 }
 
+; ALL: 'rcp_f32'
+; NOFP32DENORM: estimated cost of 3 for {{.*}} fdiv float
+; SLOWFP32DENORMS: estimated cost of 10 for {{.*}} fdiv float
+; FASTFP32DENORMS: estimated cost of 10 for {{.*}} fdiv float
+define amdgpu_kernel void @rcp_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 {
+  %vec = load float, float addrspace(1)* %vaddr
+  %add = fdiv float 1.0, %vec
+  store float %add, float addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'rcp_f16'
+; NOFP16-NOFP32DENORM: estimated cost of 3 for {{.*}} fdiv half
+; NOFP16-FP32DENORM: estimated cost of 10 for {{.*}} fdiv half
+; FP16: estimated cost of 3 for {{.*}} fdiv half
+define amdgpu_kernel void @rcp_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr) #0 {
+  %vec = load half, half addrspace(1)* %vaddr
+  %add = fdiv half 1.0, %vec
+  store half %add, half addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'rcp_f64'
+; CIFASTF64: estimated cost of 29 for {{.*}} fdiv double
+; CISLOWF64: estimated cost of 33 for {{.*}} fdiv double
+; SIFASTF64: estimated cost of 32 for {{.*}} fdiv double
+; SISLOWF64: estimated cost of 36 for {{.*}} fdiv double
+define amdgpu_kernel void @rcp_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0 {
+  %vec = load double, double addrspace(1)* %vaddr
+  %add = fdiv double 1.0, %vec
+  store double %add, double addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'rcp_v2f32'
+; NOFP32DENORM: estimated cost of 6 for {{.*}} fdiv <2 x float>
+; SLOWFP32DENORMS: estimated cost of 20 for {{.*}} fdiv <2 x float>
+; FASTFP32DENORMS: estimated cost of 20 for {{.*}} fdiv <2 x float>
+define amdgpu_kernel void @rcp_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) #0 {
+  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
+  %add = fdiv <2 x float> <float 1.0, float 1.0>, %vec
+  store <2 x float> %add, <2 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'rcp_v2f16'
+; NOFP16-NOFP32DENORM: estimated cost of 6 for {{.*}} fdiv <2 x half>
+; NOFP16-FP32DENORM: estimated cost of 20 for {{.*}} fdiv <2 x half>
+; FP16: estimated cost of 6 for {{.*}} fdiv <2 x half>
+define amdgpu_kernel void @rcp_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
+  %add = fdiv <2 x half> <half 1.0, half 1.0>, %vec
+  store <2 x half> %add, <2 x half> addrspace(1)* %out
+  ret void
+}
+
 attributes #0 = { nounwind }



