[llvm] 58578f7 - [AMDGPU] Implemented fma cost analysis

Stanislav Mekhanoshin via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 19 00:05:48 PST 2019


Author: Stanislav Mekhanoshin
Date: 2019-12-18T23:54:20-08:00
New Revision: 58578f705663a9f31b906a341f0a61ce51f7dcb2

URL: https://github.com/llvm/llvm-project/commit/58578f705663a9f31b906a341f0a61ce51f7dcb2
DIFF: https://github.com/llvm/llvm-project/commit/58578f705663a9f31b906a341f0a61ce51f7dcb2.diff

LOG: [AMDGPU] Implemented fma cost analysis

Differential Revision: https://reviews.llvm.org/D71676

Added: 
    llvm/test/Analysis/CostModel/AMDGPU/fma.ll

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
    llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index e08dd058402e..c4eeb81c5133 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -463,6 +463,49 @@ int GCNTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                        Opd1PropInfo, Opd2PropInfo);
 }
 
+template <typename T>
+int GCNTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+                                      ArrayRef<T *> Args,
+                                      FastMathFlags FMF, unsigned VF) {
+  if (ID != Intrinsic::fma)
+    return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
+
+  EVT OrigTy = TLI->getValueType(DL, RetTy);
+  if (!OrigTy.isSimple()) {
+    return BaseT::getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
+  }
+
+  // Legalize the type.
+  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
+
+  unsigned NElts = LT.second.isVector() ?
+    LT.second.getVectorNumElements() : 1;
+
+  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;
+
+  if (SLT == MVT::f64)
+    return LT.first * NElts * get64BitInstrCost();
+
+  if (ST->has16BitInsts() && SLT == MVT::f16)
+    NElts = (NElts + 1) / 2;
+
+  return LT.first * NElts * (ST->hasFastFMAF32() ? getHalfRateInstrCost()
+                                                 : getQuarterRateInstrCost());
+}
+
+int GCNTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+                                      ArrayRef<Value*> Args, FastMathFlags FMF,
+                                      unsigned VF) {
+  return getIntrinsicInstrCost<Value>(ID, RetTy, Args, FMF, VF);
+}
+
+int GCNTTIImpl::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+                                      ArrayRef<Type *> Tys, FastMathFlags FMF,
+                                      unsigned ScalarizationCostPassed) {
+  return getIntrinsicInstrCost<Type>(ID, RetTy, Tys, FMF,
+                                     ScalarizationCostPassed);
+}
+
 unsigned GCNTTIImpl::getCFInstrCost(unsigned Opcode) {
   // XXX - For some reason this isn't called for switch.
   switch (Opcode) {

diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
index 1dba8bbde66f..0b48f9f602b7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
@@ -214,6 +214,16 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
   int getArithmeticReductionCost(unsigned Opcode,
                                  Type *Ty,
                                  bool IsPairwise);
+  template <typename T>
+  int getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+                            ArrayRef<T *> Args, FastMathFlags FMF,
+                            unsigned VF);
+  int getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+                            ArrayRef<Type *> Tys, FastMathFlags FMF,
+                            unsigned ScalarizationCostPassed = UINT_MAX);
+  int getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+                            ArrayRef<Value *> Args, FastMathFlags FMF,
+                            unsigned VF = 1);
   int getMinMaxReductionCost(Type *Ty, Type *CondTy,
                              bool IsPairwiseForm,
                              bool IsUnsigned);

diff  --git a/llvm/test/Analysis/CostModel/AMDGPU/fma.ll b/llvm/test/Analysis/CostModel/AMDGPU/fma.ll
new file mode 100644
index 000000000000..462163d2f03e
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/AMDGPU/fma.ll
@@ -0,0 +1,120 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900  -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=FASTF64,FAST32,FASTF16,ALL %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefixes=SLOWF64,SLOW32,SLOWF16,ALL %s
+; RUN: opt -cost-model -cost-kind=code-size -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=gfx900 -mattr=+half-rate-64-ops < %s | FileCheck -check-prefixes=FASTF64,FAST32,FASTF16,ALL %s
+; RUN: opt -cost-model -cost-kind=code-size -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefixes=SLOWF64,SLOW32,SLOWF16,ALL %s
+
+; ALL-LABEL: 'fma_f32'
+; SLOW32: estimated cost of 3 for {{.*}} call float @llvm.fma.f32
+; FAST32: estimated cost of 2 for {{.*}} call float @llvm.fma.f32
+define amdgpu_kernel void @fma_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr) #0 {
+  %vec = load float, float addrspace(1)* %vaddr
+  %fma = call float @llvm.fma.f32(float %vec, float %vec, float %vec) #1
+  store float %fma, float addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_v2f32'
+; SLOW32: estimated cost of 6 for {{.*}} call <2 x float> @llvm.fma.v2f32
+; FAST32: estimated cost of 4 for {{.*}} call <2 x float> @llvm.fma.v2f32
+define amdgpu_kernel void @fma_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr) #0 {
+  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
+  %fma = call <2 x float> @llvm.fma.v2f32(<2 x float> %vec, <2 x float> %vec, <2 x float> %vec) #1
+  store <2 x float> %fma, <2 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_v3f32'
+; SLOW32: estimated cost of 9 for {{.*}} call <3 x float> @llvm.fma.v3f32
+; FAST32: estimated cost of 6 for {{.*}} call <3 x float> @llvm.fma.v3f32
+define amdgpu_kernel void @fma_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
+  %fma = call <3 x float> @llvm.fma.v3f32(<3 x float> %vec, <3 x float> %vec, <3 x float> %vec) #1
+  store <3 x float> %fma, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_v5f32'
+; SLOW32: estimated cost of 15 for {{.*}} call <5 x float> @llvm.fma.v5f32
+; FAST32: estimated cost of 10 for {{.*}} call <5 x float> @llvm.fma.v5f32
+define amdgpu_kernel void @fma_v5f32(<5 x float> addrspace(1)* %out, <5 x float> addrspace(1)* %vaddr) #0 {
+  %vec = load <5 x float>, <5 x float> addrspace(1)* %vaddr
+  %fma = call <5 x float> @llvm.fma.v5f32(<5 x float> %vec, <5 x float> %vec, <5 x float> %vec) #1
+  store <5 x float> %fma, <5 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_f64'
+; SLOWF64: estimated cost of 3 for {{.*}} call double @llvm.fma.f64
+; FASTF64: estimated cost of 2 for {{.*}} call double @llvm.fma.f64
+define amdgpu_kernel void @fma_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr) #0 {
+  %vec = load double, double addrspace(1)* %vaddr
+  %fma = call double @llvm.fma.f64(double %vec, double %vec, double %vec) #1
+  store double %fma, double addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_v2f64'
+; SLOWF64: estimated cost of 6 for {{.*}} call <2 x double> @llvm.fma.v2f64
+; FASTF64: estimated cost of 4 for {{.*}} call <2 x double> @llvm.fma.v2f64
+define amdgpu_kernel void @fma_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr) #0 {
+  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
+  %fma = call <2 x double> @llvm.fma.v2f64(<2 x double> %vec, <2 x double> %vec, <2 x double> %vec) #1
+  store <2 x double> %fma, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_v3f64'
+; SLOWF64: estimated cost of 9 for {{.*}} call <3 x double> @llvm.fma.v3f64
+; FASTF64: estimated cost of 6 for {{.*}} call <3 x double> @llvm.fma.v3f64
+define amdgpu_kernel void @fma_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
+  %fma = call <3 x double> @llvm.fma.v3f64(<3 x double> %vec, <3 x double> %vec, <3 x double> %vec) #1
+  store <3 x double> %fma, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_f16'
+; SLOWF16: estimated cost of 3 for {{.*}} call half @llvm.fma.f16
+; FASTF16: estimated cost of 2 for {{.*}} call half @llvm.fma.f16
+define amdgpu_kernel void @fma_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr) #0 {
+  %vec = load half, half addrspace(1)* %vaddr
+  %fma = call half @llvm.fma.f16(half %vec, half %vec, half %vec) #1
+  store half %fma, half addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_v2f16'
+; SLOWF16: estimated cost of 6 for {{.*}} call <2 x half> @llvm.fma.v2f16
+; FASTF16: estimated cost of 2 for {{.*}} call <2 x half> @llvm.fma.v2f16
+define amdgpu_kernel void @fma_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
+  %fma = call <2 x half> @llvm.fma.v2f16(<2 x half> %vec, <2 x half> %vec, <2 x half> %vec) #1
+  store <2 x half> %fma, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: 'fma_v3f16'
+; SLOWF16: estimated cost of 12 for {{.*}} call <3 x half> @llvm.fma.v3f16
+; FASTF16: estimated cost of 4 for {{.*}} call <3 x half> @llvm.fma.v3f16
+define amdgpu_kernel void @fma_v3f16(<3 x half> addrspace(1)* %out, <3 x half> addrspace(1)* %vaddr) #0 {
+  %vec = load <3 x half>, <3 x half> addrspace(1)* %vaddr
+  %fma = call <3 x half> @llvm.fma.v3f16(<3 x half> %vec, <3 x half> %vec, <3 x half> %vec) #1
+  store <3 x half> %fma, <3 x half> addrspace(1)* %out
+  ret void
+}
+
+declare float @llvm.fma.f32(float, float, float) #1
+declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>) #1
+declare <3 x float> @llvm.fma.v3f32(<3 x float>, <3 x float>, <3 x float>) #1
+declare <5 x float> @llvm.fma.v5f32(<5 x float>, <5 x float>, <5 x float>) #1
+
+declare double @llvm.fma.f64(double, double, double) #1
+declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) #1
+declare <3 x double> @llvm.fma.v3f64(<3 x double>, <3 x double>, <3 x double>) #1
+
+declare half @llvm.fma.f16(half, half, half) #1
+declare <2 x half> @llvm.fma.v2f16(<2 x half>, <2 x half>, <2 x half>) #1
+declare <3 x half> @llvm.fma.v3f16(<3 x half>, <3 x half>, <3 x half>) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }


        


More information about the llvm-commits mailing list