[llvm] r264374 - AMDGPU: Partially implement getArithmeticInstrCost for FP ops

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 24 18:00:32 PDT 2016


Author: arsenm
Date: Thu Mar 24 20:00:32 2016
New Revision: 264374

URL: http://llvm.org/viewvc/llvm-project?rev=264374&view=rev
Log:
AMDGPU: Partially implement getArithmeticInstrCost for FP ops
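
In rough terms: fadd, fsub and fmul are costed as one full-rate instruction
(TCC_Basic) for f32/f16 and one 64-bit instruction for f64, scaled by the
number of elements after type legalization. fdiv and frem of f32/f16 cost
7 full-rate plus 1 quarter-rate instruction (7 * 1 + 1 * 3 = 10, which is
what the new fdiv tests check), and f64 fdiv/frem cost 4 64-bit plus 7
quarter-rate instructions, with 3 extra full-rate instructions on
SOUTHERN_ISLANDS for the workaround (e.g. 4 * 3 + 7 * 3 = 33 on a CI part
without half-rate f64 ops, 4 * 2 + 7 * 3 = 29 with them). Everything else
still falls back to BasicTTIImpl.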

Added:
    llvm/trunk/test/Analysis/CostModel/AMDGPU/fadd.ll
    llvm/trunk/test/Analysis/CostModel/AMDGPU/fdiv.ll
    llvm/trunk/test/Analysis/CostModel/AMDGPU/fmul.ll
    llvm/trunk/test/Analysis/CostModel/AMDGPU/fsub.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp?rev=264374&r1=264373&r2=264374&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp Thu Mar 24 20:00:32 2016
@@ -29,6 +29,7 @@ using namespace llvm;
 
 #define DEBUG_TYPE "AMDGPUtti"
 
+
 void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L,
                                             TTI::UnrollingPreferences &UP) {
   UP.Threshold = 300; // Twice the default.
@@ -84,6 +85,69 @@ unsigned AMDGPUTTIImpl::getMaxInterleave
   return 64;
 }
 
+int AMDGPUTTIImpl::getArithmeticInstrCost(
+    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
+    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
+    TTI::OperandValueProperties Opd2PropInfo) {
+
+  EVT OrigTy = TLI->getValueType(DL, Ty);
+  if (!OrigTy.isSimple()) {
+    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
+                                         Opd1PropInfo, Opd2PropInfo);
+  }
+
+  // Legalize the type.
+  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
+  int ISD = TLI->InstructionOpcodeToISD(Opcode);
+
+  // Because we don't have any legal vector operations, but do have legal
+  // vector types, we need to account for split vectors.
+  unsigned NElts = LT.second.isVector() ?
+    LT.second.getVectorNumElements() : 1;
+
+  MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;
+
+  switch (ISD) {
+  case ISD::FADD:
+  case ISD::FSUB:
+  case ISD::FMUL:
+    if (SLT == MVT::f64)
+      return LT.first * NElts * get64BitInstrCost();
+
+    if (SLT == MVT::f32 || SLT == MVT::f16)
+      return LT.first * NElts * getFullRateInstrCost();
+    break;
+
+  case ISD::FDIV:
+  case ISD::FREM:
+    // FIXME: frem should be handled separately. The fdiv in it accounts for
+    // most of the cost, but the current lowering is also not entirely correct.
+    if (SLT == MVT::f64) {
+      int Cost = 4 * get64BitInstrCost() + 7 * getQuarterRateInstrCost();
+
+      // Add cost of workaround.
+      if (ST->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
+        Cost += 3 * getFullRateInstrCost();
+
+      return LT.first * Cost * NElts;
+    }
+
+    // Assumes the lowering used when fp32 denormals are disabled.
+    if (SLT == MVT::f32 || SLT == MVT::f16) {
+      assert(!ST->hasFP32Denormals() && "will change when supported");
+      int Cost = 7 * getFullRateInstrCost() + 1 * getQuarterRateInstrCost();
+      return LT.first * NElts * Cost;
+    }
+
+    break;
+  default:
+    break;
+  }
+
+  return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
+                                       Opd1PropInfo, Opd2PropInfo);
+}
+
 unsigned AMDGPUTTIImpl::getCFInstrCost(unsigned Opcode) {
   // XXX - For some reason this isn't called for switch.
   switch (Opcode) {

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h?rev=264374&r1=264373&r2=264374&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h Thu Mar 24 20:00:32 2016
@@ -21,9 +21,9 @@
 #include "AMDGPUTargetMachine.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/CodeGen/BasicTTIImpl.h"
-#include "llvm/Target/TargetLowering.h"
 
 namespace llvm {
+class AMDGPUTargetLowering;
 
 class AMDGPUTTIImpl final : public BasicTTIImplBase<AMDGPUTTIImpl> {
   typedef BasicTTIImplBase<AMDGPUTTIImpl> BaseT;
@@ -36,6 +36,28 @@ class AMDGPUTTIImpl final : public Basic
   const AMDGPUSubtarget *getST() const { return ST; }
   const AMDGPUTargetLowering *getTLI() const { return TLI; }
 
+
+  static inline int getFullRateInstrCost() {
+    return TargetTransformInfo::TCC_Basic;
+  }
+
+  static inline int getHalfRateInstrCost() {
+    return 2 * TargetTransformInfo::TCC_Basic;
+  }
+
+  // TODO: The size is usually 8 bytes, but takes 4x as many cycles. Maybe
+  // should be 2 or 4.
+  static inline int getQuarterRateInstrCost() {
+    return 3 * TargetTransformInfo::TCC_Basic;
+  }
+
+  // On some parts, normal fp64 operations are half rate, and on others
+  // quarter rate. This also applies to some integer operations.
+  inline int get64BitInstrCost() const {
+    return ST->hasHalfRate64Ops() ?
+      getHalfRateInstrCost() : getQuarterRateInstrCost();
+  }
+
 public:
   explicit AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const DataLayout &DL)
       : BaseT(TM, DL), ST(TM->getSubtargetImpl()),
@@ -61,6 +83,13 @@ public:
   unsigned getRegisterBitWidth(bool Vector);
   unsigned getMaxInterleaveFactor(unsigned VF);
 
+  int getArithmeticInstrCost(
+    unsigned Opcode, Type *Ty,
+    TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
+    TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
+    TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
+    TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None);
+
   unsigned getCFInstrCost(unsigned Opcode);
 
   int getVectorInstrCost(unsigned Opcode, Type *ValTy, unsigned Index);

Added: llvm/trunk/test/Analysis/CostModel/AMDGPU/fadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/AMDGPU/fadd.ll?rev=264374&view=auto
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/AMDGPU/fadd.ll (added)
+++ llvm/trunk/test/Analysis/CostModel/AMDGPU/fadd.ll Thu Mar 24 20:00:32 2016
@@ -0,0 +1,88 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=FASTF64 -check-prefix=ALL %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=SLOWF64 -check-prefix=ALL %s
+
+; ALL: 'fadd_f32'
+; ALL: estimated cost of 1 for {{.*}} fadd float
+define void @fadd_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+  %vec = load float, float addrspace(1)* %vaddr
+  %add = fadd float %vec, %b
+  store float %add, float addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v2f32'
+; ALL: estimated cost of 2 for {{.*}} fadd <2 x float>
+define void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
+  %add = fadd <2 x float> %vec, %b
+  store <2 x float> %add, <2 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v3f32'
+; ALL: estimated cost of 3 for {{.*}} fadd <3 x float>
+define void @fadd_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
+  %add = fadd <3 x float> %vec, %b
+  store <3 x float> %add, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_f64'
+; FASTF64: estimated cost of 2 for {{.*}} fadd double
+; SLOWF64: estimated cost of 3 for {{.*}} fadd double
+define void @fadd_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+  %vec = load double, double addrspace(1)* %vaddr
+  %add = fadd double %vec, %b
+  store double %add, double addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v2f64'
+; FASTF64: estimated cost of 4 for {{.*}} fadd <2 x double>
+; SLOWF64: estimated cost of 6 for {{.*}} fadd <2 x double>
+define void @fadd_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
+  %add = fadd <2 x double> %vec, %b
+  store <2 x double> %add, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v3f64'
+; FASTF64: estimated cost of 6 for {{.*}} fadd <3 x double>
+; SLOWF64: estimated cost of 9 for {{.*}} fadd <3 x double>
+define void @fadd_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
+  %add = fadd <3 x double> %vec, %b
+  store <3 x double> %add, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_f16'
+; ALL: estimated cost of 1 for {{.*}} fadd half
+define void @fadd_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+  %vec = load half, half addrspace(1)* %vaddr
+  %add = fadd half %vec, %b
+  store half %add, half addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v2f16'
+; ALL: estimated cost of 2 for {{.*}} fadd <2 x half>
+define void @fadd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
+  %add = fadd <2 x half> %vec, %b
+  store <2 x half> %add, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fadd_v4f16'
+; ALL: estimated cost of 4 for {{.*}} fadd <4 x half>
+define void @fadd_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+  %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
+  %add = fadd <4 x half> %vec, %b
+  store <4 x half> %add, <4 x half> addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/Analysis/CostModel/AMDGPU/fdiv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/AMDGPU/fdiv.ll?rev=264374&view=auto
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/AMDGPU/fdiv.ll (added)
+++ llvm/trunk/test/Analysis/CostModel/AMDGPU/fdiv.ll Thu Mar 24 20:00:32 2016
@@ -0,0 +1,96 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=hawaii -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=CIFASTF64 %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=CISLOWF64 %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=tahiti -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=SIFASTF64 %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mcpu=verde -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=SISLOWF64 %s
+
+; ALL: 'fdiv_f32'
+; ALL: estimated cost of 10 for {{.*}} fdiv float
+define void @fdiv_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+  %vec = load float, float addrspace(1)* %vaddr
+  %add = fdiv float %vec, %b
+  store float %add, float addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v2f32'
+; ALL: estimated cost of 20 for {{.*}} fdiv <2 x float>
+define void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
+  %add = fdiv <2 x float> %vec, %b
+  store <2 x float> %add, <2 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v3f32'
+; ALL: estimated cost of 30 for {{.*}} fdiv <3 x float>
+define void @fdiv_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
+  %add = fdiv <3 x float> %vec, %b
+  store <3 x float> %add, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_f64'
+; CIFASTF64: estimated cost of 29 for {{.*}} fdiv double
+; CISLOWF64: estimated cost of 33 for {{.*}} fdiv double
+; SIFASTF64: estimated cost of 32 for {{.*}} fdiv double
+; SISLOWF64: estimated cost of 36 for {{.*}} fdiv double
+define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+  %vec = load double, double addrspace(1)* %vaddr
+  %add = fdiv double %vec, %b
+  store double %add, double addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v2f64'
+; CIFASTF64: estimated cost of 58 for {{.*}} fdiv <2 x double>
+; CISLOWF64: estimated cost of 66 for {{.*}} fdiv <2 x double>
+; SIFASTF64: estimated cost of 64 for {{.*}} fdiv <2 x double>
+; SISLOWF64: estimated cost of 72 for {{.*}} fdiv <2 x double>
+define void @fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
+  %add = fdiv <2 x double> %vec, %b
+  store <2 x double> %add, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v3f64'
+; CIFASTF64: estimated cost of 87 for {{.*}} fdiv <3 x double>
+; CISLOWF64: estimated cost of 99 for {{.*}} fdiv <3 x double>
+; SIFASTF64: estimated cost of 96 for {{.*}} fdiv <3 x double>
+; SISLOWF64: estimated cost of 108 for {{.*}} fdiv <3 x double>
+define void @fdiv_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
+  %add = fdiv <3 x double> %vec, %b
+  store <3 x double> %add, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_f16'
+; ALL: estimated cost of 10 for {{.*}} fdiv half
+define void @fdiv_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+  %vec = load half, half addrspace(1)* %vaddr
+  %add = fdiv half %vec, %b
+  store half %add, half addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v2f16'
+; ALL: estimated cost of 20 for {{.*}} fdiv <2 x half>
+define void @fdiv_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
+  %add = fdiv <2 x half> %vec, %b
+  store <2 x half> %add, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fdiv_v4f16'
+; ALL: estimated cost of 40 for {{.*}} fdiv <4 x half>
+define void @fdiv_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+  %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
+  %add = fdiv <4 x half> %vec, %b
+  store <4 x half> %add, <4 x half> addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/Analysis/CostModel/AMDGPU/fmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/AMDGPU/fmul.ll?rev=264374&view=auto
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/AMDGPU/fmul.ll (added)
+++ llvm/trunk/test/Analysis/CostModel/AMDGPU/fmul.ll Thu Mar 24 20:00:32 2016
@@ -0,0 +1,88 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=FASTF64 -check-prefix=ALL %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=SLOWF64 -check-prefix=ALL %s
+
+; ALL: 'fmul_f32'
+; ALL: estimated cost of 1 for {{.*}} fmul float
+define void @fmul_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+  %vec = load float, float addrspace(1)* %vaddr
+  %add = fmul float %vec, %b
+  store float %add, float addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v2f32'
+; ALL: estimated cost of 2 for {{.*}} fmul <2 x float>
+define void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
+  %add = fmul <2 x float> %vec, %b
+  store <2 x float> %add, <2 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v3f32'
+; ALL: estimated cost of 3 for {{.*}} fmul <3 x float>
+define void @fmul_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
+  %add = fmul <3 x float> %vec, %b
+  store <3 x float> %add, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_f64'
+; FASTF64: estimated cost of 2 for {{.*}} fmul double
+; SLOWF64: estimated cost of 3 for {{.*}} fmul double
+define void @fmul_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+  %vec = load double, double addrspace(1)* %vaddr
+  %add = fmul double %vec, %b
+  store double %add, double addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v2f64'
+; FASTF64: estimated cost of 4 for {{.*}} fmul <2 x double>
+; SLOWF64: estimated cost of 6 for {{.*}} fmul <2 x double>
+define void @fmul_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
+  %add = fmul <2 x double> %vec, %b
+  store <2 x double> %add, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v3f64'
+; FASTF64: estimated cost of 6 for {{.*}} fmul <3 x double>
+; SLOWF64: estimated cost of 9 for {{.*}} fmul <3 x double>
+define void @fmul_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
+  %add = fmul <3 x double> %vec, %b
+  store <3 x double> %add, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_f16'
+; ALL: estimated cost of 1 for {{.*}} fmul half
+define void @fmul_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+  %vec = load half, half addrspace(1)* %vaddr
+  %add = fmul half %vec, %b
+  store half %add, half addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v2f16'
+; ALL: estimated cost of 2 for {{.*}} fmul <2 x half>
+define void @fmul_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
+  %add = fmul <2 x half> %vec, %b
+  store <2 x half> %add, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fmul_v4f16'
+; ALL: estimated cost of 4 for {{.*}} fmul <4 x half>
+define void @fmul_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+  %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
+  %add = fmul <4 x half> %vec, %b
+  store <4 x half> %add, <4 x half> addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/Analysis/CostModel/AMDGPU/fsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/AMDGPU/fsub.ll?rev=264374&view=auto
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/AMDGPU/fsub.ll (added)
+++ llvm/trunk/test/Analysis/CostModel/AMDGPU/fsub.ll Thu Mar 24 20:00:32 2016
@@ -0,0 +1,86 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=FASTF64 -check-prefix=ALL %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=SLOWF64 -check-prefix=ALL %s
+
+; ALL: 'fsub_f32'
+; ALL: estimated cost of 1 for {{.*}} fsub float
+define void @fsub_f32(float addrspace(1)* %out, float addrspace(1)* %vaddr, float %b) #0 {
+  %vec = load float, float addrspace(1)* %vaddr
+  %add = fsub float %vec, %b
+  store float %add, float addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v2f32'
+; ALL: estimated cost of 2 for {{.*}} fsub <2 x float>
+define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %vaddr, <2 x float> %b) #0 {
+  %vec = load <2 x float>, <2 x float> addrspace(1)* %vaddr
+  %add = fsub <2 x float> %vec, %b
+  store <2 x float> %add, <2 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v3f32'
+; ALL: estimated cost of 3 for {{.*}} fsub <3 x float>
+define void @fsub_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %vaddr, <3 x float> %b) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %vaddr
+  %add = fsub <3 x float> %vec, %b
+  store <3 x float> %add, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_f64'
+; FASTF64: estimated cost of 2 for {{.*}} fsub double
+; SLOWF64: estimated cost of 3 for {{.*}} fsub double
+define void @fsub_f64(double addrspace(1)* %out, double addrspace(1)* %vaddr, double %b) #0 {
+  %vec = load double, double addrspace(1)* %vaddr
+  %add = fsub double %vec, %b
+  store double %add, double addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v2f64'
+; FASTF64: estimated cost of 4 for {{.*}} fsub <2 x double>
+; SLOWF64: estimated cost of 6 for {{.*}} fsub <2 x double>
+define void @fsub_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %vaddr, <2 x double> %b) #0 {
+  %vec = load <2 x double>, <2 x double> addrspace(1)* %vaddr
+  %add = fsub <2 x double> %vec, %b
+  store <2 x double> %add, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v3f64'
+; FASTF64: estimated cost of 6 for {{.*}} fsub <3 x double>
+; SLOWF64: estimated cost of 9 for {{.*}} fsub <3 x double>
+define void @fsub_v3f64(<3 x double> addrspace(1)* %out, <3 x double> addrspace(1)* %vaddr, <3 x double> %b) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %vaddr
+  %add = fsub <3 x double> %vec, %b
+  store <3 x double> %add, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_f16'
+; ALL: estimated cost of 1 for {{.*}} fsub half
+define void @fsub_f16(half addrspace(1)* %out, half addrspace(1)* %vaddr, half %b) #0 {
+  %vec = load half, half addrspace(1)* %vaddr
+  %add = fsub half %vec, %b
+  store half %add, half addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v2f16'
+; ALL: estimated cost of 2 for {{.*}} fsub <2 x half>
+define void @fsub_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %vaddr, <2 x half> %b) #0 {
+  %vec = load <2 x half>, <2 x half> addrspace(1)* %vaddr
+  %add = fsub <2 x half> %vec, %b
+  store <2 x half> %add, <2 x half> addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'fsub_v4f16'
+; ALL: estimated cost of 4 for {{.*}} fsub <4 x half>
+define void @fsub_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %vaddr, <4 x half> %b) #0 {
+  %vec = load <4 x half>, <4 x half> addrspace(1)* %vaddr
+  %add = fsub <4 x half> %vec, %b
+  store <4 x half> %add, <4 x half> addrspace(1)* %out
+  ret void
+}




More information about the llvm-commits mailing list