[llvm] r315744 - AMDGPU: Implement isFPExtFoldable

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Fri Oct 13 13:18:59 PDT 2017


Author: arsenm
Date: Fri Oct 13 13:18:59 2017
New Revision: 315744

URL: http://llvm.org/viewvc/llvm-project?rev=315744&view=rev
Log:
AMDGPU: Implement isFPExtFoldable

This helps match v_mad_mix* in some cases.

Added:
    llvm/trunk/test/CodeGen/AMDGPU/fpext-free.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp?rev=315744&r1=315743&r2=315744&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp Fri Oct 13 13:18:59 2017
@@ -827,6 +827,17 @@ bool AMDGPUTargetLowering::isZExtFree(SD
   return isZExtFree(Val.getValueType(), VT2);
 }
 
+// v_mad_mix* support a conversion from f16 to f32.
+//
+// There is one special case where this is OK to use even when denormals are
+// enabled, but we don't currently handle it.
+bool AMDGPUTargetLowering::isFPExtFoldable(unsigned Opcode,
+                                           EVT DestVT, EVT SrcVT) const {
+  return Opcode == ISD::FMAD && Subtarget->hasMadMixInsts() &&
+         DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() &&
+         SrcVT.getScalarType() == MVT::f16;
+}
+
 bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
   // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
   // limited number of native 64-bit operations. Shrinking an operation to fit

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h?rev=315744&r1=315743&r2=315744&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h Fri Oct 13 13:18:59 2017
@@ -143,6 +143,7 @@ public:
   bool isZExtFree(Type *Src, Type *Dest) const override;
   bool isZExtFree(EVT Src, EVT Dest) const override;
   bool isZExtFree(SDValue Val, EVT VT2) const override;
+  bool isFPExtFoldable(unsigned Opcode, EVT DestVT, EVT SrcVT) const override;
 
   bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;
 

Added: llvm/trunk/test/CodeGen/AMDGPU/fpext-free.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fpext-free.ll?rev=315744&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/fpext-free.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/fpext-free.ll Fri Oct 13 13:18:59 2017
@@ -0,0 +1,388 @@
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-fp32-denormals -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX89,GFX9,GFX9-F32FLUSH %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=+fp32-denormals -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX89,GFX9,GFX9-F32DENORM %s
+; RUN: llc -march=amdgcn -mcpu=gfx803 -mattr=-fp32-denormals -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX89,VI,VI-F32FLUSH %s
+; RUN: llc -march=amdgcn -mcpu=gfx803 -mattr=+fp32-denormals -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX89,VI,VI-F32DENORM %s
+
+;  fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
+
+; GCN-LABEL: {{^}}fadd_fpext_fmul_f16_to_f32:
+; GCN: s_waitcnt
+; GFX9-F32FLUSH-NEXT: v_mad_mix_f32 v0, v0, v1, v2 op_sel_hi:[1,1,0]{{$}}
+; GFX9-F32FLUSH-NEXT: s_setpc_b64
+
+; GFX9-F32DENORM-NEXT: v_mul_f16
+; GFX9-F32DENORM-NEXT: v_cvt_f32_f16
+; GFX9-F32DENORM-NEXT: v_add_f32
+define float @fadd_fpext_fmul_f16_to_f32(half %x, half %y, float %z) #0 {
+entry:
+  %mul = fmul half %x, %y
+  %mul.ext = fpext half %mul to float
+  %add = fadd float %mul.ext, %z
+  ret float %add
+}
+
+; f16->f64 is not free.
+; GCN-LABEL: {{^}}fadd_fpext_fmul_f16_to_f64:
+; GFX89: v_mul_f16
+; GFX89: v_cvt_f32_f16
+; GFX89: v_cvt_f64_f32
+; GFX89: v_add_f64
+define double @fadd_fpext_fmul_f16_to_f64(half %x, half %y, double %z) #0 {
+entry:
+  %mul = fmul half %x, %y
+  %mul.ext = fpext half %mul to double
+  %add = fadd double %mul.ext, %z
+  ret double %add
+}
+
+; f32->f64 is not free.
+; GCN-LABEL: {{^}}fadd_fpext_fmul_f32_to_f64:
+; GCN: v_mul_f32
+; GCN: v_cvt_f64_f32
+; GCN: v_add_f64
+define double @fadd_fpext_fmul_f32_to_f64(float %x, float %y, double %z) #0 {
+entry:
+  %mul = fmul float %x, %y
+  %mul.ext = fpext float %mul to double
+  %add = fadd double %mul.ext, %z
+  ret double %add
+}
+
+; fold (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
+; GCN-LABEL: {{^}}fadd_fpext_fmul_f16_to_f32_commute:
+; GCN: s_waitcnt
+; GFX9-F32FLUSH-NEXT: v_mad_mix_f32 v0, v0, v1, v2 op_sel_hi:[1,1,0]{{$}}
+; GFX9-F32FLUSH-NEXT: s_setpc_b64
+
+; GFX9-F32DENORM-NEXT: v_mul_f16
+; GFX9-F32DENORM-NEXT: v_cvt_f32_f16
+; GFX9-F32DENORM-NEXT: v_add_f32
+; GFX9-F32DENORM-NEXT: s_setpc_b64
+define float @fadd_fpext_fmul_f16_to_f32_commute(half %x, half %y, float %z) #0 {
+entry:
+  %mul = fmul half %x, %y
+  %mul.ext = fpext half %mul to float
+  %add = fadd float %z, %mul.ext
+  ret float %add
+}
+
+; fold (fadd (fma x, y, (fpext (fmul u, v))), z)
+;   -> (fma x, y, (fma (fpext u), (fpext v), z))
+
+; GCN-LABEL: {{^}}fadd_muladd_fpext_fmul_f16_to_f32:
+; GCN: s_waitcnt
+; GFX9-F32FLUSH-NEXT: v_mad_mix_f32 v2, v2, v3, v4 op_sel_hi:[1,1,0]
+; GFX9-F32FLUSH-NEXT: v_mac_f32_e32 v2, v0, v1
+; GFX9-F32FLUSH-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-F32FLUSH-NEXT: s_setpc_b64
+
+; GFX9-F32DENORM-NEXT: v_mul_f16
+; GFX9-F32DENORM-NEXT: v_cvt_f32_f16
+; GFX9-F32DENORM-NEXT: v_fma_f32
+; GFX9-F32DENORM-NEXT: v_add_f32
+; GFX9-F32DENORM-NEXT: s_setpc_b64
+define float @fadd_muladd_fpext_fmul_f16_to_f32(float %x, float %y, half %u, half %v, float %z) #0 {
+entry:
+  %mul = fmul half %u, %v
+  %mul.ext = fpext half %mul to float
+  %fma = call float @llvm.fmuladd.f32(float %x, float %y, float %mul.ext)
+  %add = fadd float %fma, %z
+  ret float %add
+}
+
+; fold (fadd x, (fma y, z, (fpext (fmul u, v))))
+;   -> (fma y, z, (fma (fpext u), (fpext v), x))
+; GCN-LABEL: {{^}}fadd_muladd_fpext_fmul_f16_to_f32_commute:
+; GCN: s_waitcnt
+; GFX9-F32FLUSH-NEXT: v_mad_mix_f32 v2, v2, v3, v4 op_sel_hi:[1,1,0]
+; GFX9-F32FLUSH-NEXT: v_mac_f32_e32 v2, v0, v1
+; GFX9-F32FLUSH-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-F32FLUSH-NEXT: s_setpc_b64
+
+; GFX9-F32DENORM-NEXT: v_mul_f16
+; GFX9-F32DENORM-NEXT: v_cvt_f32_f16
+; GFX9-F32DENORM-NEXT: v_fma_f32
+; GFX9-F32DENORM-NEXT: v_add_f32
+; GFX9-F32DENORM-NEXT: s_setpc_b64
+define float @fadd_muladd_fpext_fmul_f16_to_f32_commute(float %x, float %y, half %u, half %v, float %z) #0 {
+entry:
+  %mul = fmul half %u, %v
+  %mul.ext = fpext half %mul to float
+  %fma = call float @llvm.fmuladd.f32(float %x, float %y, float %mul.ext)
+  %add = fadd float %z, %fma
+  ret float %add
+}
+
+; GCN-LABEL: {{^}}fadd_fmad_fpext_fmul_f16_to_f32:
+; GCN: s_waitcnt
+; GFX9-F32FLUSH-NEXT: v_mad_mix_f32 v2, v2, v3, v4 op_sel_hi:[1,1,0]
+; GFX9-F32FLUSH-NEXT: v_mac_f32_e32 v2, v0, v1
+; GFX9-F32FLUSH-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-F32FLUSH-NEXT: s_setpc_b64
+
+; GFX9-F32DENORM-NEXT: v_mul_f16_e32 v2, v2, v3
+; GFX9-F32DENORM-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX9-F32DENORM-NEXT: v_fma_f32 v0, v0, v1, v2
+define float @fadd_fmad_fpext_fmul_f16_to_f32(float %x, float %y, half %u, half %v, float %z) #0 {
+entry:
+  %mul = fmul half %u, %v
+  %mul.ext = fpext half %mul to float
+  %mul1 = fmul contract float %x, %y
+  %fmad = fadd contract float %mul1, %mul.ext
+  %add = fadd float %fmad, %z
+  ret float %add
+}
+
+; fold (fadd (fma x, y, (fpext (fmul u, v))), z)
+;   -> (fma x, y, (fma (fpext u), (fpext v), z))
+
+; GCN-LABEL: {{^}}fadd_fma_fpext_fmul_f16_to_f32:
+; GCN: s_waitcnt
+; GFX89: v_mul_f16
+; GFX89: v_cvt_f32_f16
+; GFX89: v_fma_f32
+; GFX89: v_add_f32
+define float @fadd_fma_fpext_fmul_f16_to_f32(float %x, float %y, half %u, half %v, float %z) #0 {
+entry:
+  %mul = fmul contract half %u, %v
+  %mul.ext = fpext half %mul to float
+  %fma = call float @llvm.fma.f32(float %x, float %y, float %mul.ext)
+  %add = fadd float %fma, %z
+  ret float %add
+}
+
+; GCN-LABEL: {{^}}fadd_fma_fpext_fmul_f16_to_f32_commute:
+; GCN: s_waitcnt
+; GFX89: v_mul_f16
+; GFX89: v_cvt_f32_f16
+; GFX89: v_fma_f32
+; GFX89: v_add_f32
+define float @fadd_fma_fpext_fmul_f16_to_f32_commute(float %x, float %y, half %u, half %v, float %z) #0 {
+entry:
+  %mul = fmul contract half %u, %v
+  %mul.ext = fpext half %mul to float
+  %fma = call float @llvm.fma.f32(float %x, float %y, float %mul.ext)
+  %add = fadd float %z, %fma
+  ret float %add
+}
+
+; fold (fadd x, (fpext (fma y, z, (fmul u, v))))
+;   -> (fma (fpext y), (fpext z), (fma (fpext u), (fpext v), x))
+
+; GCN-LABEL: {{^}}fadd_fpext_fmuladd_f16_to_f32:
+; GFX9: v_mul_f16
+; GFX9: v_fma_legacy_f16
+; GFX9: v_cvt_f32_f16
+; GFX9: v_add_f32_e32
+define float @fadd_fpext_fmuladd_f16_to_f32(float %x, half %y, half %z, half %u, half %v) #0 {
+entry:
+  %mul = fmul contract half %u, %v
+  %fma = call half @llvm.fmuladd.f16(half %y, half %z, half %mul)
+  %ext.fma = fpext half %fma to float
+  %add = fadd float %x, %ext.fma
+  ret float %add
+}
+
+; GCN-LABEL: {{^}}fadd_fpext_fma_f16_to_f32:
+; GFX9: v_mul_f16
+; GFX9: v_fma_legacy_f16
+; GFX9: v_cvt_f32_f16
+; GFX9: v_add_f32_e32
+define float @fadd_fpext_fma_f16_to_f32(float %x, half %y, half %z, half %u, half %v) #0 {
+entry:
+  %mul = fmul contract half %u, %v
+  %fma = call half @llvm.fma.f16(half %y, half %z, half %mul)
+  %ext.fma = fpext half %fma to float
+  %add = fadd float %x, %ext.fma
+  ret float %add
+}
+
+; GCN-LABEL: {{^}}fadd_fpext_fma_f16_to_f32_commute:
+; GFX9: v_mul_f16
+; GFX9: v_fma_legacy_f16
+; GFX9: v_cvt_f32_f16
+; GFX9: v_add_f32_e32
+define float @fadd_fpext_fma_f16_to_f32_commute(float %x, half %y, half %z, half %u, half %v) #0 {
+entry:
+  %mul = fmul contract half %u, %v
+  %fma = call half @llvm.fma.f16(half %y, half %z, half %mul)
+  %ext.fma = fpext half %fma to float
+  %add = fadd float %ext.fma, %x
+  ret float %add
+}
+
+; fold (fsub (fpext (fmul x, y)), z)
+;   -> (fma (fpext x), (fpext y), (fneg z))
+
+; GCN-LABEL: {{^}}fsub_fpext_fmul_f16_to_f32:
+; GCN: s_waitcnt
+; GFX9-F32FLUSH-NEXT: v_mad_mix_f32 v0, v0, v1, -v2 op_sel_hi:[1,1,0]{{$}}
+; GFX9-F32FLUSH-NEXT: s_setpc_b64
+
+; GFX9-F32DENORM-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX9-F32DENORM-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX9-F32DENORM-NEXT: v_sub_f32_e32 v0, v0, v2
+; GFX9-F32DENORM-NEXT: s_setpc_b64
+define float @fsub_fpext_fmul_f16_to_f32(half %x, half %y, float %z) #0 {
+entry:
+  %mul = fmul half %x, %y
+  %mul.ext = fpext half %mul to float
+  %add = fsub float %mul.ext, %z
+  ret float %add
+}
+
+; fold (fsub x, (fpext (fmul y, z)))
+;   -> (fma (fneg (fpext y)), (fpext z), x)
+
+; GCN-LABEL: {{^}}fsub_fpext_fmul_f16_to_f32_commute:
+; GCN: s_waitcnt
+; GFX9-F32FLUSH-NEXT: v_mad_mix_f32 v0, -v1, v2, v0 op_sel_hi:[1,1,0]
+; GFX9-F32FLUSH-NEXT: s_setpc_b64
+
+; GFX9-F32DENORM-NEXT: v_mul_f16_e32
+; GFX9-F32DENORM-NEXT: v_cvt_f32_f16_e32
+; GFX9-F32DENORM-NEXT: v_sub_f32_e32
+; GFX9-F32DENORM-NEXT: s_setpc_b64
+define float @fsub_fpext_fmul_f16_to_f32_commute(float %x, half %y, half %z) #0 {
+entry:
+  %mul = fmul contract half %y, %z
+  %mul.ext = fpext half %mul to float
+  %add = fsub contract float %x, %mul.ext
+  ret float %add
+}
+
+; fold (fsub (fpext (fneg (fmul x, y))), z)
+;   -> (fneg (fma (fpext x), (fpext y), z))
+
+; FIXME: Should be able to fold fneg
+; GCN-LABEL: {{^}}fsub_fpext_fneg_fmul_f16_to_f32:
+; GCN: s_waitcnt
+; GFX9-F32FLUSH-NEXT: v_xor_b32_e32 v1, 0x8000, v1
+; GFX9-F32FLUSH-NEXT: v_mad_mix_f32 v0, v0, v1, -v2 op_sel_hi:[1,1,0]{{$}}
+; GFX9-F32FLUSH-NEXT: s_setpc_b64
+
+; GFX9-F32DENORM-NEXT: v_mul_f16_e64 v0, v0, -v1
+; GFX9-F32DENORM-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX9-F32DENORM-NEXT: v_sub_f32_e32 v0, v0, v2
+; GFX9-F32DENORM-NEXT: s_setpc_b64
+define float @fsub_fpext_fneg_fmul_f16_to_f32(half %x, half %y, float %z) #0 {
+entry:
+  %mul = fmul half %x, %y
+  %neg.mul = fsub half -0.0, %mul
+  %neg.mul.ext = fpext half %neg.mul to float
+  %add = fsub float %neg.mul.ext, %z
+  ret float %add
+}
+
+; fold (fsub (fneg (fpext (fmul x, y))), z)
+;   -> (fneg (fma (fpext x), (fpext y), z))
+
+; FIXME: Should be able to fold fneg
+; GCN-LABEL: {{^}}fsub_fneg_fpext_fmul_f16_to_f32:
+; GCN: s_waitcnt
+; GFX9-F32FLUSH-NEXT: v_xor_b32_e32 v1, 0x8000, v1
+; GFX9-F32FLUSH-NEXT: v_mad_mix_f32 v0, v0, v1, -v2 op_sel_hi:[1,1,0]{{$}}
+; GFX9-F32FLUSH-NEXT: s_setpc_b64
+
+; GFX9-F32DENORM-NEXT: v_mul_f16_e64 v0, v0, -v1
+; GFX9-F32DENORM-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX9-F32DENORM-NEXT: v_sub_f32_e32 v0, v0, v2
+; GFX9-F32DENORM-NEXT: s_setpc_b64
+define float @fsub_fneg_fpext_fmul_f16_to_f32(half %x, half %y, float %z) #0 {
+entry:
+  %mul = fmul half %x, %y
+  %mul.ext = fpext half %mul to float
+  %neg.mul.ext = fsub float -0.0, %mul.ext
+  %add = fsub float %neg.mul.ext, %z
+  ret float %add
+}
+
+; fold (fsub (fmad x, y, (fpext (fmul u, v))), z)
+;    -> (fmad x, y, (fmad (fpext u), (fpext v), (fneg z)))
+; GCN-LABEL: {{^}}fsub_muladd_fpext_mul_f16_to_f32:
+; GCN: s_waitcnt
+; GFX9-F32FLUSH-NEXT: v_mad_mix_f32 v2, v3, v4, -v2 op_sel_hi:[1,1,0]{{$}}
+; GFX9-F32FLUSH-NEXT: v_mac_f32_e32 v2, v0, v1
+; GFX9-F32FLUSH-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-F32FLUSH-NEXT: s_setpc_b64
+
+; GFX9-F32DENORM-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX9-F32DENORM-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX9-F32DENORM-NEXT: v_fma_f32 v0, v0, v1, v3
+; GFX9-F32DENORM-NEXT: v_sub_f32_e32 v0, v0, v2
+; GFX9-F32DENORM-NEXT: s_setpc_b64
+define float @fsub_muladd_fpext_mul_f16_to_f32(float %x, float %y, float %z, half %u, half %v) #0 {
+entry:
+  %mul = fmul half %u, %v
+  %mul.ext = fpext half %mul to float
+  %fma = call float @llvm.fmuladd.f32(float %x, float %y, float %mul.ext)
+  %add = fsub float %fma, %z
+  ret float %add
+}
+
+;  fold (fsub (fpext (fmad x, y, (fmul u, v))), z)
+;    -> (fmad (fpext x), (fpext y),
+;            (fmad (fpext u), (fpext v), (fneg z)))
+
+; GCN-LABEL: {{^}}fsub_fpext_muladd_mul_f16_to_f32:
+; GFX9: v_mul_f16
+; GFX9: v_fma_legacy_f16
+; GFX9: v_cvt_f32_f16
+; GFX9: v_sub_f32
+; GCN: s_setpc_b64
+define float @fsub_fpext_muladd_mul_f16_to_f32(half %x, half %y, float %z, half %u, half %v) #0 {
+entry:
+  %mul = fmul half %u, %v
+  %fma = call half @llvm.fmuladd.f16(half %x, half %y, half %mul)
+  %fma.ext = fpext half %fma to float
+  %add = fsub float %fma.ext, %z
+  ret float %add
+}
+
+; fold (fsub x, (fmad y, z, (fpext (fmul u, v))))
+;   -> (fmad (fneg y), z, (fmad (fneg (fpext u)), (fpext v), x))
+; GCN-LABEL: {{^}}fsub_muladd_fpext_mul_f16_to_f32_commute:
+; GCN: s_waitcnt
+; GFX9-F32FLUSH-NEXT: v_mad_mix_f32 v0, -v3, v4, v0 op_sel_hi:[1,1,0]{{$}}
+; GFX9-F32FLUSH-NEXT: v_mad_f32 v0, -v1, v2, v0{{$}}
+; GFX9-F32FLUSH-NEXT: s_setpc_b64
+
+; GFX9-F32DENORM-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX9-F32DENORM-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX9-F32DENORM-NEXT: v_fma_f32 v1, v1, v2, v3
+; GFX9-F32DENORM-NEXT: v_sub_f32_e32 v0, v0, v1
+; GFX9-F32DENORM-NEXT: s_setpc_b64
+define float @fsub_muladd_fpext_mul_f16_to_f32_commute(float %x, float %y, float %z, half %u, half %v) #0 {
+entry:
+  %mul = fmul half %u, %v
+  %mul.ext = fpext half %mul to float
+  %fma = call float @llvm.fmuladd.f32(float %y, float %z, float %mul.ext)
+  %add = fsub float %x, %fma
+  ret float %add
+}
+
+; fold (fsub x, (fpext (fma y, z, (fmul u, v))))
+;    -> (fma (fneg (fpext y)), (fpext z),
+;            (fma (fneg (fpext u)), (fpext v), x))
+; GCN-LABEL: {{^}}fsub_fpext_muladd_mul_f16_to_f32_commute:
+; GCN: s_waitcnt
+; GFX9-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX9-NEXT: v_fma_legacy_f16 v1, v1, v2, v3
+; GFX9-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX9-NEXT: v_sub_f32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64
+define float @fsub_fpext_muladd_mul_f16_to_f32_commute(float %x, half %y, half %z, half %u, half %v) #0 {
+entry:
+  %mul = fmul half %u, %v
+  %fma = call half @llvm.fmuladd.f16(half %y, half %z, half %mul)
+  %fma.ext = fpext half %fma to float
+  %add = fsub float %x, %fma.ext
+  ret float %add
+}
+
+declare float @llvm.fmuladd.f32(float, float, float) #0
+declare float @llvm.fma.f32(float, float, float) #0
+declare half @llvm.fmuladd.f16(half, half, half) #0
+declare half @llvm.fma.f16(half, half, half) #0
+
+attributes #0 = { nounwind readnone speculatable }




More information about the llvm-commits mailing list