[llvm] e5e49a0 - [AMDGPU][GlobalISel] Transform (fadd (fma x, y, (fpext (fmul u, v))), z) -> (fma x, y, (fma (fpext u), (fpext v), z))
Mirko Brkusanin via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 29 07:28:27 PST 2021
Author: Mirko Brkusanin
Date: 2021-11-29T16:27:21+01:00
New Revision: e5e49a08f11618653aca133f22603c165889505e
URL: https://github.com/llvm/llvm-project/commit/e5e49a08f11618653aca133f22603c165889505e
DIFF: https://github.com/llvm/llvm-project/commit/e5e49a08f11618653aca133f22603c165889505e.diff
LOG: [AMDGPU][GlobalISel] Transform (fadd (fma x, y, (fpext (fmul u, v))), z) -> (fma x, y, (fma (fpext u), (fpext v), z))
Patch by: Mateja Marjanovic
Differential Revision: https://reviews.llvm.org/D98047
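
For reference, here is a minimal LLVM IR sketch of the primary pattern this
combine targets, lifted from test_f16_f32_add_fma_ext_mul in the new test file
below (the function name @sketch is made up for illustration): the fmul is
performed in half while the surrounding fma and fadd are float, so the fpext
can be pushed onto the narrow multiply operands and the whole expression fused
into two fmas.

    ; before: (fadd (fma x, y, (fpext (fmul u, v))), z)
    define float @sketch(float %x, float %y, float %z, half %u, half %v) {
      %a = fmul half %u, %v
      %b = fpext half %a to float
      %c = call float @llvm.fmuladd.f32(float %x, float %y, float %b)
      %d = fadd float %c, %z
      ret float %d
    }
    declare float @llvm.fmuladd.f32(float, float, float)

    ; after (conceptually): fma %x, %y, (fma (fpext %u), (fpext %v), %z)

Whether the rewrite actually fires depends on the fast-math/contract
environment (see canCombineFMadOrFMA and the Aggressive check below) and on
TLI.isFPExtFoldable accepting the narrow source type.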
Added:
llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-ext-fma.ll
Modified:
llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
llvm/include/llvm/Target/GlobalISel/Combine.td
llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index ad76a6db69711..1597e79dd85b6 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -667,6 +667,13 @@ class CombinerHelper {
bool matchCombineFAddFMAFMulToFMadOrFMA(MachineInstr &MI,
BuildFnTy &MatchInfo);
+ // Transform (fadd (fma x, y, (fpext (fmul u, v))), z)
+ // -> (fma x, y, (fma (fpext u), (fpext v), z))
+ // (fadd (fmad x, y, (fpext (fmul u, v))), z)
+ // -> (fmad x, y, (fmad (fpext u), (fpext v), z))
+ bool matchCombineFAddFpExtFMulToFMadOrFMAAggressive(MachineInstr &MI,
+ BuildFnTy &MatchInfo);
+
private:
/// Given a non-indexed load or store instruction \p MI, find an offset that
/// can be usefully and legally folded into it as a post-indexing operation.
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index be2973153c03a..caee899eee36a 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -792,6 +792,15 @@ def combine_fadd_fma_fmul_to_fmad_or_fma: GICombineRule<
${info}); }]),
(apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+// Transform (fadd (fma x, y, (fpext (fmul u, v))), z) ->
+// (fma x, y, (fma (fpext u), (fpext v), z))
+def combine_fadd_fpext_fma_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FADD):$root,
+ [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
+ *${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
// FIXME: These should use the custom predicate feature once it lands.
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
undef_to_negative_one,
@@ -825,8 +834,8 @@ def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, add_p2i_to_ptradd,
mul_by_neg_one]>;
def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma,
- combine_fadd_fpext_fmul_to_fmad_or_fma,
- combine_fadd_fma_fmul_to_fmad_or_fma]>;
+ combine_fadd_fpext_fmul_to_fmad_or_fma, combine_fadd_fma_fmul_to_fmad_or_fma,
+ combine_fadd_fpext_fma_fmul_to_fmad_or_fma]>;
def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
extract_vec_elt_combines, combines_for_extload,
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index e751247afdf63..5973c133a54d4 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -5023,6 +5023,130 @@ bool CombinerHelper::matchCombineFAddFMAFMulToFMadOrFMA(
return false;
}
+bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
+ MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ assert(MI.getOpcode() == TargetOpcode::G_FADD);
+
+ bool AllowFusionGlobally, HasFMAD, Aggressive;
+ if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
+ return false;
+
+ if (!Aggressive)
+ return false;
+
+ const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
+ LLT DstType = MRI.getType(MI.getOperand(0).getReg());
+ MachineInstr *LHS = MRI.getVRegDef(MI.getOperand(1).getReg());
+ MachineInstr *RHS = MRI.getVRegDef(MI.getOperand(2).getReg());
+
+ unsigned PreferredFusedOpcode =
+ HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
+
+ // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
+ // prefer to fold the multiply with fewer uses.
+ if (Aggressive && isContractableFMul(*LHS, AllowFusionGlobally) &&
+ isContractableFMul(*RHS, AllowFusionGlobally)) {
+ if (hasMoreUses(*LHS, *RHS, MRI))
+ std::swap(LHS, RHS);
+ }
+
+ // Builds: (fma x, y, (fma (fpext u), (fpext v), z))
+ auto buildMatchInfo = [=, &MI](Register U, Register V, Register Z, Register X,
+ Register Y, MachineIRBuilder &B) {
+ Register FpExtU = B.buildFPExt(DstType, U).getReg(0);
+ Register FpExtV = B.buildFPExt(DstType, V).getReg(0);
+ Register InnerFMA =
+ B.buildInstr(PreferredFusedOpcode, {DstType}, {FpExtU, FpExtV, Z})
+ .getReg(0);
+ B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
+ {X, Y, InnerFMA});
+ };
+
+ MachineInstr *FMulMI, *FMAMI;
+ // fold (fadd (fma x, y, (fpext (fmul u, v))), z)
+ // -> (fma x, y, (fma (fpext u), (fpext v), z))
+ if (LHS->getOpcode() == PreferredFusedOpcode &&
+ mi_match(LHS->getOperand(3).getReg(), MRI, m_GFPExt(m_MInstr(FMulMI))) &&
+ isContractableFMul(*FMulMI, AllowFusionGlobally) &&
+ TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
+ MRI.getType(FMulMI->getOperand(0).getReg()))) {
+ MatchInfo = [=](MachineIRBuilder &B) {
+ buildMatchInfo(FMulMI->getOperand(1).getReg(),
+ FMulMI->getOperand(2).getReg(),
+ RHS->getOperand(0).getReg(), LHS->getOperand(1).getReg(),
+ LHS->getOperand(2).getReg(), B);
+ };
+ return true;
+ }
+
+ // fold (fadd (fpext (fma x, y, (fmul u, v))), z)
+ // -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
+ // FIXME: This turns two single-precision and one double-precision
+ // operation into two double-precision operations, which might not be
+ // interesting for all targets, especially GPUs.
+ if (mi_match(LHS->getOperand(0).getReg(), MRI, m_GFPExt(m_MInstr(FMAMI))) &&
+ FMAMI->getOpcode() == PreferredFusedOpcode) {
+ MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
+ if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
+ TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
+ MRI.getType(FMAMI->getOperand(0).getReg()))) {
+ MatchInfo = [=](MachineIRBuilder &B) {
+ Register X = FMAMI->getOperand(1).getReg();
+ Register Y = FMAMI->getOperand(2).getReg();
+ X = B.buildFPExt(DstType, X).getReg(0);
+ Y = B.buildFPExt(DstType, Y).getReg(0);
+ buildMatchInfo(FMulMI->getOperand(1).getReg(),
+ FMulMI->getOperand(2).getReg(),
+ RHS->getOperand(0).getReg(), X, Y, B);
+ };
+
+ return true;
+ }
+ }
+
+ // fold (fadd z, (fma x, y, (fpext (fmul u, v))))
+ // -> (fma x, y, (fma (fpext u), (fpext v), z))
+ if (RHS->getOpcode() == PreferredFusedOpcode &&
+ mi_match(RHS->getOperand(3).getReg(), MRI, m_GFPExt(m_MInstr(FMulMI))) &&
+ isContractableFMul(*FMulMI, AllowFusionGlobally) &&
+ TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
+ MRI.getType(FMulMI->getOperand(0).getReg()))) {
+ MatchInfo = [=](MachineIRBuilder &B) {
+ buildMatchInfo(FMulMI->getOperand(1).getReg(),
+ FMulMI->getOperand(2).getReg(),
+ LHS->getOperand(0).getReg(), RHS->getOperand(1).getReg(),
+ RHS->getOperand(2).getReg(), B);
+ };
+ return true;
+ }
+
+ // fold (fadd z, (fpext (fma x, y, (fmul u, v))))
+ // -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
+ // FIXME: This turns two single-precision and one double-precision
+ // operation into two double-precision operations, which might not be
+ // interesting for all targets, especially GPUs.
+ if (mi_match(RHS->getOperand(0).getReg(), MRI, m_GFPExt(m_MInstr(FMAMI))) &&
+ FMAMI->getOpcode() == PreferredFusedOpcode) {
+ MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
+ if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
+ TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
+ MRI.getType(FMAMI->getOperand(0).getReg()))) {
+ MatchInfo = [=](MachineIRBuilder &B) {
+ Register X = FMAMI->getOperand(1).getReg();
+ Register Y = FMAMI->getOperand(2).getReg();
+ X = B.buildFPExt(DstType, X).getReg(0);
+ Y = B.buildFPExt(DstType, Y).getReg(0);
+ buildMatchInfo(FMulMI->getOperand(1).getReg(),
+ FMulMI->getOperand(2).getReg(),
+ LHS->getOperand(0).getReg(), X, Y, B);
+ };
+ return true;
+ }
+ }
+
+ return false;
+}
+
bool CombinerHelper::tryCombine(MachineInstr &MI) {
if (tryCombineCopy(MI))
return true;
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-ext-fma.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-ext-fma.ll
new file mode 100644
index 0000000000000..ec96e2f26d675
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-ext-fma.ll
@@ -0,0 +1,499 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx900 --denormal-fp-math=preserve-sign < %s | FileCheck -check-prefix=GFX9-DENORM %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 -fp-contract=fast < %s | FileCheck -check-prefix=GFX10-CONTRACT %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 --denormal-fp-math=preserve-sign < %s | FileCheck -check-prefix=GFX10-DENORM %s
+
+; fold (fadd (fma x, y, (fpext (fmul u, v))), z) -> (fma x, y, (fma (fpext u), (fpext v), z))
+define amdgpu_vs float @test_f16_f32_add_fma_ext_mul(float %x, float %y, float %z, half %u, half %v) {
+; GFX9-DENORM-LABEL: test_f16_f32_add_fma_ext_mul:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v4, v4
+; GFX9-DENORM-NEXT: v_mad_f32 v2, v3, v4, v2
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v2, v0, v1
+; GFX9-DENORM-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_f16_f32_add_fma_ext_mul:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX10-NEXT: v_fmac_f32_e32 v3, v0, v1
+; GFX10-NEXT: v_add_f32_e32 v0, v3, v2
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_f16_f32_add_fma_ext_mul:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v3, v0, v1
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v0, v3, v2
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_f16_f32_add_fma_ext_mul:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v3, v0, v1
+; GFX10-DENORM-NEXT: v_add_f32_e32 v0, v3, v2
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+ %a = fmul half %u, %v
+ %b = fpext half %a to float
+ %c = call float @llvm.fmuladd.f32(float %x, float %y, float %b)
+ %d = fadd float %c, %z
+ ret float %d
+}
+
+; fold (fadd (fpext (fma x, y, (fmul u, v))), z) -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
+define amdgpu_vs float @test_f16_f32_add_ext_fma_mul(half %x, half %y, float %z, half %u, half %v) {
+; GFX9-DENORM-LABEL: test_f16_f32_add_ext_fma_mul:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v5, v0
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v0, v3
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v3, v4
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX9-DENORM-NEXT: v_mad_f32 v0, v0, v3, v2
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v0, v5, v1
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_f16_f32_add_ext_fma_mul:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-NEXT: v_fmac_f16_e32 v3, v0, v1
+; GFX10-NEXT: v_cvt_f32_f16_e32 v0, v3
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_f16_f32_add_ext_fma_mul:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-CONTRACT-NEXT: v_fmac_f16_e32 v3, v0, v1
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v0, v3
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_f16_f32_add_ext_fma_mul:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-DENORM-NEXT: v_mul_f16_e32 v0, v0, v1
+; GFX10-DENORM-NEXT: v_add_f16_e32 v0, v0, v3
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v0, v0
+; GFX10-DENORM-NEXT: v_add_f32_e32 v0, v0, v2
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+ %a = fmul half %u, %v
+ %b = call half @llvm.fmuladd.f16(half %x, half %y, half %a)
+ %c = fpext half %b to float
+ %d = fadd float %c, %z
+ ret float %d
+}
+
+; fold (fadd x, (fma y, z, (fpext (fmul u, v)))) -> (fma y, z, (fma (fpext u), (fpext v), x))
+define amdgpu_vs float @test_f16_f32_add_fma_ext_mul_rhs(float %x, float %y, float %z, half %u, half %v) {
+; GFX9-DENORM-LABEL: test_f16_f32_add_fma_ext_mul_rhs:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v4, v4
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v0, v3, v4
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v0, v1, v2
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_f16_f32_add_fma_ext_mul_rhs:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX10-NEXT: v_fmac_f32_e32 v3, v1, v2
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_f16_f32_add_fma_ext_mul_rhs:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v3, v1, v2
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_f16_f32_add_fma_ext_mul_rhs:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v3, v1, v2
+; GFX10-DENORM-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+ %a = fmul half %u, %v
+ %b = fpext half %a to float
+ %c = call float @llvm.fmuladd.f32(float %y, float %z, float %b)
+ %d = fadd float %x, %c
+ ret float %d
+}
+
+; fold (fadd x, (fpext (fma y, z, (fmul u, v)))) -> (fma (fpext y), (fpext z), (fma (fpext u), (fpext v), x))
+define amdgpu_vs float @test_f16_f32_add_ext_fma_mul_rhs(float %x, half %y, half %z, half %u, half %v) {
+; GFX9-DENORM-LABEL: test_f16_f32_add_ext_fma_mul_rhs:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v4, v4
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v2, v2
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v0, v3, v4
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v0, v1, v2
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_f16_f32_add_ext_fma_mul_rhs:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-NEXT: v_fmac_f16_e32 v3, v1, v2
+; GFX10-NEXT: v_cvt_f32_f16_e32 v1, v3
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_f16_f32_add_ext_fma_mul_rhs:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-CONTRACT-NEXT: v_fmac_f16_e32 v3, v1, v2
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v1, v3
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_f16_f32_add_ext_fma_mul_rhs:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: v_mul_f16_e32 v3, v3, v4
+; GFX10-DENORM-NEXT: v_mul_f16_e32 v1, v1, v2
+; GFX10-DENORM-NEXT: v_add_f16_e32 v1, v1, v3
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v1, v1
+; GFX10-DENORM-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+ %a = fmul half %u, %v
+ %b = call half @llvm.fmuladd.f16(half %y, half %z, half %a)
+ %c = fpext half %b to float
+ %d = fadd float %x, %c
+ ret float %d
+}
+
+; fold (fadd (fma x, y, (fpext (fmul u, v))), z) -> (fma x, y, (fma (fpext u), (fpext v), z))
+define amdgpu_vs <4 x float> @test_v4f16_v4f32_add_fma_ext_mul(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x half> %u, <4 x half> %v) {
+; GFX9-DENORM-LABEL: test_v4f16_v4f32_add_fma_ext_mul:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v12, v12, v14
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v13, v13, v15
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v14, v12
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_sdwa v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v15, v13
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_sdwa v13, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v14, v0, v4
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v12, v1, v5
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v15, v2, v6
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v13, v3, v7
+; GFX9-DENORM-NEXT: v_add_f32_e32 v0, v14, v8
+; GFX9-DENORM-NEXT: v_add_f32_e32 v1, v12, v9
+; GFX9-DENORM-NEXT: v_add_f32_e32 v2, v15, v10
+; GFX9-DENORM-NEXT: v_add_f32_e32 v3, v13, v11
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_v4f16_v4f32_add_fma_ext_mul:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_pk_mul_f16 v12, v12, v14
+; GFX10-NEXT: v_pk_mul_f16 v13, v13, v15
+; GFX10-NEXT: v_cvt_f32_f16_e32 v14, v12
+; GFX10-NEXT: v_cvt_f32_f16_sdwa v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT: v_cvt_f32_f16_e32 v15, v13
+; GFX10-NEXT: v_cvt_f32_f16_sdwa v13, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT: v_fmac_f32_e32 v14, v0, v4
+; GFX10-NEXT: v_fmac_f32_e32 v12, v1, v5
+; GFX10-NEXT: v_fmac_f32_e32 v15, v2, v6
+; GFX10-NEXT: v_fmac_f32_e32 v13, v3, v7
+; GFX10-NEXT: v_add_f32_e32 v0, v14, v8
+; GFX10-NEXT: v_add_f32_e32 v1, v12, v9
+; GFX10-NEXT: v_add_f32_e32 v2, v15, v10
+; GFX10-NEXT: v_add_f32_e32 v3, v13, v11
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_v4f16_v4f32_add_fma_ext_mul:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: v_pk_mul_f16 v12, v12, v14
+; GFX10-CONTRACT-NEXT: v_pk_mul_f16 v13, v13, v15
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v14, v12
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_sdwa v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v15, v13
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_sdwa v13, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v14, v0, v4
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v12, v1, v5
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v15, v2, v6
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v13, v3, v7
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v0, v14, v8
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v1, v12, v9
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v2, v15, v10
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v3, v13, v11
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_v4f16_v4f32_add_fma_ext_mul:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v12, v12, v14
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v13, v13, v15
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v14, v12
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_sdwa v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v15, v13
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_sdwa v13, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v14, v0, v4
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v12, v1, v5
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v15, v2, v6
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v13, v3, v7
+; GFX10-DENORM-NEXT: v_add_f32_e32 v0, v14, v8
+; GFX10-DENORM-NEXT: v_add_f32_e32 v1, v12, v9
+; GFX10-DENORM-NEXT: v_add_f32_e32 v2, v15, v10
+; GFX10-DENORM-NEXT: v_add_f32_e32 v3, v13, v11
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+ %a = fmul <4 x half> %u, %v
+ %b = fpext <4 x half> %a to <4 x float>
+ %c = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %b)
+ %d = fadd <4 x float> %c, %z
+ ret <4 x float> %d
+}
+
+; fold (fadd (fpext (fma x, y, (fmul u, v))), z) -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
+define amdgpu_vs <4 x float> @test_v4f16_v4f32_add_ext_fma_mul(<4 x half> %x, <4 x half> %y, <4 x float> %z, <4 x half> %u, <4 x half> %v) {
+; GFX9-DENORM-LABEL: test_v4f16_v4f32_add_ext_fma_mul:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v8, v8, v10
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v9, v9, v11
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v0, v0, v2
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v1, v1, v3
+; GFX9-DENORM-NEXT: v_pk_add_f16 v0, v0, v8
+; GFX9-DENORM-NEXT: v_pk_add_f16 v1, v1, v9
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v2, v0
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v8, v1
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_sdwa v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT: v_add_f32_e32 v0, v2, v4
+; GFX9-DENORM-NEXT: v_add_f32_e32 v1, v3, v5
+; GFX9-DENORM-NEXT: v_add_f32_e32 v2, v8, v6
+; GFX9-DENORM-NEXT: v_add_f32_e32 v3, v9, v7
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_v4f16_v4f32_add_ext_fma_mul:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_pk_mul_f16 v8, v8, v10
+; GFX10-NEXT: v_pk_mul_f16 v9, v9, v11
+; GFX10-NEXT: v_pk_fma_f16 v0, v0, v2, v8
+; GFX10-NEXT: v_pk_fma_f16 v1, v1, v3, v9
+; GFX10-NEXT: v_cvt_f32_f16_e32 v2, v0
+; GFX10-NEXT: v_cvt_f32_f16_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT: v_cvt_f32_f16_e32 v8, v1
+; GFX10-NEXT: v_cvt_f32_f16_sdwa v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT: v_add_f32_e32 v0, v2, v4
+; GFX10-NEXT: v_add_f32_e32 v1, v3, v5
+; GFX10-NEXT: v_add_f32_e32 v2, v8, v6
+; GFX10-NEXT: v_add_f32_e32 v3, v9, v7
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_v4f16_v4f32_add_ext_fma_mul:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: v_pk_mul_f16 v8, v8, v10
+; GFX10-CONTRACT-NEXT: v_pk_mul_f16 v9, v9, v11
+; GFX10-CONTRACT-NEXT: v_pk_fma_f16 v0, v0, v2, v8
+; GFX10-CONTRACT-NEXT: v_pk_fma_f16 v1, v1, v3, v9
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v2, v0
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v8, v1
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_sdwa v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v0, v2, v4
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v1, v3, v5
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v2, v8, v6
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v3, v9, v7
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_v4f16_v4f32_add_ext_fma_mul:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v8, v8, v10
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v0, v0, v2
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v2, v9, v11
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v1, v1, v3
+; GFX10-DENORM-NEXT: v_pk_add_f16 v0, v0, v8
+; GFX10-DENORM-NEXT: v_pk_add_f16 v1, v1, v2
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v2, v0
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v8, v1
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_sdwa v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT: v_add_f32_e32 v0, v2, v4
+; GFX10-DENORM-NEXT: v_add_f32_e32 v1, v3, v5
+; GFX10-DENORM-NEXT: v_add_f32_e32 v2, v8, v6
+; GFX10-DENORM-NEXT: v_add_f32_e32 v3, v9, v7
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+ %a = fmul <4 x half> %u, %v
+ %b = call <4 x half> @llvm.fmuladd.v4f16(<4 x half> %x, <4 x half> %y, <4 x half> %a)
+ %c = fpext <4 x half> %b to <4 x float>
+ %d = fadd <4 x float> %c, %z
+ ret <4 x float> %d
+}
+
+; fold (fadd x, (fma y, z, (fpext (fmul u, v)))) -> (fma y, z, (fma (fpext u), (fpext v), x))
+define amdgpu_vs <4 x float> @test_v4f16_v4f32_add_fma_ext_mul_rhs(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x half> %u, <4 x half> %v) {
+; GFX9-DENORM-LABEL: test_v4f16_v4f32_add_fma_ext_mul_rhs:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v12, v12, v14
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v13, v13, v15
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v14, v12
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_sdwa v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v15, v13
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_sdwa v13, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v14, v4, v8
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v12, v5, v9
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v15, v6, v10
+; GFX9-DENORM-NEXT: v_mac_f32_e32 v13, v7, v11
+; GFX9-DENORM-NEXT: v_add_f32_e32 v0, v0, v14
+; GFX9-DENORM-NEXT: v_add_f32_e32 v1, v1, v12
+; GFX9-DENORM-NEXT: v_add_f32_e32 v2, v2, v15
+; GFX9-DENORM-NEXT: v_add_f32_e32 v3, v3, v13
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_v4f16_v4f32_add_fma_ext_mul_rhs:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_pk_mul_f16 v12, v12, v14
+; GFX10-NEXT: v_pk_mul_f16 v13, v13, v15
+; GFX10-NEXT: v_cvt_f32_f16_e32 v14, v12
+; GFX10-NEXT: v_cvt_f32_f16_sdwa v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT: v_cvt_f32_f16_e32 v15, v13
+; GFX10-NEXT: v_cvt_f32_f16_sdwa v13, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT: v_fmac_f32_e32 v14, v4, v8
+; GFX10-NEXT: v_fmac_f32_e32 v12, v5, v9
+; GFX10-NEXT: v_fmac_f32_e32 v15, v6, v10
+; GFX10-NEXT: v_fmac_f32_e32 v13, v7, v11
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v14
+; GFX10-NEXT: v_add_f32_e32 v1, v1, v12
+; GFX10-NEXT: v_add_f32_e32 v2, v2, v15
+; GFX10-NEXT: v_add_f32_e32 v3, v3, v13
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_v4f16_v4f32_add_fma_ext_mul_rhs:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: v_pk_mul_f16 v12, v12, v14
+; GFX10-CONTRACT-NEXT: v_pk_mul_f16 v13, v13, v15
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v14, v12
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_sdwa v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v15, v13
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_sdwa v13, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v14, v4, v8
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v12, v5, v9
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v15, v6, v10
+; GFX10-CONTRACT-NEXT: v_fmac_f32_e32 v13, v7, v11
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v0, v0, v14
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v1, v1, v12
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v2, v2, v15
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v3, v3, v13
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_v4f16_v4f32_add_fma_ext_mul_rhs:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v12, v12, v14
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v13, v13, v15
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v14, v12
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_sdwa v12, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v15, v13
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_sdwa v13, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v14, v4, v8
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v12, v5, v9
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v15, v6, v10
+; GFX10-DENORM-NEXT: v_fmac_f32_e32 v13, v7, v11
+; GFX10-DENORM-NEXT: v_add_f32_e32 v0, v0, v14
+; GFX10-DENORM-NEXT: v_add_f32_e32 v1, v1, v12
+; GFX10-DENORM-NEXT: v_add_f32_e32 v2, v2, v15
+; GFX10-DENORM-NEXT: v_add_f32_e32 v3, v3, v13
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+ %a = fmul <4 x half> %u, %v
+ %b = fpext <4 x half> %a to <4 x float>
+ %c = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %y, <4 x float> %z, <4 x float> %b)
+ %d = fadd <4 x float> %x, %c
+ ret <4 x float> %d
+}
+
+; fold (fadd x, (fpext (fma y, z, (fmul u, v)))) -> (fma (fpext y), (fpext z), (fma (fpext u), (fpext v), x))
+define amdgpu_vs <4 x float> @test_v4f16_v4f32_add_ext_fma_mul_rhs(<4 x float> %x, <4 x half> %y, <4 x half> %z, <4 x half> %u, <4 x half> %v) {
+; GFX9-DENORM-LABEL: test_v4f16_v4f32_add_ext_fma_mul_rhs:
+; GFX9-DENORM: ; %bb.0: ; %.entry
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v8, v8, v10
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v9, v9, v11
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v4, v4, v6
+; GFX9-DENORM-NEXT: v_pk_mul_f16 v5, v5, v7
+; GFX9-DENORM-NEXT: v_pk_add_f16 v4, v4, v8
+; GFX9-DENORM-NEXT: v_pk_add_f16 v5, v5, v9
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v6, v4
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_sdwa v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_e32 v7, v5
+; GFX9-DENORM-NEXT: v_cvt_f32_f16_sdwa v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX9-DENORM-NEXT: v_add_f32_e32 v1, v1, v4
+; GFX9-DENORM-NEXT: v_add_f32_e32 v2, v2, v7
+; GFX9-DENORM-NEXT: v_add_f32_e32 v3, v3, v5
+; GFX9-DENORM-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: test_v4f16_v4f32_add_ext_fma_mul_rhs:
+; GFX10: ; %bb.0: ; %.entry
+; GFX10-NEXT: v_pk_mul_f16 v8, v8, v10
+; GFX10-NEXT: v_pk_mul_f16 v9, v9, v11
+; GFX10-NEXT: v_pk_fma_f16 v4, v4, v6, v8
+; GFX10-NEXT: v_pk_fma_f16 v5, v5, v7, v9
+; GFX10-NEXT: v_cvt_f32_f16_e32 v6, v4
+; GFX10-NEXT: v_cvt_f32_f16_sdwa v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT: v_cvt_f32_f16_e32 v7, v5
+; GFX10-NEXT: v_cvt_f32_f16_sdwa v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX10-NEXT: v_add_f32_e32 v1, v1, v4
+; GFX10-NEXT: v_add_f32_e32 v2, v2, v7
+; GFX10-NEXT: v_add_f32_e32 v3, v3, v5
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX10-CONTRACT-LABEL: test_v4f16_v4f32_add_ext_fma_mul_rhs:
+; GFX10-CONTRACT: ; %bb.0: ; %.entry
+; GFX10-CONTRACT-NEXT: v_pk_mul_f16 v8, v8, v10
+; GFX10-CONTRACT-NEXT: v_pk_mul_f16 v9, v9, v11
+; GFX10-CONTRACT-NEXT: v_pk_fma_f16 v4, v4, v6, v8
+; GFX10-CONTRACT-NEXT: v_pk_fma_f16 v5, v5, v7, v9
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v6, v4
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_sdwa v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_e32 v7, v5
+; GFX10-CONTRACT-NEXT: v_cvt_f32_f16_sdwa v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v1, v1, v4
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v2, v2, v7
+; GFX10-CONTRACT-NEXT: v_add_f32_e32 v3, v3, v5
+; GFX10-CONTRACT-NEXT: ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_v4f16_v4f32_add_ext_fma_mul_rhs:
+; GFX10-DENORM: ; %bb.0: ; %.entry
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v8, v8, v10
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v4, v4, v6
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v6, v9, v11
+; GFX10-DENORM-NEXT: v_pk_mul_f16 v5, v5, v7
+; GFX10-DENORM-NEXT: v_pk_add_f16 v4, v4, v8
+; GFX10-DENORM-NEXT: v_pk_add_f16 v5, v5, v6
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v6, v4
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_sdwa v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_e32 v7, v5
+; GFX10-DENORM-NEXT: v_cvt_f32_f16_sdwa v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT: v_add_f32_e32 v0, v0, v6
+; GFX10-DENORM-NEXT: v_add_f32_e32 v1, v1, v4
+; GFX10-DENORM-NEXT: v_add_f32_e32 v2, v2, v7
+; GFX10-DENORM-NEXT: v_add_f32_e32 v3, v3, v5
+; GFX10-DENORM-NEXT: ; return to shader part epilog
+.entry:
+ %a = fmul <4 x half> %u, %v
+ %b = call <4 x half> @llvm.fmuladd.v4f16(<4 x half> %y, <4 x half> %z, <4 x half> %a)
+ %c = fpext <4 x half> %b to <4 x float>
+ %d = fadd <4 x float> %x, %c
+ ret <4 x float> %d
+}
+
+declare float @llvm.fmuladd.f32(float, float, float) #0
+declare half @llvm.fmuladd.f16(half, half, half) #0
+declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>) #0
+declare <4 x half> @llvm.fmuladd.v4f16(<4 x half>, <4 x half>, <4 x half>) #0
+
+attributes #0 = { nounwind readnone }