[llvm] 0dd570f - [AMDGPU][GlobalISel] Transform (fsub (fpext (fneg (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y), z))

Mirko Brkusanin via llvm-commits <llvm-commits at lists.llvm.org>
Mon Nov 29 07:28:33 PST 2021


Author: Mirko Brkusanin
Date: 2021-11-29T16:27:22+01:00
New Revision: 0dd570ff56c53dd6d11305fb0b36edab69eb1484

URL: https://github.com/llvm/llvm-project/commit/0dd570ff56c53dd6d11305fb0b36edab69eb1484
DIFF: https://github.com/llvm/llvm-project/commit/0dd570ff56c53dd6d11305fb0b36edab69eb1484.diff

LOG: [AMDGPU][GlobalISel] Transform (fsub (fpext (fneg (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y), z))

Patch by: Mateja Marjanovic

Differential Revision: https://reviews.llvm.org/D98050
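
[Editorial illustration, not part of the commit message.] At the generic-MIR
level, where this combiner runs, the match and rewrite look roughly like the
hand-written sketch below (register names are invented for readability):

    ; before
    %mul:_(s16) = G_FMUL %x, %y
    %neg:_(s16) = G_FNEG %mul
    %ext:_(s32) = G_FPEXT %neg(s16)
    %sub:_(s32) = G_FSUB %ext, %z

    ; after (G_FMAD instead of G_FMA when the target prefers FMAD)
    %ex:_(s32)  = G_FPEXT %x(s16)
    %ey:_(s32)  = G_FPEXT %y(s16)
    %fma:_(s32) = G_FMA %ex, %ey, %z
    %sub:_(s32) = G_FNEG %fma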

Added: 
    llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-sub-ext-neg-mul.ll

Modified: 
    llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
    llvm/include/llvm/Target/GlobalISel/Combine.td
    llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 05bf36e599d3a..f3fa652b01754 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -690,6 +690,13 @@ class CombinerHelper {
   bool matchCombineFSubFpExtFMulToFMadOrFMA(MachineInstr &MI,
                                             BuildFnTy &MatchInfo);
 
+  /// Transform (fsub (fpext (fneg (fmul x, y))), z)
+  ///           -> (fneg (fma (fpext x), (fpext y), z))
+  ///           (fsub (fpext (fneg (fmul x, y))), z)
+  ///           -> (fneg (fmad (fpext x), (fpext y), z))
+  bool matchCombineFSubFpExtFNegFMulToFMadOrFMA(MachineInstr &MI,
+                                                BuildFnTy &MatchInfo);
+
 private:
   /// Given a non-indexed load or store instruction \p MI, find an offset that
   /// can be usefully and legally folded into it as a post-indexing operation.

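[Editorial note.] For readers new to the BuildFnTy idiom: BuildFnTy is
std::function<void(MachineIRBuilder &)>. The match function fills MatchInfo
with a closure that emits the replacement sequence, and the shared apply step
runs it and erases the matched instruction. A condensed sketch of the existing
CombinerHelper plumbing (simplified; not part of this patch):

    void CombinerHelper::applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo) {
      Builder.setInstrAndDebugLoc(MI); // insert at the matched instruction
      MatchInfo(Builder);              // emit the replacement (fpext/fma/fneg)
      MI.eraseFromParent();            // the original G_FSUB is now dead
    }
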
diff  --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 1b3b04f940499..1d189c6dea6d8 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -828,6 +828,15 @@ def combine_fsub_fpext_fmul_to_fmad_or_fma: GICombineRule<
                                                                ${info}); }]),
   (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
 
+// Transform (fsub (fneg (fpext (fmul x, y))), z) ->
+//           (fneg (fma (fpext x), (fpext y), z))
+def combine_fsub_fpext_fneg_fmul_to_fmad_or_fma: GICombineRule<
+  (defs root:$root, build_fn_matchinfo:$info),
+  (match (wip_match_opcode G_FSUB):$root,
+         [{ return Helper.matchCombineFSubFpExtFNegFMulToFMadOrFMA(
+                                            *${root}, ${info}); }]),
+  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
 // FIXME: These should use the custom predicate feature once it lands.
 def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
                                      undef_to_negative_one,
@@ -863,8 +872,8 @@ def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, add_p2i_to_ptradd,
 def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma,
   combine_fadd_fpext_fmul_to_fmad_or_fma, combine_fadd_fma_fmul_to_fmad_or_fma,
   combine_fadd_fpext_fma_fmul_to_fmad_or_fma, combine_fsub_fmul_to_fmad_or_fma,
-  combine_fsub_fneg_fmul_to_fmad_or_fma,
-  combine_fsub_fpext_fmul_to_fmad_or_fma]>;
+  combine_fsub_fneg_fmul_to_fmad_or_fma, combine_fsub_fpext_fmul_to_fmad_or_fma,
+  combine_fsub_fpext_fneg_fmul_to_fmad_or_fma]>;
 
 def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
     extract_vec_elt_combines, combines_for_extload,

diff  --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 460ea22594c18..b3dee82d1798e 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -5295,6 +5295,65 @@ bool CombinerHelper::matchCombineFSubFpExtFMulToFMadOrFMA(
   return false;
 }
 
+bool CombinerHelper::matchCombineFSubFpExtFNegFMulToFMadOrFMA(
+    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+  assert(MI.getOpcode() == TargetOpcode::G_FSUB);
+
+  bool AllowFusionGlobally, HasFMAD, Aggressive;
+  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
+    return false;
+
+  const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
+  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
+  Register LHSReg = MI.getOperand(1).getReg();
+  Register RHSReg = MI.getOperand(2).getReg();
+
+  unsigned PreferredFusedOpcode =
+      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;
+
+  auto buildMatchInfo = [=](Register Dst, Register X, Register Y, Register Z,
+                            MachineIRBuilder &B) {
+    Register FpExtX = B.buildFPExt(DstTy, X).getReg(0);
+    Register FpExtY = B.buildFPExt(DstTy, Y).getReg(0);
+    B.buildInstr(PreferredFusedOpcode, {Dst}, {FpExtX, FpExtY, Z});
+  };
+
+  MachineInstr *FMulMI;
+  // fold (fsub (fpext (fneg (fmul x, y))), z) ->
+  //      (fneg (fma (fpext x), (fpext y), z))
+  // fold (fsub (fneg (fpext (fmul x, y))), z) ->
+  //      (fneg (fma (fpext x), (fpext y), z))
+  if ((mi_match(LHSReg, MRI, m_GFPExt(m_GFNeg(m_MInstr(FMulMI)))) ||
+       mi_match(LHSReg, MRI, m_GFNeg(m_GFPExt(m_MInstr(FMulMI))))) &&
+      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
+      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy,
+                          MRI.getType(FMulMI->getOperand(0).getReg()))) {
+    MatchInfo = [=, &MI](MachineIRBuilder &B) {
+      Register FMAReg = MRI.createGenericVirtualRegister(DstTy);
+      buildMatchInfo(FMAReg, FMulMI->getOperand(1).getReg(),
+                     FMulMI->getOperand(2).getReg(), RHSReg, B);
+      B.buildFNeg(MI.getOperand(0).getReg(), FMAReg);
+    };
+    return true;
+  }
+
+  // fold (fsub x, (fpext (fneg (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
+  // fold (fsub x, (fneg (fpext (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
+  if ((mi_match(RHSReg, MRI, m_GFPExt(m_GFNeg(m_MInstr(FMulMI)))) ||
+       mi_match(RHSReg, MRI, m_GFNeg(m_GFPExt(m_MInstr(FMulMI))))) &&
+      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
+      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy,
+                          MRI.getType(FMulMI->getOperand(0).getReg()))) {
+    MatchInfo = [=, &MI](MachineIRBuilder &B) {
+      buildMatchInfo(MI.getOperand(0).getReg(), FMulMI->getOperand(1).getReg(),
+                     FMulMI->getOperand(2).getReg(), LHSReg, B);
+    };
+    return true;
+  }
+
+  return false;
+}
+
 bool CombinerHelper::tryCombine(MachineInstr &MI) {
   if (tryCombineCopy(MI))
     return true;

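[Editorial note.] The two cases above differ only in sign algebra (writing
ext() for fpext; the fast-math/contract conditions checked by
canCombineFMadOrFMA are what license the reassociation):

    ext(-(x*y)) - z  =  -(ext(x)*ext(y) + z)  ==>  fneg (fma (fpext x), (fpext y), z)
    x - ext(-(y*z))  =  ext(y)*ext(z) + x     ==>  fma (fpext y), (fpext z), x

which is why only the LHS fold needs the trailing G_FNEG.
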
diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-sub-ext-neg-mul.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-sub-ext-neg-mul.ll
new file mode 100644
index 0000000000000..bb0a3b352eb37
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-sub-ext-neg-mul.ll
@@ -0,0 +1,265 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx900 --denormal-fp-math=preserve-sign < %s | FileCheck -check-prefix=GFX9-DENORM %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 --denormal-fp-math=preserve-sign < %s | FileCheck -check-prefix=GFX10-DENORM %s
+
+; fold (fsub (fpext (fneg (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y), z))
+define amdgpu_vs float @test_f16_to_f32_sub_ext_neg_mul(half %x, half %y, float %z) {
+; GFX9-DENORM-LABEL: test_f16_to_f32_sub_ext_neg_mul:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e64 v1, -v1
+; GFX9-DENORM-NEXT:    v_mad_f32 v0, v0, v1, -v2
+; GFX9-DENORM-NEXT:    ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_f16_to_f32_sub_ext_neg_mul:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e64 v1, -v1
+; GFX10-DENORM-NEXT:    v_fma_f32 v0, v0, v1, -v2
+; GFX10-DENORM-NEXT:    ; return to shader part epilog
+entry:
+  %a = fmul fast half %x, %y
+  %b = fneg half %a
+  %c = fpext half %b to float
+  %d = fsub fast float %c, %z
+  ret float %d
+}
+
+; fold (fsub (fneg (fpext (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y), z))
+define amdgpu_vs float @test_f16_to_f32_sub_neg_ext_mul(half %x, half %y, float %z) {
+; GFX9-DENORM-LABEL: test_f16_to_f32_sub_neg_ext_mul:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e64 v1, -v1
+; GFX9-DENORM-NEXT:    v_mad_f32 v0, v0, v1, -v2
+; GFX9-DENORM-NEXT:    ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_f16_to_f32_sub_neg_ext_mul:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e64 v1, -v1
+; GFX10-DENORM-NEXT:    v_fma_f32 v0, v0, v1, -v2
+; GFX10-DENORM-NEXT:    ; return to shader part epilog
+entry:
+  %a = fmul fast half %x, %y
+  %b = fpext half %a to float
+  %c = fneg float %b
+  %d = fsub fast float %c, %z
+  ret float %d
+}
+
+
+; fold (fsub x, (fpext (fneg (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
+define amdgpu_vs float @test_f16_to_f32_sub_ext_neg_mul2(float %x, half %y, half %z) {
+; GFX9-DENORM-LABEL: test_f16_to_f32_sub_ext_neg_mul2:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e64 v2, -v2
+; GFX9-DENORM-NEXT:    v_mad_f32 v0, -v1, v2, v0
+; GFX9-DENORM-NEXT:    ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_f16_to_f32_sub_ext_neg_mul2:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e64 v2, -v2
+; GFX10-DENORM-NEXT:    v_fma_f32 v0, -v1, v2, v0
+; GFX10-DENORM-NEXT:    ; return to shader part epilog
+entry:
+  %a = fmul fast half %y, %z
+  %b = fneg half %a
+  %c = fpext half %b to float
+  %d = fsub fast float %x, %c
+  ret float %d
+}
+
+; fold (fsub x, (fneg (fpext (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
+define amdgpu_vs float @test_f16_to_f32_sub_neg_ext_mul2(float %x, half %y, half %z) {
+; GFX9-DENORM-LABEL: test_f16_to_f32_sub_neg_ext_mul2:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e64 v2, -v2
+; GFX9-DENORM-NEXT:    v_mad_f32 v0, -v1, v2, v0
+; GFX9-DENORM-NEXT:    ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_f16_to_f32_sub_neg_ext_mul2:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e64 v2, -v2
+; GFX10-DENORM-NEXT:    v_fma_f32 v0, -v1, v2, v0
+; GFX10-DENORM-NEXT:    ; return to shader part epilog
+entry:
+  %a = fmul fast half %y, %z
+  %b = fpext half %a to float
+  %c = fneg float %b
+  %d = fsub fast float %x, %c
+  ret float %d
+}
+
+; fold (fsub (fpext (fneg (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y), z))
+define amdgpu_vs <4 x float> @test_v4f16_to_v4f32_sub_ext_neg_mul(<4 x half> %x, <4 x half> %y, <4 x float> %z) {
+; GFX9-DENORM-LABEL: test_v4f16_to_v4f32_sub_ext_neg_mul:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v0, v0, v2 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v1, v1, v3 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v2, v0
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v8, v1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_sdwa v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v0, v2, v4
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v1, v3, v5
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v2, v8, v6
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v3, v9, v7
+; GFX9-DENORM-NEXT:    ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_v4f16_to_v4f32_sub_ext_neg_mul:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    s_mov_b32 s0, 0x80008000
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v8, v0
+; GFX10-DENORM-NEXT:    v_xor_b32_e32 v2, s0, v2
+; GFX10-DENORM-NEXT:    v_xor_b32_e32 v3, s0, v3
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v10, v1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v11, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v0, v2
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v2, v3
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_fma_f32 v0, v8, v0, -v4
+; GFX10-DENORM-NEXT:    v_fma_f32 v1, v9, v1, -v5
+; GFX10-DENORM-NEXT:    v_fma_f32 v2, v10, v2, -v6
+; GFX10-DENORM-NEXT:    v_fma_f32 v3, v11, v3, -v7
+; GFX10-DENORM-NEXT:    ; return to shader part epilog
+entry:
+  %a = fmul fast <4 x half> %x, %y
+  %b = fneg <4 x half> %a
+  %c = fpext <4 x half> %b to <4 x float>
+  %d = fsub fast <4 x float> %c, %z
+  ret <4 x float> %d
+}
+
+; fold (fsub (fneg (fpext (fmul x, y))), z) -> (fneg (fma (fpext x), (fpext y), z))
+define amdgpu_vs <4 x float> @test_v4f16_to_v4f32_sub_neg_ext_mul(<4 x half> %x, <4 x half> %y, <4 x float> %z) {
+; GFX9-DENORM-LABEL: test_v4f16_to_v4f32_sub_neg_ext_mul:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v0, v0, v2 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v1, v1, v3 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v2, v0
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v8, v1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_sdwa v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v0, v2, v4
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v1, v3, v5
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v2, v8, v6
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v3, v9, v7
+; GFX9-DENORM-NEXT:    ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_v4f16_to_v4f32_sub_neg_ext_mul:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    s_mov_b32 s0, 0x80008000
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v8, v0
+; GFX10-DENORM-NEXT:    v_xor_b32_e32 v2, s0, v2
+; GFX10-DENORM-NEXT:    v_xor_b32_e32 v3, s0, v3
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v10, v1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v11, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v0, v2
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v2, v3
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_fma_f32 v0, v8, v0, -v4
+; GFX10-DENORM-NEXT:    v_fma_f32 v1, v9, v1, -v5
+; GFX10-DENORM-NEXT:    v_fma_f32 v2, v10, v2, -v6
+; GFX10-DENORM-NEXT:    v_fma_f32 v3, v11, v3, -v7
+; GFX10-DENORM-NEXT:    ; return to shader part epilog
+entry:
+  %a = fmul fast <4 x half> %x, %y
+  %b = fpext <4 x half> %a to <4 x float>
+  %c = fneg <4 x float> %b
+  %d = fsub fast <4 x float> %c, %z
+  ret <4 x float> %d
+}
+
+
+; fold (fsub x, (fpext (fneg (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
+define amdgpu_vs <4 x float> @test_v4f16_to_v4f32_sub_ext_neg_mul2(<4 x float> %x, <4 x half> %y, <4 x half> %z) {
+; GFX9-DENORM-LABEL: test_v4f16_to_v4f32_sub_ext_neg_mul2:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v4, v4, v6 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v5, v5, v7 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v6, v4
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_sdwa v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v7, v5
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_sdwa v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v0, v0, v6
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v1, v1, v4
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v2, v2, v7
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v3, v3, v5
+; GFX9-DENORM-NEXT:    ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_v4f16_to_v4f32_sub_ext_neg_mul2:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    s_mov_b32 s0, 0x80008000
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v8, v4
+; GFX10-DENORM-NEXT:    v_xor_b32_e32 v6, s0, v6
+; GFX10-DENORM-NEXT:    v_xor_b32_e32 v7, s0, v7
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v9, v5
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v10, v6
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v11, v7
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_fma_f32 v0, -v8, v10, v0
+; GFX10-DENORM-NEXT:    v_fma_f32 v1, -v4, v6, v1
+; GFX10-DENORM-NEXT:    v_fma_f32 v2, -v9, v11, v2
+; GFX10-DENORM-NEXT:    v_fma_f32 v3, -v5, v7, v3
+; GFX10-DENORM-NEXT:    ; return to shader part epilog
+entry:
+  %a = fmul fast <4 x half> %y, %z
+  %b = fneg <4 x half> %a
+  %c = fpext <4 x half> %b to <4 x float>
+  %d = fsub fast <4 x float> %x, %c
+  ret <4 x float> %d
+}
+
+; fold (fsub x, (fneg (fpext (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
+define amdgpu_vs <4 x float> @test_v4f16_to_v4f32_sub_neg_ext_mul2(<4 x float> %x, <4 x half> %y, <4 x half> %z) {
+; GFX9-DENORM-LABEL: test_v4f16_to_v4f32_sub_neg_ext_mul2:
+; GFX9-DENORM:       ; %bb.0: ; %entry
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v4, v4, v6 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-DENORM-NEXT:    v_pk_mul_f16 v5, v5, v7 neg_lo:[0,1] neg_hi:[0,1]
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v6, v4
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_sdwa v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_e32 v7, v5
+; GFX9-DENORM-NEXT:    v_cvt_f32_f16_sdwa v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v0, v0, v6
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v1, v1, v4
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v2, v2, v7
+; GFX9-DENORM-NEXT:    v_sub_f32_e32 v3, v3, v5
+; GFX9-DENORM-NEXT:    ; return to shader part epilog
+;
+; GFX10-DENORM-LABEL: test_v4f16_to_v4f32_sub_neg_ext_mul2:
+; GFX10-DENORM:       ; %bb.0: ; %entry
+; GFX10-DENORM-NEXT:    s_mov_b32 s0, 0x80008000
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v8, v4
+; GFX10-DENORM-NEXT:    v_xor_b32_e32 v6, s0, v6
+; GFX10-DENORM-NEXT:    v_xor_b32_e32 v7, s0, v7
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v9, v5
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v5, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v10, v6
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_e32 v11, v7
+; GFX10-DENORM-NEXT:    v_cvt_f32_f16_sdwa v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX10-DENORM-NEXT:    v_fma_f32 v0, -v8, v10, v0
+; GFX10-DENORM-NEXT:    v_fma_f32 v1, -v4, v6, v1
+; GFX10-DENORM-NEXT:    v_fma_f32 v2, -v9, v11, v2
+; GFX10-DENORM-NEXT:    v_fma_f32 v3, -v5, v7, v3
+; GFX10-DENORM-NEXT:    ; return to shader part epilog
+entry:
+  %a = fmul fast <4 x half> %y, %z
+  %b = fpext <4 x half> %a to <4 x float>
+  %c = fneg <4 x float> %b
+  %d = fsub fast <4 x float> %x, %c
+  ret <4 x float> %d
+}
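
[Editorial note.] The CHECK lines were produced with
utils/update_llc_test_checks.py (see the NOTE at the top of the file). After a
codegen change they can be regenerated with something like the following,
assuming an LLVM build with the AMDGPU target and llc at build/bin/llc:

    llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-sub-ext-neg-mul.ll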

