[llvm-branch-commits] [llvm] [AMDGPU][DAGCombiner][GlobalISel] Extend allMulUsesCanBeContracted with FMA/FMAD pattern (PR #188117)
Adel Ejjeh via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Mon Mar 23 15:51:13 PDT 2026
https://github.com/adelejjeh updated https://github.com/llvm/llvm-project/pull/188117
>From a08c141fb1661589e984d7084abab064d1667adf Mon Sep 17 00:00:00 2001
From: Adel Ejjeh <adel.ejjeh at amd.com>
Date: Thu, 12 Mar 2026 11:11:20 -0500
Subject: [PATCH] [AMDGPU][DAGCombiner][GlobalISel] Extend
allMulUsesCanBeContracted with FMA/FMAD pattern
Add conservative FMA/FMAD recognition to allMulUsesCanBeContracted:
a multiply used by an existing FMA/FMAD is assumed to be contractable
(it's already being contracted elsewhere). This avoids unnecessary
contraction blocking for multiplies that feed into FMA chains.
Also adds FMA/FMAD to the set of recognized FPEXT users (fpext(fmul)
--> fma is recognized as contractable when isFPExtFoldable returns true).
Guards all remaining FMA-chain reassociation fold sites in both
SDAG (visitFADDForFMACombine/visitFSUBForFMACombine, 8 sites) and
GISel (matchCombineFAddFpExtFMulToFMadOrFMAAggressive, 4 sites).
This re-enables contractions that were conservatively blocked in
earlier patches where the multiply had an FMA use that was not yet
recognized; the CHECK lines in dagcombine-fma-crash.ll and
dagcombine-fma-fmad.ll revert to upstream behavior.
Co-Authored-By: Claude Opus 4.6 <noreply at anthropic.com>
---
.../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 19 +-
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 23 ++-
.../CodeGen/AMDGPU/dagcombine-fma-crash.ll | 22 +-
.../CodeGen/AMDGPU/dagcombine-fma-fmad.ll | 189 +++++++++---------
4 files changed, 141 insertions(+), 112 deletions(-)
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 0941e6da0f40f..2b9b4b5828cc4 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -6320,8 +6320,9 @@ static bool hasMoreUses(const MachineInstr &MI0, const MachineInstr &MI1,
/// - fmul --> fadd/fsub: Direct contraction
/// - fmul --> fneg --> fsub: Contraction through fneg
/// - fmul --> fneg --> fpext --> fsub: FNEG then FPEXT folds if foldable
-/// - fmul --> fpext --> {fadd, fsub}: FPEXT folds if foldable
+/// - fmul --> fpext --> {fadd, fsub, fma}: FPEXT folds if foldable
/// - fmul --> fpext --> fneg --> fsub: FPEXT then FNEG to FSUB
+/// - fmul --> fma: Assume FMA can always be contracted (conservative)
bool CombinerHelper::allMulUsesCanBeContracted(
const MachineInstr &MI, unsigned PreferredFusedOpcode) const {
const auto &TLI = getTargetLowering();
@@ -6380,7 +6381,9 @@ bool CombinerHelper::allMulUsesCanBeContracted(
return false;
unsigned ExtUseOpcode = FPExtUseMI.getOpcode();
if (ExtUseOpcode == TargetOpcode::G_FADD ||
- ExtUseOpcode == TargetOpcode::G_FSUB) {
+ ExtUseOpcode == TargetOpcode::G_FSUB ||
+ ExtUseOpcode == TargetOpcode::G_FMA ||
+ ExtUseOpcode == TargetOpcode::G_FMAD) {
continue;
}
if (ExtUseOpcode == TargetOpcode::G_FNEG) {
@@ -6398,6 +6401,14 @@ bool CombinerHelper::allMulUsesCanBeContracted(
continue;
}
+ // FMA - assume a mul being used by an FMA will get contracted.
+ // There is a chance we may miss some corner cases where we will still have
+ // the mul left over, but this keeps the analysis simple and maintains
+ // existing behavior in the worst case.
+ if (Opcode == TargetOpcode::G_FMA || Opcode == TargetOpcode::G_FMAD) {
+ continue;
+ }
+
// Any other use type is not currently recognized as contractable.
return false;
}
@@ -6670,6 +6681,7 @@ bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
mi_match(LHS.MI->getOperand(3).getReg(), MRI,
m_GFPExt(m_MInstr(FMulMI))) &&
isContractableFMul(*FMulMI, AllowFusionGlobally) &&
+ allMulUsesCanBeContracted(*FMulMI, PreferredFusedOpcode) &&
TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
MRI.getType(FMulMI->getOperand(0).getReg()))) {
MatchInfo = [=](MachineIRBuilder &B) {
@@ -6690,6 +6702,7 @@ bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
FMAMI->getOpcode() == PreferredFusedOpcode) {
MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
+ allMulUsesCanBeContracted(*FMulMI, PreferredFusedOpcode) &&
TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
MRI.getType(FMAMI->getOperand(0).getReg()))) {
MatchInfo = [=](MachineIRBuilder &B) {
@@ -6711,6 +6724,7 @@ bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
mi_match(RHS.MI->getOperand(3).getReg(), MRI,
m_GFPExt(m_MInstr(FMulMI))) &&
isContractableFMul(*FMulMI, AllowFusionGlobally) &&
+ allMulUsesCanBeContracted(*FMulMI, PreferredFusedOpcode) &&
TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
MRI.getType(FMulMI->getOperand(0).getReg()))) {
MatchInfo = [=](MachineIRBuilder &B) {
@@ -6731,6 +6745,7 @@ bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
FMAMI->getOpcode() == PreferredFusedOpcode) {
MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
+ allMulUsesCanBeContracted(*FMulMI, PreferredFusedOpcode) &&
TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
MRI.getType(FMAMI->getOperand(0).getReg()))) {
MatchInfo = [=](MachineIRBuilder &B) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 1b9acb3ab6b9d..dd92ea8f847d8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -17677,8 +17677,9 @@ static bool isFusedOp(const MatcherClass &Matcher, SDValue N) {
/// - fmul --> fadd/fsub: Direct contraction
/// - fmul --> fneg --> fsub: Contraction through fneg
/// - fmul --> fneg --> fpext --> fsub: FNEG then FPEXT folds if foldable
-/// - fmul --> fpext --> {fadd, fsub}: FPEXT folds if foldable
+/// - fmul --> fpext --> {fadd, fsub, fma}: FPEXT folds if foldable
/// - fmul --> fpext --> fneg --> fsub: FPEXT then FNEG to FSUB
+/// - fmul --> fma: Assume FMA can always be contracted (conservative)
static bool allMulUsesCanBeContracted(SDValue Mul,
const unsigned PreferredFusedOpcode,
const TargetLowering &TLI,
@@ -17727,8 +17728,9 @@ static bool allMulUsesCanBeContracted(SDValue Mul,
if (!TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, DstVT, SrcVT))
return false; // this FPEXT cannot be folded
- if (ExtUserOp == ISD::FADD || ExtUserOp == ISD::FSUB) {
- continue; // FPEXT --> {FADD, FSUB} is contractable
+ if (ExtUserOp == ISD::FADD || ExtUserOp == ISD::FSUB ||
+ ExtUserOp == ISD::FMA || ExtUserOp == ISD::FMAD) {
+ continue; // FPEXT --> {FADD, FSUB, FMA, FMAD} is contractable
}
if (ExtUserOp == ISD::FNEG) {
// FP_EXTEND --> FNEG --> FSUB
@@ -17744,6 +17746,13 @@ static bool allMulUsesCanBeContracted(SDValue Mul,
continue; // All FPEXT uses are contractable
}
+ // FMA - assume a mul being used by an FMA will get contracted.
+ // There is a chance we may miss some corner cases where we will still have
+ // the mul left over, but this keeps the analysis simple and maintains
+ // existing behavior in the worst case.
+ if (Opcode == ISD::FMA || Opcode == ISD::FMAD)
+ continue;
+
// Any other use type is not currently recognized as contractable.
return false;
}
@@ -17918,6 +17927,7 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
if (matcher.match(N02, ISD::FP_EXTEND)) {
SDValue N020 = N02.getOperand(0);
if (isContractableFMUL(N020) &&
+ allMulUsesCanBeContracted(N020, PreferredFusedOpcode, TLI, DAG) &&
TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
N020.getValueType())) {
return FoldFAddFMAFPExtFMul(N0.getOperand(0), N0.getOperand(1),
@@ -17947,6 +17957,7 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
if (isFusedOp(matcher, N00)) {
SDValue N002 = N00.getOperand(2);
if (isContractableFMUL(N002) &&
+ allMulUsesCanBeContracted(N002, PreferredFusedOpcode, TLI, DAG) &&
TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
N00.getValueType())) {
return FoldFAddFPExtFMAFMul(N00.getOperand(0), N00.getOperand(1),
@@ -17963,6 +17974,7 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
if (N12.getOpcode() == ISD::FP_EXTEND) {
SDValue N120 = N12.getOperand(0);
if (isContractableFMUL(N120) &&
+ allMulUsesCanBeContracted(N120, PreferredFusedOpcode, TLI, DAG) &&
TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
N120.getValueType())) {
return FoldFAddFMAFPExtFMul(N1.getOperand(0), N1.getOperand(1),
@@ -17982,6 +17994,7 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
if (isFusedOp(matcher, N10)) {
SDValue N102 = N10.getOperand(2);
if (isContractableFMUL(N102) &&
+ allMulUsesCanBeContracted(N102, PreferredFusedOpcode, TLI, DAG) &&
TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
N10.getValueType())) {
return FoldFAddFPExtFMAFMul(N10.getOperand(0), N10.getOperand(1),
@@ -18244,6 +18257,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
if (matcher.match(N02, ISD::FP_EXTEND)) {
SDValue N020 = N02.getOperand(0);
if (isContractableAndReassociableFMUL(N020) &&
+ allMulUsesCanBeContracted(N020, PreferredFusedOpcode, TLI, DAG) &&
TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
N020.getValueType())) {
return matcher.getNode(
@@ -18268,6 +18282,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
if (isFusedOp(matcher, N00)) {
SDValue N002 = N00.getOperand(2);
if (isContractableAndReassociableFMUL(N002) &&
+ allMulUsesCanBeContracted(N002, PreferredFusedOpcode, TLI, DAG) &&
TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
N00.getValueType())) {
return matcher.getNode(
@@ -18289,6 +18304,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
matcher.match(N1.getOperand(2), ISD::FP_EXTEND) && N1->hasOneUse()) {
SDValue N120 = N1.getOperand(2).getOperand(0);
if (isContractableAndReassociableFMUL(N120) &&
+ allMulUsesCanBeContracted(N120, PreferredFusedOpcode, TLI, DAG) &&
TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
N120.getValueType())) {
SDValue N1200 = N120.getOperand(0);
@@ -18318,6 +18334,7 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
SDValue N101 = CvtSrc.getOperand(1);
SDValue N102 = CvtSrc.getOperand(2);
if (isContractableAndReassociableFMUL(N102) &&
+ allMulUsesCanBeContracted(N102, PreferredFusedOpcode, TLI, DAG) &&
TLI.isFPExtFoldable(DAG, PreferredFusedOpcode, VT,
CvtSrc.getValueType())) {
SDValue N1020 = N102.getOperand(0);
diff --git a/llvm/test/CodeGen/AMDGPU/dagcombine-fma-crash.ll b/llvm/test/CodeGen/AMDGPU/dagcombine-fma-crash.ll
index 57070e763e79b..142494a803755 100644
--- a/llvm/test/CodeGen/AMDGPU/dagcombine-fma-crash.ll
+++ b/llvm/test/CodeGen/AMDGPU/dagcombine-fma-crash.ll
@@ -20,24 +20,22 @@ define void @main(float %arg) {
; CHECK-NEXT: bb.1.bb2:
; CHECK-NEXT: successors: %bb.2(0x80000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = contract reassoc nofpexcept V_MUL_F32_e64 0, [[S_MOV_B32_]], 0, [[S_MOV_B32_]], 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sgpr_32 = S_MOV_B32 1065353216
- ; CHECK-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = contract reassoc nofpexcept V_ADD_F32_e64 0, [[V_MUL_F32_e64_]], 0, [[S_MOV_B32_2]], 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = contract reassoc nofpexcept V_FMAC_F32_e64 0, [[COPY]], 0, [[COPY]], 0, [[V_ADD_F32_e64_]], 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: [[V_FMAC_F32_e64_1:%[0-9]+]]:vgpr_32 = contract reassoc nofpexcept V_FMAC_F32_e64 0, [[COPY]], 0, [[COPY]], 0, [[V_MUL_F32_e64_]], 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: [[V_ADD_F32_e64_1:%[0-9]+]]:vgpr_32 = contract reassoc nofpexcept V_ADD_F32_e64 0, killed [[V_FMAC_F32_e64_1]], 0, [[S_MOV_B32_2]], 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1065353216, implicit $exec
+ ; CHECK-NEXT: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = contract reassoc nofpexcept V_FMAC_F32_e64 0, [[S_MOV_B32_]], 0, [[S_MOV_B32_]], 0, [[V_MOV_B32_e32_]], 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[V_FMAC_F32_e64_1:%[0-9]+]]:vgpr_32 = contract reassoc nofpexcept V_FMAC_F32_e64 0, [[COPY]], 0, [[COPY]], 0, [[V_FMAC_F32_e64_]], 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = contract reassoc nofpexcept V_ADD_F32_e64 0, [[V_FMAC_F32_e64_1]], 0, [[V_MOV_B32_e32_]], 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2.bb11:
; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.4(0x40000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI [[DEF]], %bb.0, [[V_FMAC_F32_e64_]], %bb.1
- ; CHECK-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[DEF]], %bb.0, [[V_ADD_F32_e64_1]], %bb.1
- ; CHECK-NEXT: [[PHI2:%[0-9]+]]:sreg_32_xm0_xexec = PHI [[S_MOV_B32_1]], %bb.0, [[S_MOV_B32_3]], %bb.1
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI [[DEF]], %bb.0, [[V_FMAC_F32_e64_1]], %bb.1
+ ; CHECK-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[DEF]], %bb.0, [[V_ADD_F32_e64_]], %bb.1
+ ; CHECK-NEXT: [[PHI2:%[0-9]+]]:sreg_32_xm0_xexec = PHI [[S_MOV_B32_1]], %bb.0, [[S_MOV_B32_2]], %bb.1
; CHECK-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, [[PHI2]], implicit $exec
- ; CHECK-NEXT: [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 1
+ ; CHECK-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 1
; CHECK-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[V_CNDMASK_B32_e64_]]
- ; CHECK-NEXT: S_CMP_LG_U32 killed [[COPY1]], killed [[S_MOV_B32_4]], implicit-def $scc
+ ; CHECK-NEXT: S_CMP_LG_U32 killed [[COPY1]], killed [[S_MOV_B32_3]], implicit-def $scc
; CHECK-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $scc
; CHECK-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 $exec_lo, killed [[COPY2]], implicit-def dead $scc
; CHECK-NEXT: $vcc_lo = COPY [[S_AND_B32_1]]
diff --git a/llvm/test/CodeGen/AMDGPU/dagcombine-fma-fmad.ll b/llvm/test/CodeGen/AMDGPU/dagcombine-fma-fmad.ll
index e95f19a19d134..28a18ec3845e0 100644
--- a/llvm/test/CodeGen/AMDGPU/dagcombine-fma-fmad.ll
+++ b/llvm/test/CodeGen/AMDGPU/dagcombine-fma-fmad.ll
@@ -12,64 +12,64 @@ define amdgpu_ps float @_amdgpu_ps_main() #0 {
; GFX10-NEXT: s_and_b32 exec_lo, exec_lo, s0
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: s_clause 0x1
-; GFX10-NEXT: image_sample v2, v[0:1], s[0:7], s[0:3] dmask:0x1 dim:SQ_RSRC_IMG_2D
-; GFX10-NEXT: image_sample v3, v[0:1], s[0:7], s[0:3] dmask:0x4 dim:SQ_RSRC_IMG_2D
+; GFX10-NEXT: image_sample v2, v[0:1], s[0:7], s[0:3] dmask:0x4 dim:SQ_RSRC_IMG_2D
+; GFX10-NEXT: image_sample v3, v[0:1], s[0:7], s[0:3] dmask:0x1 dim:SQ_RSRC_IMG_2D
; GFX10-NEXT: v_mov_b32_e32 v4, 0
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: image_load_mip v4, v[2:4], s[0:7] dmask:0x4 dim:SQ_RSRC_IMG_2D unorm
-; GFX10-NEXT: s_clause 0x2
+; GFX10-NEXT: s_clause 0x3
; GFX10-NEXT: s_buffer_load_dword s24, s[0:3], 0x5c
-; GFX10-NEXT: s_buffer_load_dword s25, s[0:3], 0x7c
+; GFX10-NEXT: s_buffer_load_dword s28, s[0:3], 0x7c
+; GFX10-NEXT: s_buffer_load_dword s29, s[0:3], 0xc0
; GFX10-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
; GFX10-NEXT: s_nop 0
; GFX10-NEXT: s_buffer_load_dwordx4 s[0:3], s[0:3], 0x40
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_clause 0x2
-; GFX10-NEXT: s_buffer_load_dword s26, s[0:3], 0xc0
+; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: s_buffer_load_dwordx4 s[4:7], s[0:3], 0x50
-; GFX10-NEXT: s_buffer_load_dwordx4 s[8:11], s[0:3], 0x60
-; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_clause 0x3
-; GFX10-NEXT: s_buffer_load_dword s4, s[0:3], 0x2c
-; GFX10-NEXT: s_buffer_load_dwordx4 s[12:15], s[0:3], 0x70
-; GFX10-NEXT: s_buffer_load_dwordx4 s[16:19], s[0:3], 0x20
-; GFX10-NEXT: s_buffer_load_dwordx4 s[20:23], s[0:3], 0x0
-; GFX10-NEXT: v_max_f32_e64 v5, s0, s0 clamp
-; GFX10-NEXT: v_sub_f32_e64 v6, s24, s25
-; GFX10-NEXT: v_mul_f32_e32 v7, s2, v5
-; GFX10-NEXT: s_buffer_load_dwordx4 s[0:3], s[0:3], 0x10
-; GFX10-NEXT: v_fma_f32 v1, v1, v6, s25
+; GFX10-NEXT: s_nop 0
+; GFX10-NEXT: s_buffer_load_dword s0, s[0:3], 0x2c
+; GFX10-NEXT: v_sub_f32_e64 v5, s24, s28
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_clause 0x4
+; GFX10-NEXT: s_buffer_load_dwordx4 s[8:11], s[0:3], 0x60
+; GFX10-NEXT: s_buffer_load_dwordx4 s[12:15], s[0:3], 0x20
+; GFX10-NEXT: s_buffer_load_dwordx4 s[16:19], s[0:3], 0x0
+; GFX10-NEXT: s_buffer_load_dwordx4 s[20:23], s[0:3], 0x70
+; GFX10-NEXT: s_buffer_load_dwordx4 s[24:27], s[0:3], 0x10
+; GFX10-NEXT: v_fma_f32 v1, v1, v5, s28
+; GFX10-NEXT: v_max_f32_e64 v6, s0, s0 clamp
+; GFX10-NEXT: v_add_f32_e64 v5, s29, -1.0
+; GFX10-NEXT: v_sub_f32_e32 v8, s0, v1
+; GFX10-NEXT: v_fma_f32 v7, -s2, v6, s6
+; GFX10-NEXT: v_fma_f32 v5, v6, v5, 1.0
+; GFX10-NEXT: v_mad_f32 v10, s2, v6, v2
; GFX10-NEXT: s_mov_b32 s0, 0x3c23d70a
-; GFX10-NEXT: v_add_f32_e64 v6, s26, -1.0
-; GFX10-NEXT: v_sub_f32_e32 v8, s6, v7
-; GFX10-NEXT: v_mul_f32_e32 v0, s10, v0
-; GFX10-NEXT: v_sub_f32_e32 v9, s4, v1
-; GFX10-NEXT: v_mul_f32_e32 v2, s14, v2
-; GFX10-NEXT: v_fma_f32 v6, v5, v6, 1.0
-; GFX10-NEXT: v_fmac_f32_e32 v7, v8, v5
-; GFX10-NEXT: v_sub_f32_e32 v8, s18, v0
-; GFX10-NEXT: v_fmac_f32_e32 v1, v5, v9
-; GFX10-NEXT: v_mul_f32_e32 v9, s22, v3
-; GFX10-NEXT: v_mul_f32_e32 v2, v5, v2
-; GFX10-NEXT: v_add_f32_e32 v7, v3, v7
-; GFX10-NEXT: v_fmac_f32_e32 v0, v8, v5
-; GFX10-NEXT: v_sub_f32_e32 v1, v1, v6
-; GFX10-NEXT: v_mul_f32_e32 v8, v9, v5
-; GFX10-NEXT: v_sub_f32_e32 v0, v0, v2
-; GFX10-NEXT: v_fmac_f32_e32 v6, v1, v5
-; GFX10-NEXT: v_fma_f32 v1, v3, s2, -v8
-; GFX10-NEXT: v_fmac_f32_e32 v2, v0, v5
-; GFX10-NEXT: v_fmaak_f32 v0, s0, v6, 0x3ca3d70a
-; GFX10-NEXT: v_fmac_f32_e32 v8, v1, v5
-; GFX10-NEXT: v_mul_f32_e32 v1, v3, v5
-; GFX10-NEXT: v_mul_f32_e32 v0, v2, v0
+; GFX10-NEXT: v_fmac_f32_e32 v1, v6, v8
+; GFX10-NEXT: v_fmac_f32_e32 v10, v7, v6
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: v_mul_f32_e32 v9, s10, v0
+; GFX10-NEXT: v_fma_f32 v0, -v0, s10, s14
+; GFX10-NEXT: v_mul_f32_e32 v8, s18, v2
+; GFX10-NEXT: v_mul_f32_e32 v3, s22, v3
+; GFX10-NEXT: v_fmac_f32_e32 v9, v0, v6
+; GFX10-NEXT: v_sub_f32_e32 v0, v1, v5
+; GFX10-NEXT: v_mul_f32_e32 v1, v8, v6
+; GFX10-NEXT: v_mul_f32_e32 v7, v6, v3
+; GFX10-NEXT: v_fma_f32 v3, -v6, v3, v9
+; GFX10-NEXT: v_fmac_f32_e32 v5, v0, v6
+; GFX10-NEXT: v_fma_f32 v0, v2, s26, -v1
+; GFX10-NEXT: v_fmac_f32_e32 v7, v3, v6
+; GFX10-NEXT: v_fmac_f32_e32 v1, v0, v6
+; GFX10-NEXT: v_mul_f32_e32 v0, v2, v6
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_add_f32_e32 v4, v4, v7
-; GFX10-NEXT: v_mul_f32_e32 v4, v4, v5
-; GFX10-NEXT: v_mul_f32_e32 v2, v4, v8
-; GFX10-NEXT: v_fmac_f32_e32 v2, v0, v1
-; GFX10-NEXT: v_max_f32_e32 v0, 0, v2
+; GFX10-NEXT: v_add_f32_e32 v4, v4, v10
+; GFX10-NEXT: v_mul_f32_e32 v3, v4, v6
+; GFX10-NEXT: v_fmaak_f32 v4, s0, v5, 0x3ca3d70a
+; GFX10-NEXT: v_mul_f32_e32 v1, v3, v1
+; GFX10-NEXT: v_mul_f32_e32 v2, v7, v4
+; GFX10-NEXT: v_fmac_f32_e32 v1, v2, v0
+; GFX10-NEXT: v_max_f32_e32 v0, 0, v1
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: _amdgpu_ps_main:
@@ -80,68 +80,67 @@ define amdgpu_ps float @_amdgpu_ps_main() #0 {
; GFX11-NEXT: s_and_b32 exec_lo, exec_lo, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: image_sample v2, v[0:1], s[0:7], s[0:3] dmask:0x1 dim:SQ_RSRC_IMG_2D
-; GFX11-NEXT: image_sample v3, v[0:1], s[0:7], s[0:3] dmask:0x4 dim:SQ_RSRC_IMG_2D
+; GFX11-NEXT: image_sample v2, v[0:1], s[0:7], s[0:3] dmask:0x4 dim:SQ_RSRC_IMG_2D
+; GFX11-NEXT: image_sample v3, v[0:1], s[0:7], s[0:3] dmask:0x1 dim:SQ_RSRC_IMG_2D
; GFX11-NEXT: v_mov_b32_e32 v4, 0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: image_load_mip v4, v[2:4], s[0:7] dmask:0x4 dim:SQ_RSRC_IMG_2D unorm
-; GFX11-NEXT: s_clause 0x2
+; GFX11-NEXT: s_clause 0x3
; GFX11-NEXT: s_buffer_load_b32 s24, s[0:3], 0x5c
-; GFX11-NEXT: s_buffer_load_b32 s25, s[0:3], 0x7c
+; GFX11-NEXT: s_buffer_load_b32 s28, s[0:3], 0x7c
+; GFX11-NEXT: s_buffer_load_b32 s29, s[0:3], 0xc0
; GFX11-NEXT: s_buffer_load_b128 s[0:3], s[0:3], 0x40
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_clause 0x2
-; GFX11-NEXT: s_buffer_load_b32 s26, s[0:3], 0xc0
+; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_buffer_load_b128 s[4:7], s[0:3], 0x50
-; GFX11-NEXT: s_buffer_load_b128 s[8:11], s[0:3], 0x60
+; GFX11-NEXT: s_buffer_load_b32 s0, s[0:3], 0x2c
+; GFX11-NEXT: v_sub_f32_e64 v5, s24, s28
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_clause 0x3
-; GFX11-NEXT: s_buffer_load_b32 s4, s[0:3], 0x2c
-; GFX11-NEXT: s_buffer_load_b128 s[12:15], s[0:3], 0x70
-; GFX11-NEXT: s_buffer_load_b128 s[16:19], s[0:3], 0x20
-; GFX11-NEXT: s_buffer_load_b128 s[20:23], s[0:3], 0x0
-; GFX11-NEXT: v_max_f32_e64 v5, s0, s0 clamp
-; GFX11-NEXT: v_sub_f32_e64 v6, s24, s25
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_mul_f32_e32 v7, s2, v5
-; GFX11-NEXT: s_buffer_load_b128 s[0:3], s[0:3], 0x10
-; GFX11-NEXT: v_fma_f32 v1, v1, v6, s25
-; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_buffer_load_b128 s[8:11], s[0:3], 0x60
+; GFX11-NEXT: s_buffer_load_b128 s[12:15], s[0:3], 0x20
+; GFX11-NEXT: s_buffer_load_b128 s[16:19], s[0:3], 0x0
+; GFX11-NEXT: s_buffer_load_b128 s[20:23], s[0:3], 0x70
+; GFX11-NEXT: v_fma_f32 v1, v1, v5, s28
+; GFX11-NEXT: v_max_f32_e64 v6, s0, s0 clamp
+; GFX11-NEXT: s_buffer_load_b128 s[24:27], s[0:3], 0x10
+; GFX11-NEXT: v_add_f32_e64 v5, s29, -1.0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_sub_f32_e32 v8, s0, v1
+; GFX11-NEXT: v_fma_f32 v7, -s2, v6, s6
+; GFX11-NEXT: v_fma_f32 v10, s2, v6, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_fma_f32 v5, v6, v5, 1.0
; GFX11-NEXT: s_mov_b32 s0, 0x3c23d70a
-; GFX11-NEXT: v_add_f32_e64 v6, s26, -1.0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_sub_f32 v8, s6, v7 :: v_dual_sub_f32 v9, s4, v1
-; GFX11-NEXT: v_mul_f32_e32 v0, s10, v0
-; GFX11-NEXT: v_mul_f32_e32 v2, s14, v2
-; GFX11-NEXT: v_fma_f32 v6, v5, v6, 1.0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_fmac_f32_e32 v7, v8, v5
-; GFX11-NEXT: v_dual_fmac_f32 v1, v5, v9 :: v_dual_sub_f32 v8, s18, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_mul_f32 v9, s22, v3 :: v_dual_mul_f32 v2, v5, v2
-; GFX11-NEXT: v_add_f32_e32 v7, v3, v7
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mul_f32_e32 v9, s10, v0
+; GFX11-NEXT: v_fma_f32 v0, -v0, s10, s14
+; GFX11-NEXT: v_mul_f32_e32 v3, s22, v3
+; GFX11-NEXT: v_dual_fmac_f32 v1, v6, v8 :: v_dual_mul_f32 v8, s18, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_fmac_f32_e32 v9, v0, v6
+; GFX11-NEXT: v_dual_fmac_f32 v10, v7, v6 :: v_dual_mul_f32 v7, v6, v3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_sub_f32 v1, v1, v6 :: v_dual_fmac_f32 v0, v8, v5
-; GFX11-NEXT: v_mul_f32_e32 v8, v9, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_fmac_f32_e32 v6, v1, v5
-; GFX11-NEXT: v_sub_f32_e32 v0, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_fma_f32 v1, v3, s2, -v8
-; GFX11-NEXT: v_fmac_f32_e32 v2, v0, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_fmaak_f32 v0, s0, v6, 0x3ca3d70a
-; GFX11-NEXT: v_fmac_f32_e32 v8, v1, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mul_f32 v1, v3, v5 :: v_dual_mul_f32 v0, v2, v0
+; GFX11-NEXT: v_sub_f32_e32 v0, v1, v5
+; GFX11-NEXT: v_fma_f32 v3, -v6, v3, v9
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_fmac_f32_e32 v7, v3, v6
+; GFX11-NEXT: v_fmac_f32_e32 v5, v0, v6
+; GFX11-NEXT: v_mul_f32_e32 v1, v8, v6
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_add_f32_e32 v4, v4, v7
-; GFX11-NEXT: v_mul_f32_e32 v4, v4, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mul_f32_e32 v2, v4, v8
-; GFX11-NEXT: v_fmac_f32_e32 v2, v0, v1
+; GFX11-NEXT: v_add_f32_e32 v4, v4, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_dual_mul_f32 v3, v4, v6 :: v_dual_fmaak_f32 v4, s0, v5, 0x3ca3d70a
+; GFX11-NEXT: v_fma_f32 v0, v2, s26, -v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_fmac_f32_e32 v1, v0, v6
+; GFX11-NEXT: v_mul_f32_e32 v0, v2, v6
+; GFX11-NEXT: v_mul_f32_e32 v2, v7, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v1, v3, v1
+; GFX11-NEXT: v_fmac_f32_e32 v1, v2, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_max_f32_e32 v0, 0, v2
+; GFX11-NEXT: v_max_f32_e32 v0, 0, v1
; GFX11-NEXT: ; return to shader part epilog
.entry:
%0 = call <3 x float> @llvm.amdgcn.image.sample.2d.v3f32.f32(i32 7, float poison, float poison, <8 x i32> poison, <4 x i32> poison, i1 false, i32 0, i32 0)
More information about the llvm-branch-commits
mailing list