[llvm] [AMDGPU] Fix Xcnt handling between blocks (PR #165201)

via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 27 23:55:34 PDT 2025


https://github.com/easyonaadit updated https://github.com/llvm/llvm-project/pull/165201

>From 8bd1850185f9116cc039e3d2dfc6fc7c39e268a2 Mon Sep 17 00:00:00 2001
From: Aaditya <Aaditya.AlokDeshpande at amd.com>
Date: Tue, 28 Oct 2025 10:57:16 +0530
Subject: [PATCH] [AMDGPU] Fix Xcnt handling between blocks

The compiler needs to conservatively flush the
Xcnt counter on entry to a block in case of
pending SMEM and VMEM events.
---
 llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp   | 42 +++++++++++++------
 .../AMDGPU/flat-load-saddr-to-vaddr.ll        |  5 +++
 llvm/test/CodeGen/AMDGPU/wait-xcnt.mir        |  2 +-
 3 files changed, 35 insertions(+), 14 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 6dcbced010a5a..11ff1bdb9e2a1 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -565,12 +565,12 @@ class SIInsertWaitcnts {
   bool isVmemAccess(const MachineInstr &MI) const;
   bool generateWaitcntInstBefore(MachineInstr &MI,
                                  WaitcntBrackets &ScoreBrackets,
-                                 MachineInstr *OldWaitcntInstr,
-                                 bool FlushVmCnt);
+                                 MachineInstr *OldWaitcntInstr, bool FlushVmCnt,
+                                 bool FlushXCnt);
   bool generateWaitcnt(AMDGPU::Waitcnt Wait,
                        MachineBasicBlock::instr_iterator It,
                        MachineBasicBlock &Block, WaitcntBrackets &ScoreBrackets,
-                       MachineInstr *OldWaitcntInstr);
+                       MachineInstr *OldWaitcntInstr, bool FlushXCnt);
   void updateEventWaitcntAfter(MachineInstr &Inst,
                                WaitcntBrackets *ScoreBrackets);
   bool isNextENDPGM(MachineBasicBlock::instr_iterator It,
@@ -1841,12 +1841,13 @@ static bool callWaitsOnFunctionReturn(const MachineInstr &MI) { return true; }
 ///  and if so what the value of each counter is.
 ///  The "score bracket" is bound by the lower bound and upper bound
 ///  scores (*_score_LB and *_score_ub respectively).
-///  If FlushVmCnt is true, that means that we want to generate a s_waitcnt to
-///  flush the vmcnt counter here.
+///  If FlushVmCnt/FlushXCnt is true, that means that we want to
+///  generate a s_waitcnt to flush the vmcnt/xcnt counter here.
 bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
                                                  WaitcntBrackets &ScoreBrackets,
                                                  MachineInstr *OldWaitcntInstr,
-                                                 bool FlushVmCnt) {
+                                                 bool FlushVmCnt,
+                                                 bool FlushXCnt) {
   setForceEmitWaitcnt();
 
   assert(!MI.isMetaInstruction());
@@ -2101,18 +2102,26 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
       Wait.BvhCnt = 0;
   }
 
+  // Conservatively flush the Xcnt counter at the start of the block.
+  if (FlushXCnt) {
+    if (ScoreBrackets.hasPendingEvent(SMEM_GROUP) &&
+        ScoreBrackets.hasPendingEvent(VMEM_GROUP))
+      Wait.XCnt = 0;
+  }
+
   if (ForceEmitZeroLoadFlag && Wait.LoadCnt != ~0u)
     Wait.LoadCnt = 0;
 
   return generateWaitcnt(Wait, MI.getIterator(), *MI.getParent(), ScoreBrackets,
-                         OldWaitcntInstr);
+                         OldWaitcntInstr, FlushXCnt);
 }
 
 bool SIInsertWaitcnts::generateWaitcnt(AMDGPU::Waitcnt Wait,
                                        MachineBasicBlock::instr_iterator It,
                                        MachineBasicBlock &Block,
                                        WaitcntBrackets &ScoreBrackets,
-                                       MachineInstr *OldWaitcntInstr) {
+                                       MachineInstr *OldWaitcntInstr,
+                                       bool FlushXCnt) {
   bool Modified = false;
 
   if (OldWaitcntInstr)
@@ -2141,7 +2150,9 @@ bool SIInsertWaitcnts::generateWaitcnt(AMDGPU::Waitcnt Wait,
   }
 
   // XCnt may be already consumed by a load wait.
-  if (Wait.XCnt != ~0u) {
+  // If we need to flush the Xcnt counter, don't
+  // combine it with any other wait events.
+  if (Wait.XCnt != ~0u && !FlushXCnt) {
     if (Wait.KmCnt == 0 && !ScoreBrackets.hasPendingEvent(SMEM_GROUP))
       Wait.XCnt = ~0u;
 
@@ -2213,8 +2224,9 @@ bool SIInsertWaitcnts::insertForcedWaitAfter(MachineInstr &Inst,
   ScoreBrackets.simplifyWaitcnt(Wait);
 
   auto SuccessorIt = std::next(Inst.getIterator());
-  bool Result = generateWaitcnt(Wait, SuccessorIt, Block, ScoreBrackets,
-                                /*OldWaitcntInstr=*/nullptr);
+  bool Result =
+      generateWaitcnt(Wait, SuccessorIt, Block, ScoreBrackets,
+                      /*OldWaitcntInstr=*/nullptr, /*FlushXCnt=*/false);
 
   if (Result && NeedsEndPGMCheck && isNextENDPGM(SuccessorIt, &Block)) {
     BuildMI(Block, SuccessorIt, Inst.getDebugLoc(), TII->get(AMDGPU::S_NOP))
@@ -2454,6 +2466,7 @@ bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
 
   // Walk over the instructions.
   MachineInstr *OldWaitcntInstr = nullptr;
+  bool FirstInstInBlock = true;
 
   for (MachineBasicBlock::instr_iterator Iter = Block.instr_begin(),
                                          E = Block.instr_end();
@@ -2475,10 +2488,13 @@ bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
 
     bool FlushVmCnt = Block.getFirstTerminator() == Inst &&
                       isPreheaderToFlush(Block, ScoreBrackets);
+    bool FlushXCnt = FirstInstInBlock;
+    if (FirstInstInBlock)
+      FirstInstInBlock = false;
 
     // Generate an s_waitcnt instruction to be placed before Inst, if needed.
     Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr,
-                                          FlushVmCnt);
+                                          FlushVmCnt, FlushXCnt);
     OldWaitcntInstr = nullptr;
 
     // Restore vccz if it's not known to be correct already.
@@ -2567,7 +2583,7 @@ bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
 
   // Combine or remove any redundant waitcnts at the end of the block.
   Modified |= generateWaitcnt(Wait, Block.instr_end(), Block, ScoreBrackets,
-                              OldWaitcntInstr);
+                              OldWaitcntInstr, /*FlushXCnt=*/false);
 
   LLVM_DEBUG({
     dbgs() << "*** End Block: ";
diff --git a/llvm/test/CodeGen/AMDGPU/flat-load-saddr-to-vaddr.ll b/llvm/test/CodeGen/AMDGPU/flat-load-saddr-to-vaddr.ll
index e8efa859ce13f..564b2a8af338c 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-load-saddr-to-vaddr.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-load-saddr-to-vaddr.ll
@@ -10,6 +10,10 @@
 ; Check that we are changing SADDR form of a load to VADDR and do not have to use
 ; readfirstlane instructions to move address from VGPRs into SGPRs.
 
+; FIXME: Redundant xcnt in the loop header.
+; Pending xcnt events should check if they can be folded into soft waitcnts
+; before being propagated.
+
 define amdgpu_kernel void @test_move_load_address_to_vgpr(ptr addrspace(1) nocapture %arg1, ptr nocapture %arg2) {
 ; GCN-LABEL: test_move_load_address_to_vgpr:
 ; GCN:       ; %bb.0: ; %bb
@@ -24,6 +28,7 @@ define amdgpu_kernel void @test_move_load_address_to_vgpr(ptr addrspace(1) nocap
 ; GCN-NEXT:    v_add_nc_u64_e32 v[0:1], s[2:3], v[0:1]
 ; GCN-NEXT:  .LBB0_1: ; %bb3
 ; GCN-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT:    s_wait_xcnt 0x0
 ; GCN-NEXT:    s_wait_dscnt 0x0
 ; GCN-NEXT:    flat_load_b32 v3, v[0:1] scope:SCOPE_SYS
 ; GCN-NEXT:    s_wait_loadcnt 0x0
diff --git a/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir b/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir
index 1b8e126f19ae1..2a80de849aec7 100644
--- a/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir
+++ b/llvm/test/CodeGen/AMDGPU/wait-xcnt.mir
@@ -945,7 +945,6 @@ body: |
     $vgpr0 = V_MOV_B32_e32 0, implicit $exec
 ...
 
-# FIXME: Missing S_WAIT_XCNT before overwriting vgpr0.
 ---
 name: wait_kmcnt_with_outstanding_vmem_2
 tracksRegLiveness: true
@@ -970,6 +969,7 @@ body: |
   ; GCN-NEXT:   liveins: $sgpr2
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   S_WAIT_KMCNT 0
+  ; GCN-NEXT:   S_WAIT_XCNT 0
   ; GCN-NEXT:   $sgpr2 = S_MOV_B32 $sgpr2
   ; GCN-NEXT:   $vgpr0 = V_MOV_B32_e32 0, implicit $exec
   bb.0:



More information about the llvm-commits mailing list