[llvm] [AMDGPU] asyncmark support for ASYNC_CNT (PR #185813)

Sameer Sahasrabuddhe via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 25 03:27:10 PDT 2026


https://github.com/ssahasra updated https://github.com/llvm/llvm-project/pull/185813

>From e67a7d0bf6683aeea763b30deb47fe6ec697dd5b Mon Sep 17 00:00:00 2001
From: Sameer Sahasrabuddhe <sameer.sahasrabuddhe at amd.com>
Date: Mon, 9 Mar 2026 12:00:28 +0530
Subject: [PATCH 1/2] [AMDGPU] Introduce ASYNC_CNT on GFX1250

Async operations transfer data between global memory and LDS. Their progress is
tracked by the ASYNC_CNT counter on GFX1250 and later architectures. This change
introduces the representation of that counter in SIInsertWaitcnts. For now, the
programmer must manually insert s_wait_asynccnt instructions. Later changes will
add compiler assistance for generating the waits by including this counter in
the asyncmark instructions.
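
For illustration (not part of the patch): a minimal sketch of the current,
manual usage, assuming the intrinsic name and cache-policy operand used by the
tests added in the second patch, with the wait written by hand as inline asm.

  define void @manual_async_wait(ptr addrspace(1) %src, ptr addrspace(3) %dst) {
    ; asynchronous global-to-LDS copy, tracked by ASYNC_CNT on gfx1250
    call void @llvm.amdgcn.global.load.async.to.lds.b32(ptr addrspace(1) %src, ptr addrspace(3) %dst, i32 4, i32 u0x20)
    ; with this patch alone, the wait must be written by hand, e.g. as inline asm
    call void asm sideeffect "s_wait_asynccnt 0x0", ""()
    ; the copied data can now be read from LDS
    %val = load i32, ptr addrspace(3) %dst
    ret void
  }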

Assisted-by: Claude Sonnet 4.5
---
 llvm/lib/Target/AMDGPU/AMDGPU.td              |  6 ++++
 llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp   | 31 ++++++++++++++-----
 .../Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp    |  9 ++++++
 llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h | 19 ++++++++++--
 4 files changed, 54 insertions(+), 11 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index a0b6ff13e7d7a..4259bf4c1b0bf 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -972,6 +972,11 @@ defm Vscnt : AMDGPUSubtargetFeature<"vscnt",
   /*GenPredicate=*/0
 >;
 
+defm Asynccnt : AMDGPUSubtargetFeature<"asynccnt",
+  "Has separate asynccnt counter",
+  /*GenPredicate=*/0
+>;
+
 defm GetWaveIdInst : AMDGPUSubtargetFeature<"get-wave-id-inst",
   "Has s_get_waveid_in_workgroup instruction"
 >;
@@ -2032,6 +2037,7 @@ def FeatureISAVersion12_50_Common : FeatureSet<
    FeatureSupportsSRAMECC,
    FeatureMaxHardClauseLength63,
    FeatureWaitXcnt,
+   FeatureAsynccnt,
    FeatureAtomicFMinFMaxF64GlobalInsts,
    FeatureAtomicFMinFMaxF64FlatInsts,
    FeatureFlatBufferGlobalAtomicFaddF64Inst,
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index b07516c22cf29..a804ba35bade7 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -160,7 +160,8 @@ static constexpr VMEMID toVMEMID(MCRegUnit RU) {
   DECL(VGPR_XDL_WRITE)           /* write VGPR dest in XDL VALU */             \
   DECL(VGPR_LDS_READ)            /* read VGPR source in LDS */                 \
   DECL(VGPR_FLAT_READ)           /* read VGPR source in FLAT */                \
-  DECL(VGPR_VMEM_READ)           /* read VGPR source in other VMEM */
+  DECL(VGPR_VMEM_READ)           /* read VGPR source in other VMEM */          \
+  DECL(ASYNC_ACCESS)             /* access that uses ASYNC_CNT */
 
 // clang-format off
 #define AMDGPU_EVENT_ENUM(Name) Name,
@@ -217,7 +218,7 @@ enum VmemType {
 static const unsigned instrsForExtendedCounterTypes[NUM_EXTENDED_INST_CNTS] = {
     AMDGPU::S_WAIT_LOADCNT,  AMDGPU::S_WAIT_DSCNT,     AMDGPU::S_WAIT_EXPCNT,
     AMDGPU::S_WAIT_STORECNT, AMDGPU::S_WAIT_SAMPLECNT, AMDGPU::S_WAIT_BVHCNT,
-    AMDGPU::S_WAIT_KMCNT,    AMDGPU::S_WAIT_XCNT};
+    AMDGPU::S_WAIT_KMCNT,    AMDGPU::S_WAIT_XCNT,      AMDGPU::S_WAIT_ASYNCCNT};
 
 static bool updateVMCntOnly(const MachineInstr &Inst) {
   return (SIInstrInfo::isVMEM(Inst) && !SIInstrInfo::isFLAT(Inst)) ||
@@ -405,6 +406,8 @@ class WaitcntGenerator {
 
   // Returns a new waitcnt with all counters except VScnt set to 0. If
   // IncludeVSCnt is true, VScnt is set to 0, otherwise it is set to ~0u.
+  // AsyncCnt always defaults to ~0u (don't wait for it). It is only updated
+  // when a call to @llvm.amdgcn.wait.asyncmark() is processed.
   virtual AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const = 0;
 
   virtual ~WaitcntGenerator() = default;
@@ -459,6 +462,7 @@ class WaitcntGeneratorGFX12Plus final : public WaitcntGenerator {
           WaitEventSet({VMEM_BVH_READ_ACCESS}),
           WaitEventSet({SMEM_ACCESS, SQ_MESSAGE, SCC_WRITE}),
           WaitEventSet({VMEM_GROUP, SMEM_GROUP}),
+          WaitEventSet({ASYNC_ACCESS}),
           WaitEventSet({VGPR_CSMACC_WRITE, VGPR_DPMACC_WRITE, VGPR_TRANS_WRITE,
                         VGPR_XDL_WRITE}),
           WaitEventSet({VGPR_LDS_READ, VGPR_FLAT_READ, VGPR_VMEM_READ})};
@@ -1314,6 +1318,9 @@ void WaitcntBrackets::print(raw_ostream &OS) const {
     case X_CNT:
       OS << "    X_CNT(" << SR << "):";
       break;
+    case ASYNC_CNT:
+      OS << "    ASYNC_CNT(" << SR << "):";
+      break;
     case VA_VDST:
       OS << "    VA_VDST(" << SR << "): ";
       break;
@@ -1418,6 +1425,9 @@ void WaitcntBrackets::print(raw_ostream &OS) const {
       case X_CNT:
         OS << "  X_CNT: " << MarkedScore;
         break;
+      case ASYNC_CNT:
+        OS << "  ASYNC_CNT: " << MarkedScore;
+        break;
       default:
         OS << "  UNKNOWN: " << MarkedScore;
         break;
@@ -1442,6 +1452,7 @@ void WaitcntBrackets::simplifyWaitcnt(const AMDGPU::Waitcnt &CheckWait,
   simplifyXcnt(CheckWait, UpdateWait);
   simplifyWaitcnt(UpdateWait, VA_VDST);
   simplifyVmVsrc(CheckWait, UpdateWait);
+  simplifyWaitcnt(UpdateWait, ASYNC_CNT);
 }
 
 void WaitcntBrackets::simplifyWaitcnt(InstCounterType T,
@@ -1977,7 +1988,8 @@ AMDGPU::Waitcnt
 WaitcntGeneratorGFX12Plus::getAllZeroWaitcnt(bool IncludeVSCnt) const {
   unsigned ExpertVal = IsExpertMode ? 0 : ~0u;
   return AMDGPU::Waitcnt(0, 0, 0, IncludeVSCnt ? 0 : ~0u, 0, 0, 0,
-                         ~0u /* XCNT */, ExpertVal, ExpertVal);
+                         ~0u /* XCNT */, ~0u /* ASYNC_CNT */, ExpertVal,
+                         ExpertVal);
 }
 
 /// Combine consecutive S_WAIT_*CNT instructions that precede \p It and
@@ -2917,6 +2929,10 @@ void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
         TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
       ScoreBrackets->setPendingGDS();
     }
+  } else if (SIInstrInfo::usesASYNC_CNT(Inst)) {
+    // Async instructions use flat encoding, so this needs to happen before the
+    // isFLAT() check below.
+    ScoreBrackets->updateByEvent(ASYNC_ACCESS, Inst);
   } else if (TII->isFLAT(Inst)) {
     if (Inst.mayLoadOrStore() && TII->mayAccessVMEMThroughFlat(Inst) &&
         TII->mayAccessLDSThroughFlat(Inst) && !SIInstrInfo::isLDSDMA(Inst))
@@ -2927,7 +2943,8 @@ void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
       // pointers so that both VM and LGKM counters are flushed.
       ScoreBrackets->setPendingFlat();
   } else if (Inst.isCall()) {
-    // Act as a wait on everything
+    // Act as a wait on everything, but AsyncCnt is never included in such
+    // blanket waits.
     ScoreBrackets->applyWaitcnt(WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/false));
     ScoreBrackets->setStateOnFunctionEntryOrReturn();
   } else if (TII->isVINTERP(Inst)) {
@@ -3265,12 +3282,9 @@ bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
     OldWaitcntInstr = nullptr;
 
     if (Inst.getOpcode() == AMDGPU::ASYNCMARK) {
-      // FIXME: Not supported on GFX12 yet. Will need a new feature when we do.
-      //
       // Asyncmarks record the current wait state and so should not allow
       // waitcnts that occur after them to be merged into waitcnts that occur
       // before.
-      assert(ST->getGeneration() < AMDGPUSubtarget::GFX12);
       ScoreBrackets.recordAsyncMark(Inst);
       continue;
     }
@@ -3677,7 +3691,8 @@ bool SIInsertWaitcnts::run(MachineFunction &MF) {
       BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAIT_LOADCNT_DSCNT))
           .addImm(0);
       for (auto CT : inst_counter_types(NUM_EXTENDED_INST_CNTS)) {
-        if (CT == LOAD_CNT || CT == DS_CNT || CT == STORE_CNT || CT == X_CNT)
+        if (CT == LOAD_CNT || CT == DS_CNT || CT == STORE_CNT || CT == X_CNT ||
+            CT == ASYNC_CNT)
           continue;
 
         if (!ST->hasImageInsts() &&
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 488c150dd5c28..b04e5264feddc 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -135,6 +135,10 @@ unsigned getXcntBitWidth(unsigned VersionMajor, unsigned VersionMinor) {
   return VersionMajor == 12 && VersionMinor == 5 ? 6 : 0;
 }
 
+unsigned getAsynccntBitWidth(unsigned VersionMajor, unsigned VersionMinor) {
+  return VersionMajor == 12 && VersionMinor == 5 ? 6 : 0;
+}
+
 /// \returns shift for Loadcnt/Storecnt in combined S_WAIT instructions.
 unsigned getLoadcntStorecntBitShift(unsigned VersionMajor) {
   return VersionMajor >= 12 ? 8 : 0;
@@ -1808,6 +1812,10 @@ unsigned getXcntBitMask(const IsaVersion &Version) {
   return (1 << getXcntBitWidth(Version.Major, Version.Minor)) - 1;
 }
 
+unsigned getAsynccntBitMask(const IsaVersion &Version) {
+  return (1 << getAsynccntBitWidth(Version.Major, Version.Minor)) - 1;
+}
+
 unsigned getStorecntBitMask(const IsaVersion &Version) {
   return (1 << getStorecntBitWidth(Version.Major)) - 1;
 }
@@ -1827,6 +1835,7 @@ HardwareLimits::HardwareLimits(const IsaVersion &IV) {
   BvhcntMax = getBvhcntBitMask(IV);
   KmcntMax = getKmcntBitMask(IV);
   XcntMax = getXcntBitMask(IV);
+  AsyncMax = getAsynccntBitMask(IV);
   VaVdstMax = DepCtr::getVaVdstBitMask();
   VmVsrcMax = DepCtr::getVmVsrcBitMask();
 }
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index b3d20777ccfcf..9cec56090172b 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -1096,6 +1096,7 @@ enum InstCounterType {
   BVH_CNT,                           // gfx12+ only.
   KM_CNT,                            // gfx12+ only.
   X_CNT,                             // gfx1250.
+  ASYNC_CNT,                         // gfx1250.
   NUM_EXTENDED_INST_CNTS,
   VA_VDST = NUM_EXTENDED_INST_CNTS, // gfx12+ expert mode only.
   VM_VSRC,                          // gfx12+ expert mode only.
@@ -1130,6 +1131,7 @@ class Waitcnt {
   unsigned BvhCnt = ~0u;    // gfx12+ only.
   unsigned KmCnt = ~0u;     // gfx12+ only.
   unsigned XCnt = ~0u;      // gfx1250.
+  unsigned AsyncCnt = ~0u;  // gfx1250.
   unsigned VaVdst = ~0u;    // gfx12+ expert scheduling mode only.
   unsigned VmVsrc = ~0u;    // gfx12+ expert scheduling mode only.
 
@@ -1152,6 +1154,8 @@ class Waitcnt {
       return KmCnt;
     case X_CNT:
       return XCnt;
+    case ASYNC_CNT:
+      return AsyncCnt;
     case VA_VDST:
       return VaVdst;
     case VM_VSRC:
@@ -1186,6 +1190,9 @@ class Waitcnt {
     case X_CNT:
       XCnt = Val;
       break;
+    case ASYNC_CNT:
+      AsyncCnt = Val;
+      break;
     case VA_VDST:
       VaVdst = Val;
       break;
@@ -1205,10 +1212,10 @@ class Waitcnt {
   // gfx12+ constructor.
   Waitcnt(unsigned LoadCnt, unsigned ExpCnt, unsigned DsCnt, unsigned StoreCnt,
           unsigned SampleCnt, unsigned BvhCnt, unsigned KmCnt, unsigned XCnt,
-          unsigned VaVdst, unsigned VmVsrc)
+          unsigned AsyncCnt, unsigned VaVdst, unsigned VmVsrc)
       : LoadCnt(LoadCnt), ExpCnt(ExpCnt), DsCnt(DsCnt), StoreCnt(StoreCnt),
         SampleCnt(SampleCnt), BvhCnt(BvhCnt), KmCnt(KmCnt), XCnt(XCnt),
-        VaVdst(VaVdst), VmVsrc(VmVsrc) {}
+        AsyncCnt(AsyncCnt), VaVdst(VaVdst), VmVsrc(VmVsrc) {}
 
   bool hasWait() const { return StoreCnt != ~0u || hasWaitExceptStoreCnt(); }
 
@@ -1230,7 +1237,8 @@ class Waitcnt {
         std::min(DsCnt, Other.DsCnt), std::min(StoreCnt, Other.StoreCnt),
         std::min(SampleCnt, Other.SampleCnt), std::min(BvhCnt, Other.BvhCnt),
         std::min(KmCnt, Other.KmCnt), std::min(XCnt, Other.XCnt),
-        std::min(VaVdst, Other.VaVdst), std::min(VmVsrc, Other.VmVsrc));
+        std::min(AsyncCnt, Other.AsyncCnt), std::min(VaVdst, Other.VaVdst),
+        std::min(VmVsrc, Other.VmVsrc));
   }
 
   friend raw_ostream &operator<<(raw_ostream &OS, const AMDGPU::Waitcnt &Wait);
@@ -1246,6 +1254,7 @@ struct HardwareLimits {
   unsigned BvhcntMax;    // gfx12+ only.
   unsigned KmcntMax;     // gfx12+ only.
   unsigned XcntMax;      // gfx1250.
+  unsigned AsyncMax;     // gfx1250.
   unsigned VaVdstMax;    // gfx12+ expert mode only.
   unsigned VmVsrcMax;    // gfx12+ expert mode only.
 
@@ -1349,6 +1358,10 @@ unsigned getSamplecntBitMask(const IsaVersion &Version);
 /// Returns 0 for versions that do not support BVHcnt
 unsigned getBvhcntBitMask(const IsaVersion &Version);
 
+/// \returns Asynccnt bit mask for given isa \p Version.
+/// Returns 0 for versions that do not support Asynccnt
+unsigned getAsynccntBitMask(const IsaVersion &Version);
+
 /// \returns Dscnt bit mask for given isa \p Version.
 /// Returns 0 for versions that do not support DScnt
 unsigned getDscntBitMask(const IsaVersion &Version);

>From 121f1a895d9fcaa1b463c132da20d7f134e1402c Mon Sep 17 00:00:00 2001
From: Sameer Sahasrabuddhe <sameer.sahasrabuddhe at amd.com>
Date: Mon, 9 Mar 2026 12:04:27 +0530
Subject: [PATCH 2/2] [AMDGPU] asyncmark support for ASYNC_CNT

ASYNC_CNT tracks the progress of asynchronous copies between global memory and
LDS. By including it in asyncmark, the compiler can now assist the programmer in
generating waits for ASYNC_CNT.
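
As an illustration (a sketch distilled from the new tests below, not a verbatim
excerpt), a marked async copy can now be waited on through ASYNC_CNT:

  define void @asyncmark_wait(ptr addrspace(1) %src, ptr addrspace(3) %dst) {
    call void @llvm.amdgcn.global.load.async.to.lds.b32(ptr addrspace(1) %src, ptr addrspace(3) %dst, i32 4, i32 u0x20)
    ; the mark snapshots the async copies issued so far
    call void @llvm.amdgcn.asyncmark()
    ; lowered to "s_wait_asynccnt 0x0" in the emitted assembly
    call void @llvm.amdgcn.wait.asyncmark(i16 0)
    ; safe to read the copied data from LDS
    %val = load i32, ptr addrspace(3) %dst
    ret void
  }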

Assisted-By: Claude Sonnet 4.5
---
 llvm/lib/Target/AMDGPU/AMDGPU.td              |   3 +
 .../AMDGPU/AMDGPUInstructionSelector.cpp      |   3 +-
 llvm/lib/Target/AMDGPU/GCNSubtarget.h         |   2 +
 llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp   |  21 +-
 llvm/lib/Target/AMDGPU/SOPInstructions.td     |   2 +-
 llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h |   2 +-
 llvm/test/CodeGen/AMDGPU/asyncmark-err.ll     |  19 -
 .../CodeGen/AMDGPU/asyncmark-gfx12plus.ll     | 366 ++++++++++++++++++
 8 files changed, 388 insertions(+), 30 deletions(-)
 delete mode 100644 llvm/test/CodeGen/AMDGPU/asyncmark-err.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/asyncmark-gfx12plus.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 4259bf4c1b0bf..42a9c91a0fc3f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -1298,6 +1298,9 @@ defm VMemToLDSLoad : AMDGPUSubtargetFeature<"vmem-to-lds-load-insts",
   "w/lds bit set or global_load_lds. This does not include scratch_load_lds."
 >;
 
+// Manual predicate for hasAsyncMark(), which combines HasVMemToLDSLoad and HasAsynccnt.
+def HasAsyncMark : Predicate<"Subtarget->hasAsyncMark()">;
+
 defm LdsBarrierArriveAtomic : AMDGPUSubtargetFeature<"lds-barrier-arrive-atomic",
   "Has LDS barrier-arrive atomic instructions"
 >;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 81e224355411b..35f5e89f95986 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -2398,8 +2398,7 @@ bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
     return selectTensorLoadStore(I, IntrinsicID);
   case Intrinsic::amdgcn_asyncmark:
   case Intrinsic::amdgcn_wait_asyncmark:
-    // FIXME: Not supported on GFX12 yet. Will need a new feature when we do.
-    if (!Subtarget->hasVMemToLDSLoad())
+    if (!Subtarget->hasAsyncMark())
       return false;
     break;
   case Intrinsic::amdgcn_exp_compr:
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index b9c7bad4cef0d..b35c661b1b608 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -220,6 +220,8 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
 
   bool hasScalarSubwordLoads() const { return getGeneration() >= GFX12; }
 
+  bool hasAsyncMark() const { return hasVMemToLDSLoad() || HasAsynccnt; }
+
   TrapHandlerAbi getTrapHandlerAbi() const {
     return isAmdHsaOS() ? TrapHandlerAbi::AMDHSA : TrapHandlerAbi::NONE;
   }
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index a804ba35bade7..16f278ca0ee6a 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -649,6 +649,14 @@ class SIInsertWaitcnts {
     return SIInstrInfo::mayWriteLDSThroughDMA(MI) && isAsync(MI);
   }
 
+  bool shouldUpdateAsyncMark(const MachineInstr &MI, InstCounterType T) const {
+    if (!isAsyncLdsDmaWrite(MI))
+      return false;
+    if (SIInstrInfo::usesASYNC_CNT(MI))
+      return T == ASYNC_CNT;
+    return T == LOAD_CNT;
+  }
+
   bool isVmemAccess(const MachineInstr &MI) const;
   bool generateWaitcntInstBefore(MachineInstr &MI,
                                  WaitcntBrackets &ScoreBrackets,
@@ -1252,12 +1260,7 @@ void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) {
         setVMemScore(LDSDMA_BEGIN + Slot, T, CurrScore);
     }
 
-    // FIXME: Not supported on GFX12 yet. Newer async operations use other
-    // counters too, so will need a map from instruction or event types to
-    // counter types.
-    if (Context->isAsyncLdsDmaWrite(Inst) && T == LOAD_CNT) {
-      assert(!SIInstrInfo::usesASYNC_CNT(Inst) &&
-             "unexpected GFX1250 instruction");
+    if (Context->shouldUpdateAsyncMark(Inst, T)) {
       AsyncScore[T] = CurrScore;
     }
 
@@ -2104,7 +2107,11 @@ bool WaitcntGeneratorGFX12Plus::applyPreexistingWaitcnt(
       II.eraseFromParent();
       Modified = true;
     } else if (Opcode == AMDGPU::WAIT_ASYNCMARK) {
-      reportFatalUsageError("WAIT_ASYNCMARK is not ready for GFX12 yet");
+      // Update the Waitcnt, but don't erase the wait.asyncmark() itself. It
+      // shows up in the assembly as a comment with the original parameter N.
+      unsigned N = II.getOperand(0).getImm();
+      AMDGPU::Waitcnt OldWait = ScoreBrackets.determineAsyncWait(N);
+      Wait = Wait.combined(OldWait);
     } else {
       std::optional<InstCounterType> CT = counterTypeForInstr(Opcode);
       assert(CT.has_value());
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index ce6e862104b4f..e3c1ba58197c8 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -1716,7 +1716,7 @@ def S_WAITCNT_lds_direct : SPseudoInstSI<(outs), (ins)> {
    let hasSideEffects = 0;
 }
 
-let SubtargetPredicate = HasVMemToLDSLoad in {
+let SubtargetPredicate = HasAsyncMark in {
 def ASYNCMARK : SPseudoInstSI<(outs), (ins),
   [(int_amdgcn_asyncmark)]> {
    let maybeAtomic = 0;
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index 9cec56090172b..fd6afe509239c 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -1207,7 +1207,7 @@ class Waitcnt {
   Waitcnt() = default;
   // Pre-gfx12 constructor.
   Waitcnt(unsigned VmCnt, unsigned ExpCnt, unsigned LgkmCnt, unsigned VsCnt)
-      : LoadCnt(VmCnt), ExpCnt(ExpCnt), DsCnt(LgkmCnt), StoreCnt(VsCnt) {}
+    : LoadCnt(VmCnt), ExpCnt(ExpCnt), DsCnt(LgkmCnt), StoreCnt(VsCnt) {}
 
   // gfx12+ constructor.
   Waitcnt(unsigned LoadCnt, unsigned ExpCnt, unsigned DsCnt, unsigned StoreCnt,
diff --git a/llvm/test/CodeGen/AMDGPU/asyncmark-err.ll b/llvm/test/CodeGen/AMDGPU/asyncmark-err.ll
deleted file mode 100644
index f929cb3e380b7..0000000000000
--- a/llvm/test/CodeGen/AMDGPU/asyncmark-err.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: split-file %s %t
-; RUN: not --crash llc -filetype=null -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 %t/mark.ll 2>&1 | FileCheck --ignore-case %s
-; RUN: not         llc -filetype=null -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 %t/mark.ll 2>&1 | FileCheck --ignore-case %s
-; RUN: not --crash llc -filetype=null -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 %t/wait.ll 2>&1 | FileCheck --ignore-case %s
-; RUN: not         llc -filetype=null -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 %t/wait.ll 2>&1 | FileCheck --ignore-case %s
-
-; CHECK: LLVM ERROR: Cannot select
-
-;--- mark.ll
-define void @async_err() {
-  call void @llvm.amdgcn.asyncmark()
-  ret void
-}
-
-;--- wait.ll
-define void @async_err() {
-  call void @llvm.amdgcn.wait.asyncmark(i16 0)
-  ret void
-}
diff --git a/llvm/test/CodeGen/AMDGPU/asyncmark-gfx12plus.ll b/llvm/test/CodeGen/AMDGPU/asyncmark-gfx12plus.ll
new file mode 100644
index 0000000000000..9e0acd51472c5
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/asyncmark-gfx12plus.ll
@@ -0,0 +1,366 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -march=amdgcn -mcpu=gfx1250  < %s | FileCheck %s -check-prefixes=GFX1250
+
+; Test async mark/wait with async global-to-LDS loads and ordinary global loads.
+; This version uses wave barriers to enforce program order so that unrelated vmem
+; instructions do not get reordered across the async operations.
+
+define void @interleaved_with_wave_barrier(ptr addrspace(1) %foo, ptr addrspace(3) %lds, ptr addrspace(1) %bar, ptr addrspace(1) %out) {
+; GFX1250-LABEL: interleaved_with_wave_barrier:
+; GFX1250:       ; %bb.0: ; %entry
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_mov_b32 v7, v6 :: v_dual_mov_b32 v9, v4
+; GFX1250-NEXT:    v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v6, v5
+; GFX1250-NEXT:    v_add_nc_u64_e32 v[4:5], 0x54, v[0:1]
+; GFX1250-NEXT:    v_add_nc_u32_e32 v3, 0x54, v2
+; GFX1250-NEXT:    global_load_b32 v10, v[8:9], off offset:44
+; GFX1250-NEXT:    global_load_b32 v11, v[0:1], off offset:4
+; GFX1250-NEXT:    ; wave barrier
+; GFX1250-NEXT:    global_load_async_to_lds_b32 v3, v[4:5], off offset:4 th:TH_LOAD_NT nv
+; GFX1250-NEXT:    v_add_nc_u64_e32 v[4:5], 0x58, v[8:9]
+; GFX1250-NEXT:    v_add_nc_u32_e32 v3, 0x58, v2
+; GFX1250-NEXT:    ; wave barrier
+; GFX1250-NEXT:    ; asyncmark
+; GFX1250-NEXT:    global_load_b32 v0, v[0:1], off offset:8
+; GFX1250-NEXT:    ; wave barrier
+; GFX1250-NEXT:    global_load_async_to_lds_b32 v3, v[4:5], off offset:4 th:TH_LOAD_LU nv
+; GFX1250-NEXT:    ; wave barrier
+; GFX1250-NEXT:    global_load_b32 v1, v[8:9], off offset:48
+; GFX1250-NEXT:    ; asyncmark
+; GFX1250-NEXT:    ; wait_asyncmark(1)
+; GFX1250-NEXT:    s_wait_asynccnt 0x1
+; GFX1250-NEXT:    ds_load_b32 v3, v2 offset:84
+; GFX1250-NEXT:    ; wait_asyncmark(0)
+; GFX1250-NEXT:    s_wait_asynccnt 0x0
+; GFX1250-NEXT:    ds_load_b32 v2, v2 offset:88
+; GFX1250-NEXT:    s_wait_loadcnt 0x2
+; GFX1250-NEXT:    v_add_nc_u32_e32 v4, v11, v10
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x101
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_add3_u32 v0, v4, v3, v0
+; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    v_add3_u32 v0, v0, v1, v2
+; GFX1250-NEXT:    global_store_b32 v[6:7], v0, off
+; GFX1250-NEXT:    s_set_pc_i64 s[30:31]
+entry:
+  ; First batch: global load, global load, async global-to-LDS
+  %bar_gep11 = getelementptr i32, ptr addrspace(1) %bar, i32 11
+  %bar_v11 = load i32, ptr addrspace(1) %bar_gep11
+  %foo_gep1 = getelementptr i32, ptr addrspace(1) %foo, i32 1
+  %foo_v1 = load i32, ptr addrspace(1) %foo_gep1
+  %lds_gep21 = getelementptr i32, ptr addrspace(3) %lds, i32 21
+  %bar_gep21 = getelementptr i32, ptr addrspace(1) %foo, i32 21
+  call void @llvm.amdgcn.wave.barrier()
+  call void @llvm.amdgcn.global.load.async.to.lds.b32(ptr addrspace(1) %bar_gep21, ptr addrspace(3) %lds_gep21, i32 4, i32 u0x21)
+  call void @llvm.amdgcn.wave.barrier()
+  call void @llvm.amdgcn.asyncmark()
+
+  ; Second batch: global load, async global-to-LDS, global load
+  %foo_gep2 = getelementptr i32, ptr addrspace(1) %foo, i32 2
+  %foo_v2 = load i32, ptr addrspace(1) %foo_gep2
+  %bar_gep22 = getelementptr i32, ptr addrspace(1) %bar, i32 22
+  %lds_gep22 = getelementptr i32, ptr addrspace(3) %lds, i32 22
+  call void @llvm.amdgcn.wave.barrier()
+  call void @llvm.amdgcn.global.load.async.to.lds.b32(ptr addrspace(1) %bar_gep22, ptr addrspace(3) %lds_gep22, i32 4, i32 u0x23)
+  call void @llvm.amdgcn.wave.barrier()
+  %bar_gep12 = getelementptr i32, ptr addrspace(1) %bar, i32 12
+  %bar_v12 = load i32, ptr addrspace(1) %bar_gep12
+  call void @llvm.amdgcn.asyncmark()
+
+  ; Wait for first async mark and read from LDS
+  ; This results in vmcnt(3) corresponding to the second batch.
+  call void @llvm.amdgcn.wait.asyncmark(i16 1)
+  %lds_val21 = load i32, ptr addrspace(3) %lds_gep21
+
+  ; Wait for the next lds dma
+  ; This results in vmcnt(1), corresponding to %bar_v12. Could have been combined with the lgkmcnt(1) for %lds_val21.
+  ; Note that the asyncmark is sufficient to prevent the optimizer from coalescing the previous ds_read with the next one.
+  call void @llvm.amdgcn.wait.asyncmark(i16 0)
+  %lds_val22 = load i32, ptr addrspace(3) %lds_gep22
+  %sum1 = add i32 %foo_v1, %bar_v11
+  %sum2 = add i32 %sum1, %lds_val21
+  %sum3 = add i32 %sum2, %foo_v2
+  ; Finally a vmcnt(0) for %bar_v12, which was not included in the async mark that followed it.
+  %sum4 = add i32 %sum3, %bar_v12
+  %sum5 = add i32 %sum4, %lds_val22
+  store i32 %sum5, ptr addrspace(1) %out
+
+  ret void
+}
+
+; A perfect loop that is unlikely to exist in real life. It uses only async LDS
+; DMA operations, and results in asynccnt waits that exactly match the stream
+; of those outstanding operations.
+
+define amdgpu_kernel void @test_pipelined_loop(ptr addrspace(1) %foo, ptr addrspace(3) %lds, ptr addrspace(1) %bar, ptr addrspace(1) %out, i32 %n) {
+; GFX1250-LABEL: test_pipelined_loop:
+; GFX1250:       ; %bb.0: ; %prolog
+; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
+; GFX1250-NEXT:    s_clause 0x1
+; GFX1250-NEXT:    s_load_b96 s[0:2], s[4:5], 0x24 nv
+; GFX1250-NEXT:    s_load_b32 s3, s[4:5], 0x44 nv
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX1250-NEXT:    s_add_co_i32 s6, s2, 4
+; GFX1250-NEXT:    s_mov_b32 s7, s2
+; GFX1250-NEXT:    v_mov_b32_e32 v2, s6
+; GFX1250-NEXT:    s_mov_b32 s6, 2
+; GFX1250-NEXT:    global_load_async_to_lds_b32 v1, v0, s[0:1] offset:4 nv
+; GFX1250-NEXT:    v_mov_b32_e32 v1, 4
+; GFX1250-NEXT:    ; asyncmark
+; GFX1250-NEXT:    global_load_async_to_lds_b32 v2, v1, s[0:1] offset:4 nv
+; GFX1250-NEXT:    v_mov_b32_e32 v1, 0
+; GFX1250-NEXT:    s_add_nc_u64 s[0:1], s[0:1], 8
+; GFX1250-NEXT:    ; asyncmark
+; GFX1250-NEXT:  .LBB1_1: ; %loop_body
+; GFX1250-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT:    s_add_co_i32 s8, s7, 8
+; GFX1250-NEXT:    s_add_co_i32 s6, s6, 1
+; GFX1250-NEXT:    v_mov_b32_e32 v2, s8
+; GFX1250-NEXT:    global_load_async_to_lds_b32 v2, v0, s[0:1] offset:4 nv
+; GFX1250-NEXT:    v_mov_b32_e32 v2, s7
+; GFX1250-NEXT:    ; asyncmark
+; GFX1250-NEXT:    ; wait_asyncmark(2)
+; GFX1250-NEXT:    s_wait_asynccnt 0x2
+; GFX1250-NEXT:    s_add_co_i32 s7, s7, 4
+; GFX1250-NEXT:    s_cmp_lt_i32 s6, s3
+; GFX1250-NEXT:    ds_load_b32 v2, v2
+; GFX1250-NEXT:    s_add_nc_u64 s[0:1], s[0:1], 4
+; GFX1250-NEXT:    s_wait_dscnt 0x0
+; GFX1250-NEXT:    v_add_nc_u32_e32 v1, v1, v2
+; GFX1250-NEXT:    s_cbranch_scc1 .LBB1_1
+; GFX1250-NEXT:  ; %bb.2: ; %epilog
+; GFX1250-NEXT:    s_lshl2_add_u32 s0, s3, s2
+; GFX1250-NEXT:    ; wait_asyncmark(1)
+; GFX1250-NEXT:    s_wait_asynccnt 0x1
+; GFX1250-NEXT:    s_add_co_i32 s0, s0, -8
+; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v0, s0
+; GFX1250-NEXT:    s_load_b64 s[0:1], s[4:5], 0x34 nv
+; GFX1250-NEXT:    ds_load_b32 v0, v0
+; GFX1250-NEXT:    ; wait_asyncmark(0)
+; GFX1250-NEXT:    s_wait_dscnt 0x0
+; GFX1250-NEXT:    s_wait_asynccnt 0x0
+; GFX1250-NEXT:    v_add_nc_u32_e32 v0, v1, v0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    global_store_b32 v2, v0, s[0:1]
+; GFX1250-NEXT:    s_endpgm
+prolog:
+  ; Load first iteration
+  call void @llvm.amdgcn.global.load.async.to.lds.b32(ptr addrspace(1) %foo, ptr addrspace(3) %lds, i32 4, i32 u0x20)
+  call void @llvm.amdgcn.asyncmark()
+
+  ; Load second iteration
+  %lds_gep1 = getelementptr i32, ptr addrspace(3) %lds, i32 1
+  %foo_gep1 = getelementptr i32, ptr addrspace(1) %foo, i32 1
+  call void @llvm.amdgcn.global.load.async.to.lds.b32(ptr addrspace(1) %foo_gep1, ptr addrspace(3) %lds_gep1, i32 4, i32 u0x20)
+  call void @llvm.amdgcn.asyncmark()
+
+  br label %loop_body
+
+loop_body:
+  %i = phi i32 [ 2, %prolog ], [ %i.next, %loop_body ]
+  %sum = phi i32 [ 0, %prolog ], [ %sum_i, %loop_body ]
+
+  ; Load next iteration
+  %lds_gep_cur = getelementptr i32, ptr addrspace(3) %lds, i32 %i
+  %foo_gep_cur = getelementptr i32, ptr addrspace(1) %foo, i32 %i
+  call void @llvm.amdgcn.global.load.async.to.lds.b32(ptr addrspace(1) %foo_gep_cur, ptr addrspace(3) %lds_gep_cur, i32 4, i32 u0x20)
+  call void @llvm.amdgcn.asyncmark()
+
+  ; Wait for iteration i-2 and process
+  call void @llvm.amdgcn.wait.asyncmark(i16 2)
+  %lds_idx = sub i32 %i, 2
+  %lds_gep_read = getelementptr i32, ptr addrspace(3) %lds, i32 %lds_idx
+  %lds_val = load i32, ptr addrspace(3) %lds_gep_read
+
+  %sum_i = add i32 %sum, %lds_val
+
+  %i.next = add i32 %i, 1
+  %cmp = icmp slt i32 %i.next, %n
+  br i1 %cmp, label %loop_body, label %epilog
+
+epilog:
+  ; Process remaining iterations
+  call void @llvm.amdgcn.wait.asyncmark(i16 1)
+  %lds_n_2 = sub i32 %n, 2
+  %lds_gep_n_2 = getelementptr i32, ptr addrspace(3) %lds, i32 %lds_n_2
+  %lds_val_n_2 = load i32, ptr addrspace(3) %lds_gep_n_2
+  %sum_e2 = add i32 %sum_i, %lds_val_n_2
+  %out_gep_e1 = getelementptr i32, ptr addrspace(1) %out, i32 %lds_n_2
+
+  call void @llvm.amdgcn.wait.asyncmark(i16 0)
+  %lds_n_1 = sub i32 %n, 1
+  %lds_gep_n_1 = getelementptr i32, ptr addrspace(3) %lds, i32 %lds_n_1
+  %lds_val_n_1 = load i32, ptr addrspace(3) %lds_gep_n_1
+  %sum_e1 = add i32 %sum_e2, %lds_val_n_1
+  store i32 %sum_e2, ptr addrspace(1) %bar
+
+  ret void
+}
+
+; Software pipelined loop with async global-to-LDS and global loads
+
+define amdgpu_kernel void @test_pipelined_loop_with_global(ptr addrspace(1) %foo, ptr addrspace(3) %lds, ptr addrspace(1) %bar, ptr addrspace(1) %out, i32 %n) {
+; GFX1250-LABEL: test_pipelined_loop_with_global:
+; GFX1250:       ; %bb.0: ; %prolog
+; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
+; GFX1250-NEXT:    s_clause 0x1
+; GFX1250-NEXT:    s_load_b96 s[8:10], s[4:5], 0x24 nv
+; GFX1250-NEXT:    s_load_b128 s[0:3], s[4:5], 0x34 nv
+; GFX1250-NEXT:    v_mov_b32_e32 v0, 0
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    s_load_b32 s6, s[8:9], 0x0
+; GFX1250-NEXT:    s_load_b32 s7, s[0:1], 0x0
+; GFX1250-NEXT:    v_mov_b32_e32 v1, s10
+; GFX1250-NEXT:    s_add_co_i32 s11, s10, 4
+; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT:    v_dual_mov_b32 v3, 4 :: v_dual_mov_b32 v4, s11
+; GFX1250-NEXT:    s_load_b32 s11, s[4:5], 0x44 nv
+; GFX1250-NEXT:    global_load_async_to_lds_b32 v1, v0, s[8:9] offset:4 nv
+; GFX1250-NEXT:    ; asyncmark
+; GFX1250-NEXT:    s_clause 0x1
+; GFX1250-NEXT:    global_load_b32 v1, v0, s[8:9] offset:4
+; GFX1250-NEXT:    global_load_b32 v2, v0, s[0:1] offset:4
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_add_nc_u64 s[0:1], s[0:1], 8
+; GFX1250-NEXT:    s_add_nc_u64 s[4:5], s[8:9], 8
+; GFX1250-NEXT:    s_wait_kmcnt 0x0
+; GFX1250-NEXT:    v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s7
+; GFX1250-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; GFX1250-NEXT:    global_load_async_to_lds_b32 v4, v3, s[8:9] offset:4 nv
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v4, v2
+; GFX1250-NEXT:    s_mov_b32 s8, 2
+; GFX1250-NEXT:    s_mov_b32 s9, s10
+; GFX1250-NEXT:    ; asyncmark
+; GFX1250-NEXT:  .LBB2_1: ; %loop_body
+; GFX1250-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT:    s_add_co_i32 s12, s9, 8
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    v_dual_mov_b32 v7, v4 :: v_dual_mov_b32 v9, s12
+; GFX1250-NEXT:    v_mov_b32_e32 v8, v3
+; GFX1250-NEXT:    s_clause 0x1
+; GFX1250-NEXT:    global_load_b32 v3, v0, s[4:5]
+; GFX1250-NEXT:    global_load_b32 v4, v0, s[0:1]
+; GFX1250-NEXT:    v_dual_add_nc_u32 v10, v5, v6 :: v_dual_mov_b32 v6, v2
+; GFX1250-NEXT:    global_load_async_to_lds_b32 v9, v0, s[4:5] offset:4 nv
+; GFX1250-NEXT:    v_mov_b32_e32 v9, s9
+; GFX1250-NEXT:    ; asyncmark
+; GFX1250-NEXT:    ; wait_asyncmark(2)
+; GFX1250-NEXT:    s_wait_asynccnt 0x2
+; GFX1250-NEXT:    s_wait_asynccnt 0x2
+; GFX1250-NEXT:    s_add_co_i32 s8, s8, 1
+; GFX1250-NEXT:    s_add_co_i32 s9, s9, 4
+; GFX1250-NEXT:    ds_load_b32 v9, v9
+; GFX1250-NEXT:    v_mov_b32_e32 v5, v1
+; GFX1250-NEXT:    s_cmp_lt_i32 s8, s11
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_add_nc_u64 s[0:1], s[0:1], 4
+; GFX1250-NEXT:    s_add_nc_u64 s[4:5], s[4:5], 4
+; GFX1250-NEXT:    s_wait_dscnt 0x0
+; GFX1250-NEXT:    v_add_nc_u32_e32 v9, v10, v9
+; GFX1250-NEXT:    global_store_b32 v0, v9, s[6:7]
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_add_nc_u64 s[6:7], s[6:7], 4
+; GFX1250-NEXT:    s_cbranch_scc1 .LBB2_1
+; GFX1250-NEXT:  ; %bb.2: ; %epilog
+; GFX1250-NEXT:    s_add_co_i32 s0, s11, -2
+; GFX1250-NEXT:    ; wait_asyncmark(1)
+; GFX1250-NEXT:    s_wait_asynccnt 0x1
+; GFX1250-NEXT:    s_lshl2_add_u32 s1, s0, s10
+; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT:    v_dual_add_nc_u32 v2, v8, v7 :: v_dual_mov_b32 v0, s1
+; GFX1250-NEXT:    ds_load_b32 v1, v0
+; GFX1250-NEXT:    s_wait_dscnt 0x0
+; GFX1250-NEXT:    v_dual_mov_b32 v5, s0 :: v_dual_add_nc_u32 v1, v2, v1
+; GFX1250-NEXT:    global_store_b32 v5, v1, s[2:3] scale_offset
+; GFX1250-NEXT:    ; wait_asyncmark(0)
+; GFX1250-NEXT:    s_wait_asynccnt 0x0
+; GFX1250-NEXT:    ds_load_b32 v0, v0 offset:4
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    v_add_nc_u32_e32 v1, v3, v4
+; GFX1250-NEXT:    s_wait_dscnt 0x0
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_add_nc_u32_e32 v0, v1, v0
+; GFX1250-NEXT:    global_store_b32 v5, v0, s[2:3] offset:4 scale_offset
+; GFX1250-NEXT:    s_endpgm
+prolog:
+  ; Load first iteration
+  %v0 = load i32, ptr addrspace(1) %foo
+  %g0 = load i32, ptr addrspace(1) %bar
+  call void @llvm.amdgcn.global.load.async.to.lds.b32(ptr addrspace(1) %foo, ptr addrspace(3) %lds, i32 4, i32 u0x20)
+  call void @llvm.amdgcn.asyncmark()
+
+  ; Load second iteration
+  %foo_gep1 = getelementptr i32, ptr addrspace(1) %foo, i32 1
+  %v1 = load i32, ptr addrspace(1) %foo_gep1
+  %bar_gep1 = getelementptr i32, ptr addrspace(1) %bar, i32 1
+  %g1 = load i32, ptr addrspace(1) %bar_gep1
+
+  %lds_gep1 = getelementptr i32, ptr addrspace(3) %lds, i32 1
+  call void @llvm.amdgcn.global.load.async.to.lds.b32(ptr addrspace(1) %foo_gep1, ptr addrspace(3) %lds_gep1, i32 4, i32 u0x20)
+  call void @llvm.amdgcn.asyncmark()
+
+  br label %loop_body
+
+  ; The vmcnt at the end of the prolog and at the start of the loop header seems
+  ; to be stricter than necessary because of the ordinary global operations. We
+  ; could, in principle, further relax the wait by introducing async globals (non
+  ; LDS DMA) in a similar way.
+
+loop_body:
+  %i = phi i32 [ 2, %prolog ], [ %i.next, %loop_body ]
+  %prev_v = phi i32 [ %v0, %prolog ], [ %v1, %loop_body ]
+  %prev_g = phi i32 [ %g0, %prolog ], [ %g1, %loop_body ]
+  %v1_phi = phi i32 [ %v1, %prolog ], [ %cur_v, %loop_body ]
+  %g1_phi = phi i32 [ %g1, %prolog ], [ %cur_g, %loop_body ]
+
+  ; Load next iteration
+  %foo_gep_cur = getelementptr i32, ptr addrspace(1) %foo, i32 %i
+  %cur_v = load i32, ptr addrspace(1) %foo_gep_cur
+  %bar_gep_cur = getelementptr i32, ptr addrspace(1) %bar, i32 %i
+  %cur_g = load i32, ptr addrspace(1) %bar_gep_cur
+  %lds_gep_cur = getelementptr i32, ptr addrspace(3) %lds, i32 %i
+  call void @llvm.amdgcn.global.load.async.to.lds.b32(ptr addrspace(1) %foo_gep_cur, ptr addrspace(3) %lds_gep_cur, i32 4, i32 u0x20)
+  call void @llvm.amdgcn.asyncmark()
+
+  ; Wait for iteration i-2 and process
+  call void @llvm.amdgcn.wait.asyncmark(i16 2)
+  %lds_idx = sub i32 %i, 2
+  %lds_gep_read = getelementptr i32, ptr addrspace(3) %lds, i32 %lds_idx
+  %lds_val = load i32, ptr addrspace(3) %lds_gep_read
+
+  %sum1 = add i32 %prev_v, %prev_g
+  %sum2 = add i32 %sum1, %lds_val
+  %out_gep = getelementptr i32, ptr addrspace(1) %out, i32 %lds_idx
+  store i32 %sum2, ptr addrspace(1) %out_gep
+
+  %i.next = add i32 %i, 1
+  %cmp = icmp slt i32 %i.next, %n
+  br i1 %cmp, label %loop_body, label %epilog
+
+epilog:
+  ; Process remaining iterations
+  call void @llvm.amdgcn.wait.asyncmark(i16 1)
+  %lds_n_2 = sub i32 %n, 2
+  %lds_gep_n_2 = getelementptr i32, ptr addrspace(3) %lds, i32 %lds_n_2
+  %lds_val_n_2 = load i32, ptr addrspace(3) %lds_gep_n_2
+  %sum_e1 = add i32 %v1_phi, %g1_phi
+  %sum_e2 = add i32 %sum_e1, %lds_val_n_2
+  %out_gep_e1 = getelementptr i32, ptr addrspace(1) %out, i32 %lds_n_2
+  store i32 %sum_e2, ptr addrspace(1) %out_gep_e1
+
+  call void @llvm.amdgcn.wait.asyncmark(i16 0)
+  %lds_n_1 = sub i32 %n, 1
+  %lds_gep_n_1 = getelementptr i32, ptr addrspace(3) %lds, i32 %lds_n_1
+  %lds_val_n_1 = load i32, ptr addrspace(3) %lds_gep_n_1
+  %sum_e3 = add i32 %cur_v, %cur_g
+  %sum_e4 = add i32 %sum_e3, %lds_val_n_1
+  %out_gep_e2 = getelementptr i32, ptr addrspace(1) %out, i32 %lds_n_1
+  store i32 %sum_e4, ptr addrspace(1) %out_gep_e2
+
+  ret void
+}


