[llvm-branch-commits] [llvm] [AMDGPU] Add block carried latency to CoExecSched (PR #187413)

Jeffrey Byrnes via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Tue Mar 24 15:56:21 PDT 2026


https://github.com/jrbyrnes updated https://github.com/llvm/llvm-project/pull/187413

>From ea4e247f005c2b705f2709bb2a75fb804c5d9533 Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Wed, 18 Mar 2026 12:16:52 -0700
Subject: [PATCH 1/3] [AMDGPU] Add block carried latency to CoExecSched

Change-Id: Ib04e40e57d38e127d6c5452d1719e32dacef2ade
---
 .../AMDGPU/AMDGPUCoExecSchedStrategy.cpp      | 212 ++++-
 .../Target/AMDGPU/AMDGPUCoExecSchedStrategy.h |  27 +-
 llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp   |  37 -
 llvm/lib/Target/AMDGPU/GCNSchedStrategy.h     |   4 -
 llvm/test/CodeGen/AMDGPU/coexec-scheduler.ll  | 884 +++++++++++++++++-
 5 files changed, 1069 insertions(+), 95 deletions(-)
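
For reviewers skimming the diff: a standalone sketch of the cost model this
patch adds. The snippet below is illustrative only (the names are chosen to
mirror the new code, not copied from it); the authoritative logic is in
CandidateHeuristics::tryEffectiveStall and getCarriedLatency further down.

    // Illustrative, self-contained C++ sketch of the effective-stall model.
    #include <algorithm>

    struct StallCosts {
      unsigned Ready = 0;      // cycles until the candidate becomes ready
      unsigned Structural = 0; // resource / hazard stall at the boundary
      unsigned Latency = 0;    // latency stall from in-region predecessors
      unsigned Carried = 0;    // new: latency carried in from loads defined in
                               // another block or across the loop backedge
    };

    // The effective stall is the worst of the four components; the candidate
    // with the smaller effective stall is preferred.
    unsigned effectiveStall(const StallCosts &C) {
      return std::max({C.Ready, C.Structural, C.Latency, C.Carried});
    }

    // Carried latency decays as the region is scheduled: after CurrCycle
    // cycles, only the remainder of the load latency can still stall us.
    unsigned remainingCarried(unsigned CarriedLatency, unsigned CurrCycle) {
      return CarriedLatency > CurrCycle ? CarriedLatency - CurrCycle : 0;
    }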

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.cpp
index cc5ef20847beb..db19581779211 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.cpp
@@ -176,8 +176,12 @@ CandidateHeuristics::getHWUIFromFlavor(InstructionFlavor Flavor) {
   return nullptr;
 }
 
-unsigned CandidateHeuristics::getHWUICyclesForInst(SUnit *SU) {
+unsigned CandidateHeuristics::getHWUICyclesForSU(SUnit *SU) {
   assert(SchedModel && SchedModel->hasInstrSchedModel());
+  MachineInstr *MI = SU->getInstr();
+  // Loads and stores are not pipelined.
+  if (MI->mayLoadOrStore())
+    return SchedModel->computeInstrLatency(MI, false);
   unsigned ReleaseAtCycle = 0;
   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
   for (TargetSchedModel::ProcResIter PI = SchedModel->getWriteProcResBegin(SC),
@@ -192,7 +196,23 @@ void CandidateHeuristics::updateForScheduling(SUnit *SU) {
   HardwareUnitInfo *HWUI =
       getHWUIFromFlavor(classifyFlavor(*SU->getInstr(), *SII));
   assert(HWUI);
-  HWUI->markScheduled(SU, getHWUICyclesForInst(SU));
+  HWUI->markScheduled(SU, getHWUICyclesForSU(SU));
+}
+
+unsigned CandidateHeuristics::getHWUICyclesForMI(MachineInstr *MI) {
+  assert(SchedModel && SchedModel->hasInstrSchedModel());
+  // Loads and stores are not pipelined.
+  if (MI->mayLoadOrStore())
+    return SchedModel->computeInstrLatency(MI, false);
+
+  unsigned ReleaseAtCycle = 0;
+  const MCSchedClassDesc *SC = SchedModel->resolveSchedClass(MI);
+  for (TargetSchedModel::ProcResIter PI = SchedModel->getWriteProcResBegin(SC),
+                                     PE = SchedModel->getWriteProcResEnd(SC);
+       PI != PE; ++PI) {
+    ReleaseAtCycle = std::max(ReleaseAtCycle, (unsigned)PI->ReleaseAtCycle);
+  }
+  return ReleaseAtCycle;
 }
 
 void CandidateHeuristics::initialize(ScheduleDAGMI *SchedDAG,
@@ -216,16 +236,77 @@ void CandidateHeuristics::initialize(ScheduleDAGMI *SchedDAG,
   HWUInfo[(int)InstructionFlavor::MultiCycleVALU].setProducesCoexecWindow(true);
   HWUInfo[(int)InstructionFlavor::TRANS].setProducesCoexecWindow(true);
 
-  collectHWUIPressure();
+  collectRegionSummary();
+}
+
+unsigned CandidateHeuristics::getCarriedLatency(SUnit *SU) {
+  MachineInstr *MI = SU->getInstr();
+  unsigned CarriedLatency = 0;
+  for (auto &Op : MI->operands()) {
+    if (!Op.isReg())
+      continue;
+    if (!Op.isUse())
+      continue;
+    auto Reg = Op.getReg();
+    if (!Reg.isVirtual())
+      continue;
+
+    for (auto &Def : DAG->MRI.def_instructions(Reg)) {
+      // We don't have the proper modelling to accurately measure all carried
+      // latency. Just try to measure carried latency for long latency loads to
+      // avoid long stalls.
+      if (!Def.mayLoad())
+        continue;
+
+      unsigned Latency = getHWUICyclesForMI(&Def);
+
+      // Load is carried across block
+      if (Def.getParent() != MI->getParent()) {
+        bool FoundUseInDefBlock = false;
+        for (auto &Use : DAG->MRI.use_nodbg_instructions(Reg)) {
+          if (Use.getParent() != Def.getParent())
+            continue;
+
+          SlotIndex DefIdx = DAG->getLIS()->getInstructionIndex(Def);
+          SlotIndex UseIdx = DAG->getLIS()->getInstructionIndex(Use);
+          // We have a use of this load in the def block that occurs after the
+          // load. In this case we must wait for the load in the def block, and
+          // we do not have any carried latency from this load.
+          if (SlotIndex::isEarlierInstr(DefIdx, UseIdx)) {
+            FoundUseInDefBlock = true;
+            break;
+          }
+        }
+        if (!FoundUseInDefBlock)
+          CarriedLatency = std::max(Latency, CarriedLatency);
+
+        continue;
+      }
+
+      assert(Def.getParent() == MI->getParent());
+      // Load is in the same block
+      SlotIndex LoadIdx = DAG->getLIS()->getInstructionIndex(Def);
+      SlotIndex UseIdx = DAG->getLIS()->getInstructionIndex(*MI);
+      // The load occurs after this use -- the latency is carried across the
+      // loop backedge.
+      if (SlotIndex::isEarlierInstr(UseIdx, LoadIdx))
+        CarriedLatency = std::max(Latency, CarriedLatency);
+    }
+  }
+  return CarriedLatency;
 }
 
-void CandidateHeuristics::collectHWUIPressure() {
+void CandidateHeuristics::collectRegionSummary() {
   if (!SchedModel || !SchedModel->hasInstrSchedModel())
     return;
 
   for (auto &SU : DAG->SUnits) {
-    const InstructionFlavor Flavor = classifyFlavor(*SU.getInstr(), *SII);
-    HWUInfo[(int)(Flavor)].insert(&SU, getHWUICyclesForInst(&SU));
+    MachineInstr *MI = SU.getInstr();
+    const InstructionFlavor Flavor = classifyFlavor(*MI, *SII);
+    HWUInfo[(int)(Flavor)].insert(&SU, getHWUICyclesForSU(&SU));
+    unsigned CarriedLatency = getCarriedLatency(&SU);
+    if (CarriedLatency)
+      CarriedLatencies[MI] = CarriedLatency;
   }
 
   LLVM_DEBUG(dumpRegionSummary());
@@ -268,6 +349,85 @@ void CandidateHeuristics::sortHWUIResources() {
   });
 }
 
+unsigned CandidateHeuristics::getStructuralStallCycles(SchedBoundary &Zone,
+                                                       SUnit *SU) {
+  // Only implemented for top-down scheduling currently.
+  if (!Zone.isTop() || !SU)
+    return 0;
+
+  MachineInstr *MI = SU->getInstr();
+  unsigned CurrCycle = Zone.getCurrCycle();
+  unsigned Stall = 0;
+
+  // Query SchedModel for resource stalls (unbuffered resources).
+  if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
+    const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
+    for (const MCWriteProcResEntry &PE :
+         make_range(SchedModel->getWriteProcResBegin(SC),
+                    SchedModel->getWriteProcResEnd(SC))) {
+      unsigned NextAvail =
+          Zone.getNextResourceCycle(SC, PE.ProcResourceIdx, PE.ReleaseAtCycle,
+                                    PE.AcquireAtCycle)
+              .first;
+      if (NextAvail > CurrCycle)
+        Stall = std::max(Stall, NextAvail - CurrCycle);
+    }
+  }
+
+  // Query HazardRecognizer for sequence-dependent hazard penalties.
+  if (Zone.HazardRec && Zone.HazardRec->isEnabled()) {
+    auto *HR = static_cast<GCNHazardRecognizer *>(Zone.HazardRec);
+    Stall = std::max(Stall, HR->getHazardWaitStates(MI));
+  }
+
+  return Stall;
+}
+
+bool CandidateHeuristics::tryEffectiveStall(
+    GenericSchedulerBase::SchedCandidate &Cand,
+    GenericSchedulerBase::SchedCandidate &TryCand, SchedBoundary &Zone) {
+  // Treat structural and latency stalls as a single scheduling cost for the
+  // current cycle.
+  struct StallCosts {
+    unsigned Ready = 0;
+    unsigned Structural = 0;
+    unsigned Latency = 0;
+    unsigned Effective = 0;
+    unsigned Carried = 0;
+  };
+
+  unsigned CurrCycle = Zone.getCurrCycle();
+  auto GetStallCosts = [&](SUnit *SU) {
+    unsigned ReadyCycle = Zone.isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
+    StallCosts Costs;
+    Costs.Ready = ReadyCycle > CurrCycle ? ReadyCycle - CurrCycle : 0;
+    Costs.Structural = getStructuralStallCycles(Zone, SU);
+    Costs.Latency = Zone.getLatencyStallCycles(SU);
+    unsigned SUCarriedLatency = CarriedLatencies.lookup(SU->getInstr());
+    Costs.Carried =
+        SUCarriedLatency > CurrCycle ? SUCarriedLatency - CurrCycle : 0;
+
+    Costs.Effective = std::max(
+        {Costs.Ready, Costs.Structural, Costs.Latency, Costs.Carried});
+    return Costs;
+  };
+
+  StallCosts TryCosts = GetStallCosts(TryCand.SU);
+  StallCosts CandCosts = GetStallCosts(Cand.SU);
+
+  LLVM_DEBUG(if (TryCosts.Effective || CandCosts.Effective) {
+    dbgs() << "Effective stalls: try=" << TryCosts.Effective
+           << " (ready=" << TryCosts.Ready << ", struct=" << TryCosts.Structural
+           << ", lat=" << TryCosts.Latency << ", carried=" << TryCosts.Carried << ") cand=" << CandCosts.Effective
+           << " (ready=" << CandCosts.Ready
+           << ", struct=" << CandCosts.Structural << ", carried=" << CandCosts.Carried
+           << ", lat=" << CandCosts.Latency << ")\n";
+  });
+
+  return tryLess(TryCosts.Effective, CandCosts.Effective, TryCand, Cand,
+                 AMDGPUCoExecSchedStrategy::Stall);
+}
+
 bool CandidateHeuristics::tryCriticalResourceDependency(
     GenericSchedulerBase::SchedCandidate &TryCand,
     GenericSchedulerBase::SchedCandidate &Cand, SchedBoundary *Zone) const {
@@ -593,7 +753,7 @@ bool AMDGPUCoExecSchedStrategy::tryCandidateCoexec(SchedCandidate &Cand,
   if (SameBoundary) {
     // Compare candidates by the stall they would introduce if
     // scheduled in the current cycle.
-    if (tryEffectiveStall(Cand, TryCand, *Zone))
+    if (Heurs.tryEffectiveStall(Cand, TryCand, *Zone))
       return TryCand.Reason != NoCand;
 
     Heurs.sortHWUIResources();
@@ -656,44 +816,6 @@ bool AMDGPUCoExecSchedStrategy::tryCandidateCoexec(SchedCandidate &Cand,
   return false;
 }
 
-bool AMDGPUCoExecSchedStrategy::tryEffectiveStall(SchedCandidate &Cand,
-                                                  SchedCandidate &TryCand,
-                                                  SchedBoundary &Zone) const {
-  // Treat structural and latency stalls as a single scheduling cost for the
-  // current cycle.
-  struct StallCosts {
-    unsigned Ready = 0;
-    unsigned Structural = 0;
-    unsigned Latency = 0;
-    unsigned Effective = 0;
-  };
-
-  unsigned CurrCycle = Zone.getCurrCycle();
-  auto GetStallCosts = [&](SUnit *SU) {
-    unsigned ReadyCycle = Zone.isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
-    StallCosts Costs;
-    Costs.Ready = ReadyCycle > CurrCycle ? ReadyCycle - CurrCycle : 0;
-    Costs.Structural = getStructuralStallCycles(Zone, SU);
-    Costs.Latency = Zone.getLatencyStallCycles(SU);
-    Costs.Effective = std::max({Costs.Ready, Costs.Structural, Costs.Latency});
-    return Costs;
-  };
-
-  StallCosts TryCosts = GetStallCosts(TryCand.SU);
-  StallCosts CandCosts = GetStallCosts(Cand.SU);
-
-  LLVM_DEBUG(if (TryCosts.Effective || CandCosts.Effective) {
-    dbgs() << "Effective stalls: try=" << TryCosts.Effective
-           << " (ready=" << TryCosts.Ready << ", struct=" << TryCosts.Structural
-           << ", lat=" << TryCosts.Latency << ") cand=" << CandCosts.Effective
-           << " (ready=" << CandCosts.Ready
-           << ", struct=" << CandCosts.Structural
-           << ", lat=" << CandCosts.Latency << ")\n";
-  });
-
-  return tryLess(TryCosts.Effective, CandCosts.Effective, TryCand, Cand, Stall);
-}
-
 ScheduleDAGInstrs *
 llvm::createGCNCoExecMachineScheduler(MachineSchedContext *C) {
   LLVM_DEBUG(dbgs() << "AMDGPU coexec preRA scheduler selected for "
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.h b/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.h
index f746fe72580e6..3d9b09a36b259 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUCOEXECSCHEDSTRATEGY_H
 #define LLVM_LIB_TARGET_AMDGPU_AMDGPUCOEXECSCHEDSTRATEGY_H
 
+#include "GCNHazardRecognizer.h"
 #include "GCNSchedStrategy.h"
 #include "llvm/CodeGen/MachineScheduler.h"
 
@@ -249,13 +250,25 @@ class CandidateHeuristics {
   const SIRegisterInfo *SRI;
   const TargetSchedModel *SchedModel;
   SmallVector<HardwareUnitInfo, 8> HWUInfo;
+  DenseMap<MachineInstr *, unsigned> CarriedLatencies;
 
-  /// Walk over the region and collect total usage per HardwareUnit
-  void collectHWUIPressure();
+  /// Walk over the region and collect characteristics for the various
+  /// heuristics.
+  void collectRegionSummary();
 
   /// Compute the blocking cycles for the appropriate HardwareUnit given an \p
   /// SU
-  unsigned getHWUICyclesForInst(SUnit *SU);
+  unsigned getHWUICyclesForSU(SUnit *SU);
+  /// Compute the blocking cycles for the appropriate HardwareUnit given an \p
+  /// MI
+  unsigned getHWUICyclesForMI(MachineInstr *MI);
+
+  /// Estimate the block carried latency from loads for a given \p SU. This is
+  /// essentially global scheduling information that the local scheduling
+  /// infrastructure cannot measure accurately. Thus, this method only tries to
+  /// find a reasonable upper bound for carried load latency in order to avoid
+  /// long stalls.
+  unsigned getCarriedLatency(SUnit *SU);
 
   /// Given a \p Flavor , find the corresponding HardwareUnit. \returns the
   /// mapped HardwareUnit.
@@ -275,6 +288,12 @@ class CandidateHeuristics {
   /// keeping the critical Hardware unit busy.
   void sortHWUIResources();
 
+  /// Estimate how many cycles \p SU must wait due to structural hazards at the
+  /// current boundary cycle. Returns zero when no stall is required.
+  unsigned getStructuralStallCycles(SchedBoundary &Zone, SUnit *SU);
+
+  /// Compare candidates by the stall (ready, structural, latency, or block
+  /// carried) they would introduce if scheduled in the current cycle.
+  bool tryEffectiveStall(GenericSchedulerBase::SchedCandidate &Cand,
+                         GenericSchedulerBase::SchedCandidate &TryCand,
+                         SchedBoundary &Zone);
+
   /// Check for critical resource consumption. Prefer the candidate that uses
   /// the most prioritized HardwareUnit. If both candidates use the same
   /// HarwareUnit, prefer the candidate with higher priority on that
@@ -298,8 +317,6 @@ class CandidateHeuristics {
 
 class AMDGPUCoExecSchedStrategy final : public GCNSchedStrategy {
 protected:
-  bool tryEffectiveStall(SchedCandidate &Cand, SchedCandidate &TryCand,
-                         SchedBoundary &Zone) const;
   AMDGPU::AMDGPUSchedReason LastAMDGPUReason = AMDGPU::AMDGPUSchedReason::None;
   CandidateHeuristics Heurs;
 
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index ad24bad1fd5d7..0b63d9cf66c49 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -233,43 +233,6 @@ void GCNSchedStrategy::getRegisterPressures(
   Pressure[AMDGPU::RegisterPressureSets::AGPR_32] = NewPressure.getAGPRNum();
 }
 
-unsigned GCNSchedStrategy::getStructuralStallCycles(SchedBoundary &Zone,
-                                                    SUnit *SU) const {
-  // Only implemented for top-down scheduling currently.
-  if (!Zone.isTop() || !SU)
-    return 0;
-
-  MachineInstr *MI = SU->getInstr();
-  unsigned CurrCycle = Zone.getCurrCycle();
-  unsigned Stall = 0;
-
-  // Query SchedModel for resource stalls (unbuffered resources).
-  if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
-    const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
-    for (const MCWriteProcResEntry &PE :
-         make_range(SchedModel->getWriteProcResBegin(SC),
-                    SchedModel->getWriteProcResEnd(SC))) {
-      unsigned NextAvail =
-          Zone.getNextResourceCycle(SC, PE.ProcResourceIdx, PE.ReleaseAtCycle,
-                                    PE.AcquireAtCycle)
-              .first;
-      if (NextAvail > CurrCycle)
-        Stall = std::max(Stall, NextAvail - CurrCycle);
-    }
-  }
-
-  // Query HazardRecognizer for sequence-dependent hazard penalties.
-  // AMDGPU currently installs GCNHazardRecognizer for MI scheduling only in
-  // the post-RA configuration without vreg liveness.
-  if (!DAG->hasVRegLiveness() && Zone.HazardRec &&
-      Zone.HazardRec->isEnabled()) {
-    auto *HR = static_cast<GCNHazardRecognizer *>(Zone.HazardRec);
-    Stall = std::max(Stall, HR->getHazardWaitStates(MI));
-  }
-
-  return Stall;
-}
-
 void GCNSchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU,
                                      bool AtTop,
                                      const RegPressureTracker &RPTracker,
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
index ae86388af5545..4430503d441e0 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
@@ -60,10 +60,6 @@ class GCNSchedStrategy : public GenericScheduler {
                      const SIRegisterInfo *SRI, unsigned SGPRPressure,
                      unsigned VGPRPressure, bool IsBottomUp);
 
-  /// Estimate how many cycles \p SU must wait due to structural hazards at the
-  /// current boundary cycle. Returns zero when no stall is required.
-  unsigned getStructuralStallCycles(SchedBoundary &Zone, SUnit *SU) const;
-
   /// Evaluates instructions in the pending queue using a subset of scheduling
   /// heuristics.
   ///
diff --git a/llvm/test/CodeGen/AMDGPU/coexec-scheduler.ll b/llvm/test/CodeGen/AMDGPU/coexec-scheduler.ll
index c1e7bc005998c..8fc2947bbf3c1 100644
--- a/llvm/test/CodeGen/AMDGPU/coexec-scheduler.ll
+++ b/llvm/test/CodeGen/AMDGPU/coexec-scheduler.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -amdgpu-sched-strategy=coexec --enable-post-misched=0 --verify-misched  < %s | FileCheck -check-prefix=COEXEC %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250  < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
 
 
 define amdgpu_kernel void @ds_wmma(ptr addrspace(3) %base, ptr addrspace(1) %out, i1 %br0, i32 %delta) local_unnamed_addr #0 {
@@ -44,7 +44,6 @@ define amdgpu_kernel void @ds_wmma(ptr addrspace(3) %base, ptr addrspace(1) %out
 ; COEXEC-NEXT:    v_nop
 ; COEXEC-NEXT:    v_nop
 ; COEXEC-NEXT:    v_mov_b32_e32 v92, s2
-; COEXEC-NEXT:    s_add_co_i32 s2, s2, s1
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[32:35], v92 offset:128
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[40:43], v92
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[36:39], v92 offset:192
@@ -61,6 +60,7 @@ define amdgpu_kernel void @ds_wmma(ptr addrspace(3) %base, ptr addrspace(1) %out
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[88:91], v92 offset:768
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[84:87], v92 offset:960
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[92:95], v92 offset:832
+; COEXEC-NEXT:    s_add_co_i32 s2, s2, s1
 ; COEXEC-NEXT:    s_wait_dscnt 0xc
 ; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[40:47], v[32:39], v[24:31]
 ; COEXEC-NEXT:    s_wait_dscnt 0x8
@@ -75,9 +75,9 @@ define amdgpu_kernel void @ds_wmma(ptr addrspace(3) %base, ptr addrspace(1) %out
 ; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[88:95], v[80:87], v[0:7]
 ; COEXEC-NEXT:    s_cbranch_vccnz .LBB0_1
 ; COEXEC-NEXT:  ; %bb.2: ; %end
+; COEXEC-NEXT:    s_load_b64 s[0:1], s[4:5], 0x8 nv
 ; COEXEC-NEXT:    v_nop
 ; COEXEC-NEXT:    v_mov_b32_e32 v32, 0
-; COEXEC-NEXT:    s_load_b64 s[0:1], s[4:5], 0x8 nv
 ; COEXEC-NEXT:    s_wait_kmcnt 0x0
 ; COEXEC-NEXT:    s_clause 0x7
 ; COEXEC-NEXT:    global_store_b128 v32, v[28:31], s[0:1] offset:16
@@ -353,8 +353,8 @@ define amdgpu_kernel void @ds_wmma_permute(ptr addrspace(3) %base, ptr addrspace
 ; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[120:127], v[152:159], v[0:7]
 ; COEXEC-NEXT:    s_cbranch_vccnz .LBB1_1
 ; COEXEC-NEXT:  ; %bb.2: ; %end
-; COEXEC-NEXT:    v_mov_b32_e32 v32, 0
 ; COEXEC-NEXT:    s_load_b64 s[0:1], s[4:5], 0x8 nv
+; COEXEC-NEXT:    v_mov_b32_e32 v32, 0
 ; COEXEC-NEXT:    s_wait_kmcnt 0x0
 ; COEXEC-NEXT:    s_clause 0x7
 ; COEXEC-NEXT:    global_store_b128 v32, v[28:31], s[0:1] offset:16
@@ -603,4 +603,880 @@ end:
 }
 
 
+define amdgpu_kernel void @ds_wmma_block_carried(ptr addrspace(3) %base, ptr addrspace(1) %out, i1 %br0, i32 %delta) local_unnamed_addr #0 {
+; COEXEC-LABEL: ds_wmma_block_carried:
+; COEXEC:       ; %bb.0: ; %entry
+; COEXEC-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
+; COEXEC-NEXT:    s_mov_b32 s8, 0
+; COEXEC-NEXT:    s_clause 0x1
+; COEXEC-NEXT:    s_load_b32 s2, s[4:5], 0x0 nv
+; COEXEC-NEXT:    s_load_b64 s[0:1], s[4:5], 0x10 nv
+; COEXEC-NEXT:    s_mov_b32 s9, s8
+; COEXEC-NEXT:    v_mov_b32_e32 v0, 0
+; COEXEC-NEXT:    s_mov_b32 s10, s8
+; COEXEC-NEXT:    s_mov_b32 s11, s8
+; COEXEC-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; COEXEC-NEXT:    v_mov_b32_e32 v1, v0
+; COEXEC-NEXT:    s_mov_b32 s12, s8
+; COEXEC-NEXT:    s_mov_b32 s13, s8
+; COEXEC-NEXT:    v_mov_b32_e32 v2, v0
+; COEXEC-NEXT:    s_mov_b32 s14, s8
+; COEXEC-NEXT:    s_mov_b32 s15, s8
+; COEXEC-NEXT:    s_wait_kmcnt 0x0
+; COEXEC-NEXT:    v_dual_mov_b32 v3, v0 :: v_dual_mov_b32 v6, s2
+; COEXEC-NEXT:    s_bitcmp1_b32 s0, 0
+; COEXEC-NEXT:    v_mov_b32_e32 v4, v0
+; COEXEC-NEXT:    s_cselect_b32 s3, -1, 0
+; COEXEC-NEXT:    ds_load_tr16_b128 v[24:27], v6
+; COEXEC-NEXT:    ds_load_tr16_b128 v[32:35], v6 offset:128
+; COEXEC-NEXT:    ds_load_tr16_b128 v[8:11], v6 offset:256
+; COEXEC-NEXT:    ds_load_tr16_b128 v[16:19], v6 offset:384
+; COEXEC-NEXT:    ds_load_tr16_b128 v[28:31], v6 offset:64
+; COEXEC-NEXT:    ds_load_tr16_b128 v[36:39], v6 offset:192
+; COEXEC-NEXT:    ds_load_tr16_b128 v[12:15], v6 offset:320
+; COEXEC-NEXT:    v_mov_b32_e32 v5, v0
+; COEXEC-NEXT:    ds_load_tr16_b128 v[20:23], v6 offset:448
+; COEXEC-NEXT:    s_add_co_i32 s0, s2, s1
+; COEXEC-NEXT:    s_xor_b32 s2, s3, -1
+; COEXEC-NEXT:    v_dual_mov_b32 v6, v0 :: v_dual_mov_b32 v7, v0
+; COEXEC-NEXT:    v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v48, v0
+; COEXEC-NEXT:    v_dual_mov_b32 v56, v0 :: v_dual_mov_b32 v41, v0
+; COEXEC-NEXT:    v_dual_mov_b32 v49, v0 :: v_dual_mov_b32 v57, v0
+; COEXEC-NEXT:    v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v50, v0
+; COEXEC-NEXT:    v_dual_mov_b32 v58, v0 :: v_dual_mov_b32 v43, v0
+; COEXEC-NEXT:    v_dual_mov_b32 v51, v0 :: v_dual_mov_b32 v59, v0
+; COEXEC-NEXT:    v_dual_mov_b32 v44, v0 :: v_dual_mov_b32 v52, v0
+; COEXEC-NEXT:    v_dual_mov_b32 v60, v0 :: v_dual_mov_b32 v45, v0
+; COEXEC-NEXT:    v_dual_mov_b32 v53, v0 :: v_dual_mov_b32 v61, v0
+; COEXEC-NEXT:    v_dual_mov_b32 v46, v0 :: v_dual_mov_b32 v54, v0
+; COEXEC-NEXT:    v_dual_mov_b32 v62, v0 :: v_dual_mov_b32 v47, v0
+; COEXEC-NEXT:    v_dual_mov_b32 v55, v0 :: v_dual_mov_b32 v63, v0
+; COEXEC-NEXT:    s_wait_dscnt 0x0
+; COEXEC-NEXT:  .LBB2_1: ; %loop
+; COEXEC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; COEXEC-NEXT:    s_wait_dscnt 0x4
+; COEXEC-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(TRANS32_DEP_2)
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[24:31], v[32:39], v[56:63]
+; COEXEC-NEXT:    s_wait_dscnt 0x0
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[8:15], v[16:23], v[48:55]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[24:31], v[32:39], v[56:63]
+; COEXEC-NEXT:    v_nop
+; COEXEC-NEXT:    v_mov_b64_e32 v[70:71], s[14:15]
+; COEXEC-NEXT:    v_mov_b64_e32 v[68:69], s[12:13]
+; COEXEC-NEXT:    v_mov_b64_e32 v[66:67], s[10:11]
+; COEXEC-NEXT:    v_mov_b64_e32 v[64:65], s[8:9]
+; COEXEC-NEXT:    v_mov_b32_e32 v72, s0
+; COEXEC-NEXT:    s_add_co_i32 s0, s0, s1
+; COEXEC-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[40:47], v[64:71], v[64:71], v[40:47]
+; COEXEC-NEXT:    ds_load_tr16_b128 v[24:27], v72
+; COEXEC-NEXT:    ds_load_tr16_b128 v[32:35], v72 offset:128
+; COEXEC-NEXT:    ds_load_tr16_b128 v[28:31], v72 offset:64
+; COEXEC-NEXT:    ds_load_tr16_b128 v[36:39], v72 offset:192
+; COEXEC-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s2
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[64:71], v[64:71], v[0:7]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[8:15], v[16:23], v[48:55]
+; COEXEC-NEXT:    ds_load_tr16_b128 v[8:11], v72 offset:256
+; COEXEC-NEXT:    ds_load_tr16_b128 v[16:19], v72 offset:384
+; COEXEC-NEXT:    ds_load_tr16_b128 v[12:15], v72 offset:320
+; COEXEC-NEXT:    ds_load_tr16_b128 v[20:23], v72 offset:448
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[40:47], v[64:71], v[64:71], v[40:47]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[64:71], v[64:71], v[0:7]
+; COEXEC-NEXT:    s_cbranch_vccnz .LBB2_1
+; COEXEC-NEXT:  ; %bb.2: ; %end
+; COEXEC-NEXT:    s_load_b64 s[0:1], s[4:5], 0x8 nv
+; COEXEC-NEXT:    s_wait_dscnt 0x3
+; COEXEC-NEXT:    v_nop
+; COEXEC-NEXT:    v_nop
+; COEXEC-NEXT:    v_mov_b32_e32 v8, 0
+; COEXEC-NEXT:    s_wait_kmcnt 0x0
+; COEXEC-NEXT:    s_clause 0x7
+; COEXEC-NEXT:    global_store_b128 v8, v[60:63], s[0:1] offset:16
+; COEXEC-NEXT:    global_store_b128 v8, v[56:59], s[0:1]
+; COEXEC-NEXT:    global_store_b128 v8, v[52:55], s[0:1] offset:144
+; COEXEC-NEXT:    global_store_b128 v8, v[48:51], s[0:1] offset:128
+; COEXEC-NEXT:    global_store_b128 v8, v[44:47], s[0:1] offset:272
+; COEXEC-NEXT:    global_store_b128 v8, v[40:43], s[0:1] offset:256
+; COEXEC-NEXT:    global_store_b128 v8, v[4:7], s[0:1] offset:400
+; COEXEC-NEXT:    global_store_b128 v8, v[0:3], s[0:1] offset:384
+; COEXEC-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; COEXEC-NEXT:    s_endpgm
+;
+; GCN-LABEL: ds_wmma_block_carried:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
+; GCN-NEXT:    s_clause 0x1
+; GCN-NEXT:    s_load_b64 s[0:1], s[4:5], 0x10 nv
+; GCN-NEXT:    s_load_b32 s2, s[4:5], 0x0 nv
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_mov_b32 s8, 0
+; GCN-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT:    s_mov_b32 s9, s8
+; GCN-NEXT:    s_mov_b32 s10, s8
+; GCN-NEXT:    s_mov_b32 s11, s8
+; GCN-NEXT:    s_mov_b32 s12, s8
+; GCN-NEXT:    s_mov_b32 s13, s8
+; GCN-NEXT:    s_mov_b32 s14, s8
+; GCN-NEXT:    s_mov_b32 s15, s8
+; GCN-NEXT:    s_wait_kmcnt 0x0
+; GCN-NEXT:    s_bitcmp1_b32 s0, 0
+; GCN-NEXT:    v_dual_mov_b32 v7, s2 :: v_dual_mov_b32 v1, v0
+; GCN-NEXT:    v_dual_mov_b32 v2, v0 :: v_dual_mov_b32 v3, v0
+; GCN-NEXT:    v_mov_b32_e32 v4, v0
+; GCN-NEXT:    ds_load_tr16_b128 v[32:35], v7
+; GCN-NEXT:    ds_load_tr16_b128 v[36:39], v7 offset:64
+; GCN-NEXT:    ds_load_tr16_b128 v[40:43], v7 offset:128
+; GCN-NEXT:    ds_load_tr16_b128 v[44:47], v7 offset:192
+; GCN-NEXT:    ds_load_tr16_b128 v[48:51], v7 offset:256
+; GCN-NEXT:    ds_load_tr16_b128 v[52:55], v7 offset:320
+; GCN-NEXT:    ds_load_tr16_b128 v[56:59], v7 offset:384
+; GCN-NEXT:    ds_load_tr16_b128 v[60:63], v7 offset:448
+; GCN-NEXT:    v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v6, v0
+; GCN-NEXT:    v_dual_mov_b32 v7, v0 :: v_dual_mov_b32 v8, v0
+; GCN-NEXT:    v_dual_mov_b32 v9, v0 :: v_dual_mov_b32 v10, v0
+; GCN-NEXT:    v_dual_mov_b32 v11, v0 :: v_dual_mov_b32 v12, v0
+; GCN-NEXT:    v_dual_mov_b32 v13, v0 :: v_dual_mov_b32 v14, v0
+; GCN-NEXT:    v_dual_mov_b32 v15, v0 :: v_dual_mov_b32 v16, v0
+; GCN-NEXT:    v_dual_mov_b32 v17, v0 :: v_dual_mov_b32 v18, v0
+; GCN-NEXT:    v_dual_mov_b32 v19, v0 :: v_dual_mov_b32 v20, v0
+; GCN-NEXT:    v_dual_mov_b32 v21, v0 :: v_dual_mov_b32 v22, v0
+; GCN-NEXT:    v_dual_mov_b32 v23, v0 :: v_dual_mov_b32 v24, v0
+; GCN-NEXT:    v_dual_mov_b32 v25, v0 :: v_dual_mov_b32 v26, v0
+; GCN-NEXT:    v_dual_mov_b32 v27, v0 :: v_dual_mov_b32 v28, v0
+; GCN-NEXT:    v_dual_mov_b32 v29, v0 :: v_dual_mov_b32 v30, v0
+; GCN-NEXT:    v_mov_b32_e32 v31, v0
+; GCN-NEXT:    s_cselect_b32 s3, -1, 0
+; GCN-NEXT:    s_add_co_i32 s0, s2, s1
+; GCN-NEXT:    s_xor_b32 s2, s3, -1
+; GCN-NEXT:    s_wait_dscnt 0x0
+; GCN-NEXT:  .LBB2_1: ; %loop
+; GCN-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT:    s_wait_dscnt 0x0
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[48:55], v[56:63], v[16:23]
+; GCN-NEXT:    v_nop
+; GCN-NEXT:    v_nop
+; GCN-NEXT:    v_nop
+; GCN-NEXT:    v_mov_b64_e32 v[70:71], s[14:15]
+; GCN-NEXT:    v_mov_b64_e32 v[68:69], s[12:13]
+; GCN-NEXT:    v_mov_b64_e32 v[66:67], s[10:11]
+; GCN-NEXT:    v_mov_b64_e32 v[64:65], s[8:9]
+; GCN-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s2
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[32:39], v[40:47], v[24:31]
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[48:55], v[56:63], v[16:23]
+; GCN-NEXT:    v_nop
+; GCN-NEXT:    v_nop
+; GCN-NEXT:    v_nop
+; GCN-NEXT:    v_nop
+; GCN-NEXT:    v_mov_b32_e32 v60, s0
+; GCN-NEXT:    s_add_co_i32 s0, s0, s1
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[32:39], v[40:47], v[24:31]
+; GCN-NEXT:    ds_load_tr16_b128 v[32:35], v60
+; GCN-NEXT:    ds_load_tr16_b128 v[36:39], v60 offset:64
+; GCN-NEXT:    ds_load_tr16_b128 v[40:43], v60 offset:128
+; GCN-NEXT:    ds_load_tr16_b128 v[44:47], v60 offset:192
+; GCN-NEXT:    ds_load_tr16_b128 v[48:51], v60 offset:256
+; GCN-NEXT:    ds_load_tr16_b128 v[52:55], v60 offset:320
+; GCN-NEXT:    ds_load_tr16_b128 v[56:59], v60 offset:384
+; GCN-NEXT:    ds_load_tr16_b128 v[60:63], v60 offset:448
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[64:71], v[64:71], v[8:15]
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[64:71], v[64:71], v[0:7]
+; GCN-NEXT:    s_delay_alu instid0(TRANS32_DEP_2) | instskip(NEXT) | instid1(TRANS32_DEP_2)
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[64:71], v[64:71], v[8:15]
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[64:71], v[64:71], v[0:7]
+; GCN-NEXT:    s_cbranch_vccnz .LBB2_1
+; GCN-NEXT:  ; %bb.2: ; %end
+; GCN-NEXT:    s_load_b64 s[0:1], s[4:5], 0x8 nv
+; GCN-NEXT:    s_wait_dscnt 0x7
+; GCN-NEXT:    v_mov_b32_e32 v32, 0
+; GCN-NEXT:    s_wait_kmcnt 0x0
+; GCN-NEXT:    s_clause 0x7
+; GCN-NEXT:    global_store_b128 v32, v[28:31], s[0:1] offset:16
+; GCN-NEXT:    global_store_b128 v32, v[24:27], s[0:1]
+; GCN-NEXT:    global_store_b128 v32, v[20:23], s[0:1] offset:144
+; GCN-NEXT:    global_store_b128 v32, v[16:19], s[0:1] offset:128
+; GCN-NEXT:    global_store_b128 v32, v[12:15], s[0:1] offset:272
+; GCN-NEXT:    global_store_b128 v32, v[8:11], s[0:1] offset:256
+; GCN-NEXT:    global_store_b128 v32, v[4:7], s[0:1] offset:400
+; GCN-NEXT:    global_store_b128 v32, v[0:3], s[0:1] offset:384
+; GCN-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GCN-NEXT:    s_endpgm
+entry:
+
+  %p0 = getelementptr inbounds nuw i8, ptr addrspace(3) %base, i32 0
+  %p1 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 64
+  %p2 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 128
+  %p3 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 192
+  %p4 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 256
+  %p5 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 320
+  %p6 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 384
+  %p7 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 448
+  %p8 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 512
+  %p9 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 576
+  %p10 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 640
+  %p11 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 704
+  %p12 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 768
+  %p13 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 832
+  %p14 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 896
+  %p15 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 960
+  %l0 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) %p0)
+  %l1 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p1)
+  %l2 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p2)
+  %l3 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p3)
+  %l4 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p4)
+  %l5 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p5)
+  %l6 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p6)
+  %l7 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p7)
+  %l8 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p8)
+  %l9 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p9)
+  %l10 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p10)
+  %l11 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p11)
+  %l12 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p12)
+  %l13 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p13)
+  %l14 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p14)
+  %l15 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p15)
+  %vec0 = shufflevector <8 x half> %l0, <8 x half> %l1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec1 = shufflevector <8 x half> %l2, <8 x half> %l3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec2 = shufflevector <8 x half> %l4, <8 x half> %l5, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec3 = shufflevector <8 x half> %l6, <8 x half> %l7, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec4 = shufflevector <8 x half> %l8, <8 x half> %l9, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec5 = shufflevector <8 x half> %l10, <8 x half> %l11, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec6 = shufflevector <8 x half> %l12, <8 x half> %l13, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec7 = shufflevector <8 x half> %l14, <8 x half> %l15, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+
+  br label %loop
+
+loop:
+  %baseOff = phi i32 [ 0, %entry ], [ %newBaseOff, %loop ]
+  %wvec0 = phi <8 x float> [ <float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0>, %entry ], [ %wmma01,  %loop ]
+  %wvec1 = phi <8 x float> [ <float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0>, %entry ], [ %wmma11,  %loop ]
+  %wvec2 = phi <8 x float> [ <float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0>, %entry ], [ %wmma21,  %loop ]
+  %wvec3 = phi <8 x float> [ <float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0>, %entry ], [ %wmma31,  %loop ]
+  %invec0 = phi <16 x half> [ %vec0, %entry ], [ %vec0l,  %loop ]
+  %invec1 = phi <16 x half> [ %vec1, %entry ], [ %vec1l,  %loop ]
+  %invec2 = phi <16 x half> [ %vec2, %entry ], [ %vec2l,  %loop ]
+  %invec3 = phi <16 x half> [ %vec3, %entry ], [ %vec3l,  %loop ]
+  %wmma00 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %invec0, i1 false, <16 x half> %invec1, i16 0, <8 x float> %wvec0, i1 false, i1 false)
+  %wmma01 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %invec0, i1 false, <16 x half> %invec1, i16 0, <8 x float> %wmma00, i1 false, i1 false)
+  %wmma10 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %invec2, i1 false, <16 x half> %invec3, i16 0, <8 x float> %wvec1, i1 false, i1 false)
+  %wmma11 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %invec2, i1 false, <16 x half> %invec3, i16 0, <8 x float> %wmma10, i1 false, i1 false)
+  %wmma20 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> zeroinitializer, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> %wvec2, i1 false, i1 false)
+  %wmma21 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> zeroinitializer, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> %wmma20, i1 false, i1 false)
+  %wmma30 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> zeroinitializer, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> %wvec3, i1 false, i1 false)
+  %wmma31 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> zeroinitializer, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> %wmma30, i1 false, i1 false)
+  %newBaseOff = or disjoint i32 %baseOff, %delta
+  %p0l = getelementptr inbounds nuw i8, ptr addrspace(3) %base, i32 %newBaseOff
+  %p1l = getelementptr inbounds nuw i8, ptr addrspace(3) %p0l, i32 64
+  %p2l = getelementptr inbounds nuw i8, ptr addrspace(3) %p0l, i32 128
+  %p3l = getelementptr inbounds nuw i8, ptr addrspace(3) %p0l, i32 192
+  %p4l = getelementptr inbounds nuw i8, ptr addrspace(3) %p0l, i32 256
+  %p5l = getelementptr inbounds nuw i8, ptr addrspace(3) %p0l, i32 320
+  %p6l = getelementptr inbounds nuw i8, ptr addrspace(3) %p0l, i32 384
+  %p7l = getelementptr inbounds nuw i8, ptr addrspace(3) %p0l, i32 448
+  %l0l = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) %p0l)
+  %l1l = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p1l)
+  %l2l = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p2l)
+  %l3l = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p3l)
+  %l4l = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p4l)
+  %l5l = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p5l)
+  %l6l = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p6l)
+  %l7l = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p7l)
+  %vec0l = shufflevector <8 x half> %l0l, <8 x half> %l1l, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec1l = shufflevector <8 x half> %l2l, <8 x half> %l3l, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec2l = shufflevector <8 x half> %l4l, <8 x half> %l5l, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec3l = shufflevector <8 x half> %l6l, <8 x half> %l7l, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+
+
+  br i1 %br0, label %loop, label %end
+
+end:
+  %out1 = getelementptr inbounds nuw i8, ptr addrspace(1) %out, i32 128
+  %out2 = getelementptr inbounds nuw i8, ptr addrspace(1) %out, i32 256
+  %out3 = getelementptr inbounds nuw i8, ptr addrspace(1) %out, i32 384
+  store <8 x float> %wmma01, ptr addrspace(1) %out, align 16
+  store <8 x float> %wmma11, ptr addrspace(1) %out1, align 16
+  store <8 x float> %wmma21, ptr addrspace(1) %out2, align 16
+  store <8 x float> %wmma31, ptr addrspace(1) %out3, align 16
+  ret void
+}
+
+define amdgpu_kernel void @ds_wmma_loop_carried(ptr addrspace(3) %base, ptr addrspace(1) %out, i1 %br0, i32 %delta) local_unnamed_addr #0 {
+; COEXEC-LABEL: ds_wmma_loop_carried:
+; COEXEC:       ; %bb.0: ; %entry
+; COEXEC-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
+; COEXEC-NEXT:    s_mov_b32 s8, 0
+; COEXEC-NEXT:    s_clause 0x1
+; COEXEC-NEXT:    s_load_b32 s2, s[4:5], 0x0 nv
+; COEXEC-NEXT:    s_load_b64 s[0:1], s[4:5], 0x10 nv
+; COEXEC-NEXT:    s_mov_b32 s9, s8
+; COEXEC-NEXT:    s_mov_b32 s10, s8
+; COEXEC-NEXT:    s_mov_b32 s11, s8
+; COEXEC-NEXT:    s_mov_b32 s12, s8
+; COEXEC-NEXT:    s_mov_b32 s13, s8
+; COEXEC-NEXT:    s_mov_b32 s14, s8
+; COEXEC-NEXT:    s_mov_b32 s15, s8
+; COEXEC-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; COEXEC-NEXT:    v_mov_b64_e32 v[70:71], s[14:15]
+; COEXEC-NEXT:    v_mov_b64_e32 v[68:69], s[12:13]
+; COEXEC-NEXT:    v_mov_b64_e32 v[66:67], s[10:11]
+; COEXEC-NEXT:    v_mov_b64_e32 v[64:65], s[8:9]
+; COEXEC-NEXT:    s_wait_kmcnt 0x0
+; COEXEC-NEXT:    v_mov_b32_e32 v40, s2
+; COEXEC-NEXT:    s_bitcmp1_b32 s0, 0
+; COEXEC-NEXT:    s_cselect_b32 s3, -1, 0
+; COEXEC-NEXT:    s_add_co_i32 s0, s2, s1
+; COEXEC-NEXT:    ds_load_tr16_b128 v[8:11], v40
+; COEXEC-NEXT:    ds_load_tr16_b128 v[12:15], v40 offset:64
+; COEXEC-NEXT:    ds_load_tr16_b128 v[0:3], v40 offset:256
+; COEXEC-NEXT:    ds_load_tr16_b128 v[4:7], v40 offset:320
+; COEXEC-NEXT:    ds_load_tr16_b128 v[32:35], v40 offset:512
+; COEXEC-NEXT:    ds_load_tr16_b128 v[36:39], v40 offset:576
+; COEXEC-NEXT:    ds_load_tr16_b128 v[72:75], v40 offset:768
+; COEXEC-NEXT:    ds_load_tr16_b128 v[76:79], v40 offset:832
+; COEXEC-NEXT:    ds_load_tr16_b128 v[24:27], v40 offset:128
+; COEXEC-NEXT:    ds_load_tr16_b128 v[28:31], v40 offset:192
+; COEXEC-NEXT:    ds_load_tr16_b128 v[16:19], v40 offset:384
+; COEXEC-NEXT:    ds_load_tr16_b128 v[80:83], v40 offset:640
+; COEXEC-NEXT:    ds_load_tr16_b128 v[88:91], v40 offset:896
+; COEXEC-NEXT:    ds_load_tr16_b128 v[20:23], v40 offset:448
+; COEXEC-NEXT:    ds_load_tr16_b128 v[84:87], v40 offset:704
+; COEXEC-NEXT:    ds_load_tr16_b128 v[92:95], v40 offset:960
+; COEXEC-NEXT:    s_xor_b32 s2, s3, -1
+; COEXEC-NEXT:    s_wait_dscnt 0xe
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[8:15], v[64:71], 0
+; COEXEC-NEXT:    s_wait_dscnt 0xc
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[0:7], v[64:71], 0
+; COEXEC-NEXT:    s_wait_dscnt 0xa
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[40:47], v[32:39], v[64:71], 0
+; COEXEC-NEXT:    s_wait_dscnt 0x8
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[72:79], v[64:71], 0
+; COEXEC-NEXT:    s_wait_dscnt 0x6
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[24:31], v[64:71], v[56:63]
+; COEXEC-NEXT:    s_wait_dscnt 0x2
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[16:23], v[64:71], v[48:55]
+; COEXEC-NEXT:    s_wait_dscnt 0x1
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[40:47], v[80:87], v[64:71], v[40:47]
+; COEXEC-NEXT:    s_wait_dscnt 0x0
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[88:95], v[64:71], v[32:39]
+; COEXEC-NEXT:  .LBB3_1: ; %loop
+; COEXEC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; COEXEC-NEXT:    s_wait_dscnt 0x4
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[8:15], v[24:31], v[56:63]
+; COEXEC-NEXT:    s_wait_dscnt 0x0
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[0:7], v[16:23], v[48:55]
+; COEXEC-NEXT:    s_delay_alu instid0(TRANS32_DEP_2)
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[8:15], v[24:31], v[56:63]
+; COEXEC-NEXT:    v_nop
+; COEXEC-NEXT:    v_mov_b64_e32 v[70:71], s[14:15]
+; COEXEC-NEXT:    v_mov_b64_e32 v[68:69], s[12:13]
+; COEXEC-NEXT:    v_mov_b64_e32 v[66:67], s[10:11]
+; COEXEC-NEXT:    v_mov_b64_e32 v[64:65], s[8:9]
+; COEXEC-NEXT:    v_mov_b32_e32 v72, s0
+; COEXEC-NEXT:    s_add_co_i32 s0, s0, s1
+; COEXEC-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[40:47], v[64:71], v[64:71], v[40:47]
+; COEXEC-NEXT:    ds_load_tr16_b128 v[8:11], v72
+; COEXEC-NEXT:    ds_load_tr16_b128 v[24:27], v72 offset:128
+; COEXEC-NEXT:    ds_load_tr16_b128 v[12:15], v72 offset:64
+; COEXEC-NEXT:    ds_load_tr16_b128 v[28:31], v72 offset:192
+; COEXEC-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s2
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[64:71], v[64:71], v[32:39]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[0:7], v[16:23], v[48:55]
+; COEXEC-NEXT:    ds_load_tr16_b128 v[0:3], v72 offset:256
+; COEXEC-NEXT:    ds_load_tr16_b128 v[16:19], v72 offset:384
+; COEXEC-NEXT:    ds_load_tr16_b128 v[4:7], v72 offset:320
+; COEXEC-NEXT:    ds_load_tr16_b128 v[20:23], v72 offset:448
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[40:47], v[64:71], v[64:71], v[40:47]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[64:71], v[64:71], v[32:39]
+; COEXEC-NEXT:    s_cbranch_vccnz .LBB3_1
+; COEXEC-NEXT:  ; %bb.2: ; %end
+; COEXEC-NEXT:    s_load_b64 s[0:1], s[4:5], 0x8 nv
+; COEXEC-NEXT:    s_wait_dscnt 0x3
+; COEXEC-NEXT:    v_nop
+; COEXEC-NEXT:    v_nop
+; COEXEC-NEXT:    v_mov_b32_e32 v0, 0
+; COEXEC-NEXT:    s_wait_kmcnt 0x0
+; COEXEC-NEXT:    s_clause 0x7
+; COEXEC-NEXT:    global_store_b128 v0, v[60:63], s[0:1] offset:16
+; COEXEC-NEXT:    global_store_b128 v0, v[56:59], s[0:1]
+; COEXEC-NEXT:    global_store_b128 v0, v[52:55], s[0:1] offset:144
+; COEXEC-NEXT:    global_store_b128 v0, v[48:51], s[0:1] offset:128
+; COEXEC-NEXT:    global_store_b128 v0, v[44:47], s[0:1] offset:272
+; COEXEC-NEXT:    global_store_b128 v0, v[40:43], s[0:1] offset:256
+; COEXEC-NEXT:    global_store_b128 v0, v[36:39], s[0:1] offset:400
+; COEXEC-NEXT:    global_store_b128 v0, v[32:35], s[0:1] offset:384
+; COEXEC-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; COEXEC-NEXT:    s_endpgm
+;
+; GCN-LABEL: ds_wmma_loop_carried:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
+; GCN-NEXT:    s_clause 0x1
+; GCN-NEXT:    s_load_b32 s2, s[4:5], 0x0 nv
+; GCN-NEXT:    s_load_b64 s[0:1], s[4:5], 0x10 nv
+; GCN-NEXT:    s_mov_b32 s8, 0
+; GCN-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT:    s_mov_b32 s14, s8
+; GCN-NEXT:    s_mov_b32 s15, s8
+; GCN-NEXT:    s_mov_b32 s9, s8
+; GCN-NEXT:    s_mov_b32 s10, s8
+; GCN-NEXT:    s_mov_b32 s11, s8
+; GCN-NEXT:    s_mov_b32 s12, s8
+; GCN-NEXT:    s_mov_b32 s13, s8
+; GCN-NEXT:    v_mov_b64_e32 v[70:71], s[14:15]
+; GCN-NEXT:    v_mov_b64_e32 v[68:69], s[12:13]
+; GCN-NEXT:    v_mov_b64_e32 v[66:67], s[10:11]
+; GCN-NEXT:    v_mov_b64_e32 v[64:65], s[8:9]
+; GCN-NEXT:    s_wait_kmcnt 0x0
+; GCN-NEXT:    v_mov_b32_e32 v80, s2
+; GCN-NEXT:    s_bitcmp1_b32 s0, 0
+; GCN-NEXT:    ds_load_tr16_b128 v[8:11], v80 offset:512
+; GCN-NEXT:    ds_load_tr16_b128 v[12:15], v80 offset:576
+; GCN-NEXT:    ds_load_tr16_b128 v[48:51], v80 offset:768
+; GCN-NEXT:    ds_load_tr16_b128 v[52:55], v80 offset:832
+; GCN-NEXT:    ds_load_tr16_b128 v[72:75], v80 offset:640
+; GCN-NEXT:    ds_load_tr16_b128 v[76:79], v80 offset:704
+; GCN-NEXT:    ds_load_tr16_b128 v[0:3], v80
+; GCN-NEXT:    ds_load_tr16_b128 v[4:7], v80 offset:64
+; GCN-NEXT:    ds_load_tr16_b128 v[40:43], v80 offset:256
+; GCN-NEXT:    ds_load_tr16_b128 v[44:47], v80 offset:320
+; GCN-NEXT:    ds_load_tr16_b128 v[56:59], v80 offset:384
+; GCN-NEXT:    ds_load_tr16_b128 v[60:63], v80 offset:448
+; GCN-NEXT:    s_cselect_b32 s3, -1, 0
+; GCN-NEXT:    s_add_co_i32 s0, s2, s1
+; GCN-NEXT:    s_xor_b32 s2, s3, -1
+; GCN-NEXT:    s_wait_dscnt 0xa
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[8:15], v[64:71], 0
+; GCN-NEXT:    s_wait_dscnt 0x8
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[48:55], v[64:71], 0
+; GCN-NEXT:    ds_load_tr16_b128 v[48:51], v80 offset:128
+; GCN-NEXT:    ds_load_tr16_b128 v[52:55], v80 offset:192
+; GCN-NEXT:    s_wait_dscnt 0x8
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[72:79], v[64:71], v[16:23]
+; GCN-NEXT:    ds_load_tr16_b128 v[72:75], v80 offset:896
+; GCN-NEXT:    ds_load_tr16_b128 v[76:79], v80 offset:960
+; GCN-NEXT:    s_wait_dscnt 0x8
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[0:7], v[64:71], 0
+; GCN-NEXT:    s_wait_dscnt 0x6
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[40:47], v[64:71], 0
+; GCN-NEXT:    s_wait_dscnt 0x2
+; GCN-NEXT:    s_delay_alu instid0(TRANS32_DEP_2) | instskip(NEXT) | instid1(TRANS32_DEP_2)
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[48:55], v[64:71], v[32:39]
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[56:63], v[64:71], v[24:31]
+; GCN-NEXT:    s_wait_dscnt 0x0
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[72:79], v[64:71], v[8:15]
+; GCN-NEXT:  .LBB3_1: ; %loop
+; GCN-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT:    s_wait_dscnt 0x0
+; GCN-NEXT:    s_delay_alu instid0(TRANS32_DEP_2)
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[40:47], v[56:63], v[24:31]
+; GCN-NEXT:    v_nop
+; GCN-NEXT:    v_nop
+; GCN-NEXT:    v_nop
+; GCN-NEXT:    v_mov_b64_e32 v[70:71], s[14:15]
+; GCN-NEXT:    v_mov_b64_e32 v[68:69], s[12:13]
+; GCN-NEXT:    v_mov_b64_e32 v[66:67], s[10:11]
+; GCN-NEXT:    v_mov_b64_e32 v[64:65], s[8:9]
+; GCN-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s2
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[0:7], v[48:55], v[32:39]
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[40:47], v[56:63], v[24:31]
+; GCN-NEXT:    v_nop
+; GCN-NEXT:    v_nop
+; GCN-NEXT:    v_nop
+; GCN-NEXT:    v_nop
+; GCN-NEXT:    v_mov_b32_e32 v60, s0
+; GCN-NEXT:    s_add_co_i32 s0, s0, s1
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[0:7], v[48:55], v[32:39]
+; GCN-NEXT:    ds_load_tr16_b128 v[0:3], v60
+; GCN-NEXT:    ds_load_tr16_b128 v[4:7], v60 offset:64
+; GCN-NEXT:    ds_load_tr16_b128 v[48:51], v60 offset:128
+; GCN-NEXT:    ds_load_tr16_b128 v[52:55], v60 offset:192
+; GCN-NEXT:    ds_load_tr16_b128 v[40:43], v60 offset:256
+; GCN-NEXT:    ds_load_tr16_b128 v[44:47], v60 offset:320
+; GCN-NEXT:    ds_load_tr16_b128 v[56:59], v60 offset:384
+; GCN-NEXT:    ds_load_tr16_b128 v[60:63], v60 offset:448
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[64:71], v[64:71], v[16:23]
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[64:71], v[64:71], v[8:15]
+; GCN-NEXT:    s_delay_alu instid0(TRANS32_DEP_2) | instskip(NEXT) | instid1(TRANS32_DEP_2)
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[64:71], v[64:71], v[16:23]
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[64:71], v[64:71], v[8:15]
+; GCN-NEXT:    s_cbranch_vccnz .LBB3_1
+; GCN-NEXT:  ; %bb.2: ; %end
+; GCN-NEXT:    s_load_b64 s[0:1], s[4:5], 0x8 nv
+; GCN-NEXT:    s_wait_dscnt 0x7
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_wait_kmcnt 0x0
+; GCN-NEXT:    s_clause 0x7
+; GCN-NEXT:    global_store_b128 v0, v[36:39], s[0:1] offset:16
+; GCN-NEXT:    global_store_b128 v0, v[32:35], s[0:1]
+; GCN-NEXT:    global_store_b128 v0, v[28:31], s[0:1] offset:144
+; GCN-NEXT:    global_store_b128 v0, v[24:27], s[0:1] offset:128
+; GCN-NEXT:    global_store_b128 v0, v[20:23], s[0:1] offset:272
+; GCN-NEXT:    global_store_b128 v0, v[16:19], s[0:1] offset:256
+; GCN-NEXT:    global_store_b128 v0, v[12:15], s[0:1] offset:400
+; GCN-NEXT:    global_store_b128 v0, v[8:11], s[0:1] offset:384
+; GCN-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GCN-NEXT:    s_endpgm
+entry:
+
+  %p0 = getelementptr inbounds nuw i8, ptr addrspace(3) %base, i32 0
+  %p1 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 64
+  %p2 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 128
+  %p3 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 192
+  %p4 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 256
+  %p5 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 320
+  %p6 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 384
+  %p7 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 448
+  %p8 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 512
+  %p9 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 576
+  %p10 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 640
+  %p11 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 704
+  %p12 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 768
+  %p13 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 832
+  %p14 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 896
+  %p15 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 960
+  %l0 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) %p0)
+  %l1 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p1)
+  %l2 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p2)
+  %l3 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p3)
+  %l4 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p4)
+  %l5 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p5)
+  %l6 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p6)
+  %l7 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p7)
+  %l8 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p8)
+  %l9 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p9)
+  %l10 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p10)
+  %l11 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p11)
+  %l12 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p12)
+  %l13 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p13)
+  %l14 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p14)
+  %l15 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p15)
+  %vec0 = shufflevector <8 x half> %l0, <8 x half> %l1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec1 = shufflevector <8 x half> %l2, <8 x half> %l3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec2 = shufflevector <8 x half> %l4, <8 x half> %l5, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec3 = shufflevector <8 x half> %l6, <8 x half> %l7, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec4 = shufflevector <8 x half> %l8, <8 x half> %l9, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec5 = shufflevector <8 x half> %l10, <8 x half> %l11, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec6 = shufflevector <8 x half> %l12, <8 x half> %l13, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec7 = shufflevector <8 x half> %l14, <8 x half> %l15, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %incwmma00 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec0, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> zeroinitializer, i1 false, i1 false)
+  %incwmma01 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec1, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> %incwmma00, i1 false, i1 false)
+  %incwmma10 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec2, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> zeroinitializer, i1 false, i1 false)
+  %incwmma11 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec3, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> %incwmma10, i1 false, i1 false)
+  %incwmma20 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec4, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> zeroinitializer, i1 false, i1 false)
+  %incwmma21 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec5, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> %incwmma20, i1 false, i1 false)
+  %incwmma30 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec6, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> zeroinitializer, i1 false, i1 false)
+  %incwmma31 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec7, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> %incwmma30, i1 false, i1 false)
+
+
+
+  br label %loop
+
+loop:
+  %baseOff = phi i32 [ 0, %entry ], [ %newBaseOff, %loop ]
+  %wvec0 = phi <8 x float> [ %incwmma01, %entry ], [ %wmma01,  %loop ]
+  %wvec1 = phi <8 x float> [ %incwmma11, %entry ], [ %wmma11,  %loop ]
+  %wvec2 = phi <8 x float> [ %incwmma21, %entry ], [ %wmma21,  %loop ]
+  %wvec3 = phi <8 x float> [ %incwmma31, %entry ], [ %wmma31,  %loop ]
+  %invec0 = phi <16 x half> [ %vec0, %entry ], [ %vec0l,  %loop ]
+  %invec1 = phi <16 x half> [ %vec1, %entry ], [ %vec1l,  %loop ]
+  %invec2 = phi <16 x half> [ %vec2, %entry ], [ %vec2l,  %loop ]
+  %invec3 = phi <16 x half> [ %vec3, %entry ], [ %vec3l,  %loop ]
+  %wmma00 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %invec0, i1 false, <16 x half> %invec1, i16 0, <8 x float> %wvec0, i1 false, i1 false)
+  %wmma01 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %invec0, i1 false, <16 x half> %invec1, i16 0, <8 x float> %wmma00, i1 false, i1 false)
+  %wmma10 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %invec2, i1 false, <16 x half> %invec3, i16 0, <8 x float> %wvec1, i1 false, i1 false)
+  %wmma11 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %invec2, i1 false, <16 x half> %invec3, i16 0, <8 x float> %wmma10, i1 false, i1 false)
+  %wmma20 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> zeroinitializer, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> %wvec2, i1 false, i1 false)
+  %wmma21 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> zeroinitializer, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> %wmma20, i1 false, i1 false)
+  %wmma30 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> zeroinitializer, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> %wvec3, i1 false, i1 false)
+  %wmma31 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> zeroinitializer, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> %wmma30, i1 false, i1 false)
+  %newBaseOff = or disjoint i32 %baseOff, %delta
+  %p0l = getelementptr inbounds nuw i8, ptr addrspace(3) %base, i32 %newBaseOff
+  %p1l = getelementptr inbounds nuw i8, ptr addrspace(3) %p0l, i32 64
+  %p2l = getelementptr inbounds nuw i8, ptr addrspace(3) %p0l, i32 128
+  %p3l = getelementptr inbounds nuw i8, ptr addrspace(3) %p0l, i32 192
+  %p4l = getelementptr inbounds nuw i8, ptr addrspace(3) %p0l, i32 256
+  %p5l = getelementptr inbounds nuw i8, ptr addrspace(3) %p0l, i32 320
+  %p6l = getelementptr inbounds nuw i8, ptr addrspace(3) %p0l, i32 384
+  %p7l = getelementptr inbounds nuw i8, ptr addrspace(3) %p0l, i32 448
+  %l0l = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) %p0l)
+  %l1l = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p1l)
+  %l2l = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p2l)
+  %l3l = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p3l)
+  %l4l = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p4l)
+  %l5l = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p5l)
+  %l6l = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p6l)
+  %l7l = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p7l)
+  %vec0l = shufflevector <8 x half> %l0l, <8 x half> %l1l, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec1l = shufflevector <8 x half> %l2l, <8 x half> %l3l, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec2l = shufflevector <8 x half> %l4l, <8 x half> %l5l, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec3l = shufflevector <8 x half> %l6l, <8 x half> %l7l, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+
+
+  br i1 %br0, label %loop, label %end
+
+end:
+  %out1 = getelementptr inbounds nuw i8, ptr addrspace(1) %out, i32 128
+  %out2 = getelementptr inbounds nuw i8, ptr addrspace(1) %out, i32 256
+  %out3 = getelementptr inbounds nuw i8, ptr addrspace(1) %out, i32 384
+  store <8 x float> %wmma01, ptr addrspace(1) %out, align 16
+  store <8 x float> %wmma11, ptr addrspace(1) %out1, align 16
+  store <8 x float> %wmma21, ptr addrspace(1) %out2, align 16
+  store <8 x float> %wmma31, ptr addrspace(1) %out3, align 16
+  ret void
+}
+
+define amdgpu_kernel void @ds_wmma_no_block_carried(ptr addrspace(3) %base, ptr addrspace(1) %out, i1 %br0, i32 %delta) local_unnamed_addr #0 {
+; COEXEC-LABEL: ds_wmma_no_block_carried:
+; COEXEC:       ; %bb.0: ; %entry
+; COEXEC-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
+; COEXEC-NEXT:    s_mov_b32 s8, 0
+; COEXEC-NEXT:    s_clause 0x1
+; COEXEC-NEXT:    s_load_b32 s0, s[4:5], 0x0 nv
+; COEXEC-NEXT:    s_load_b32 s1, s[4:5], 0x10 nv
+; COEXEC-NEXT:    s_mov_b32 s9, s8
+; COEXEC-NEXT:    s_mov_b32 s10, s8
+; COEXEC-NEXT:    s_mov_b32 s11, s8
+; COEXEC-NEXT:    s_mov_b32 s12, s8
+; COEXEC-NEXT:    s_mov_b32 s13, s8
+; COEXEC-NEXT:    s_mov_b32 s14, s8
+; COEXEC-NEXT:    s_mov_b32 s15, s8
+; COEXEC-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; COEXEC-NEXT:    v_mov_b64_e32 v[102:103], s[14:15]
+; COEXEC-NEXT:    v_mov_b64_e32 v[100:101], s[12:13]
+; COEXEC-NEXT:    v_mov_b64_e32 v[98:99], s[10:11]
+; COEXEC-NEXT:    v_mov_b64_e32 v[96:97], s[8:9]
+; COEXEC-NEXT:    s_wait_kmcnt 0x0
+; COEXEC-NEXT:    v_mov_b32_e32 v60, s0
+; COEXEC-NEXT:    s_bitcmp1_b32 s1, 0
+; COEXEC-NEXT:    s_cselect_b32 s0, -1, 0
+; COEXEC-NEXT:    ds_load_tr16_b128 v[0:3], v60
+; COEXEC-NEXT:    ds_load_tr16_b128 v[4:7], v60 offset:64
+; COEXEC-NEXT:    ds_load_tr16_b128 v[8:11], v60 offset:256
+; COEXEC-NEXT:    ds_load_tr16_b128 v[12:15], v60 offset:320
+; COEXEC-NEXT:    ds_load_tr16_b128 v[16:19], v60 offset:512
+; COEXEC-NEXT:    ds_load_tr16_b128 v[20:23], v60 offset:576
+; COEXEC-NEXT:    ds_load_tr16_b128 v[24:27], v60 offset:768
+; COEXEC-NEXT:    ds_load_tr16_b128 v[28:31], v60 offset:832
+; COEXEC-NEXT:    ds_load_tr16_b128 v[32:35], v60 offset:128
+; COEXEC-NEXT:    ds_load_tr16_b128 v[36:39], v60 offset:192
+; COEXEC-NEXT:    ds_load_tr16_b128 v[40:43], v60 offset:384
+; COEXEC-NEXT:    ds_load_tr16_b128 v[48:51], v60 offset:640
+; COEXEC-NEXT:    ds_load_tr16_b128 v[56:59], v60 offset:896
+; COEXEC-NEXT:    ds_load_tr16_b128 v[44:47], v60 offset:448
+; COEXEC-NEXT:    ds_load_tr16_b128 v[52:55], v60 offset:704
+; COEXEC-NEXT:    ds_load_tr16_b128 v[60:63], v60 offset:960
+; COEXEC-NEXT:    s_xor_b32 s0, s0, -1
+; COEXEC-NEXT:    s_wait_dscnt 0xe
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[88:95], v[0:7], v[96:103], 0
+; COEXEC-NEXT:    s_wait_dscnt 0xc
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[80:87], v[8:15], v[96:103], 0
+; COEXEC-NEXT:    s_wait_dscnt 0xa
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[72:79], v[16:23], v[96:103], 0
+; COEXEC-NEXT:    s_wait_dscnt 0x8
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[64:71], v[24:31], v[96:103], 0
+; COEXEC-NEXT:    s_wait_dscnt 0x6
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[88:95], v[32:39], v[96:103], v[88:95]
+; COEXEC-NEXT:    s_wait_dscnt 0x2
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[80:87], v[40:47], v[96:103], v[80:87]
+; COEXEC-NEXT:    s_wait_dscnt 0x1
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[72:79], v[48:55], v[96:103], v[72:79]
+; COEXEC-NEXT:    s_wait_dscnt 0x0
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[64:71], v[56:63], v[96:103], v[64:71]
+; COEXEC-NEXT:  .LBB4_1: ; %loop
+; COEXEC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[88:95], v[0:7], v[32:39], v[88:95]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[80:87], v[8:15], v[40:47], v[80:87]
+; COEXEC-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s0
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[72:79], v[16:23], v[48:55], v[72:79]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[64:71], v[24:31], v[56:63], v[64:71]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[88:95], v[0:7], v[32:39], v[88:95]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[80:87], v[8:15], v[40:47], v[80:87]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[72:79], v[16:23], v[48:55], v[72:79]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[64:71], v[24:31], v[56:63], v[64:71]
+; COEXEC-NEXT:    s_cbranch_vccnz .LBB4_1
+; COEXEC-NEXT:  ; %bb.2: ; %end
+; COEXEC-NEXT:    s_load_b64 s[0:1], s[4:5], 0x8 nv
+; COEXEC-NEXT:    v_nop
+; COEXEC-NEXT:    v_mov_b32_e32 v0, 0
+; COEXEC-NEXT:    s_wait_kmcnt 0x0
+; COEXEC-NEXT:    s_clause 0x7
+; COEXEC-NEXT:    global_store_b128 v0, v[92:95], s[0:1] offset:16
+; COEXEC-NEXT:    global_store_b128 v0, v[88:91], s[0:1]
+; COEXEC-NEXT:    global_store_b128 v0, v[84:87], s[0:1] offset:144
+; COEXEC-NEXT:    global_store_b128 v0, v[80:83], s[0:1] offset:128
+; COEXEC-NEXT:    global_store_b128 v0, v[76:79], s[0:1] offset:272
+; COEXEC-NEXT:    global_store_b128 v0, v[72:75], s[0:1] offset:256
+; COEXEC-NEXT:    global_store_b128 v0, v[68:71], s[0:1] offset:400
+; COEXEC-NEXT:    global_store_b128 v0, v[64:67], s[0:1] offset:384
+; COEXEC-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; COEXEC-NEXT:    s_endpgm
+;
+; GCN-LABEL: ds_wmma_no_block_carried:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
+; GCN-NEXT:    s_clause 0x1
+; GCN-NEXT:    s_load_b32 s0, s[4:5], 0x0 nv
+; GCN-NEXT:    s_load_b32 s1, s[4:5], 0x10 nv
+; GCN-NEXT:    s_mov_b32 s8, 0
+; GCN-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT:    s_mov_b32 s14, s8
+; GCN-NEXT:    s_mov_b32 s15, s8
+; GCN-NEXT:    s_mov_b32 s9, s8
+; GCN-NEXT:    s_mov_b32 s10, s8
+; GCN-NEXT:    s_mov_b32 s11, s8
+; GCN-NEXT:    s_mov_b32 s12, s8
+; GCN-NEXT:    s_mov_b32 s13, s8
+; GCN-NEXT:    v_mov_b64_e32 v[102:103], s[14:15]
+; GCN-NEXT:    v_mov_b64_e32 v[100:101], s[12:13]
+; GCN-NEXT:    v_mov_b64_e32 v[98:99], s[10:11]
+; GCN-NEXT:    v_mov_b64_e32 v[96:97], s[8:9]
+; GCN-NEXT:    s_wait_kmcnt 0x0
+; GCN-NEXT:    v_mov_b32_e32 v92, s0
+; GCN-NEXT:    s_bitcmp1_b32 s1, 0
+; GCN-NEXT:    ds_load_tr16_b128 v[0:3], v92
+; GCN-NEXT:    ds_load_tr16_b128 v[4:7], v92 offset:64
+; GCN-NEXT:    ds_load_tr16_b128 v[40:43], v92 offset:256
+; GCN-NEXT:    ds_load_tr16_b128 v[44:47], v92 offset:320
+; GCN-NEXT:    ds_load_tr16_b128 v[48:51], v92 offset:512
+; GCN-NEXT:    ds_load_tr16_b128 v[52:55], v92 offset:576
+; GCN-NEXT:    ds_load_tr16_b128 v[56:59], v92 offset:768
+; GCN-NEXT:    ds_load_tr16_b128 v[60:63], v92 offset:832
+; GCN-NEXT:    ds_load_tr16_b128 v[64:67], v92 offset:128
+; GCN-NEXT:    ds_load_tr16_b128 v[68:71], v92 offset:192
+; GCN-NEXT:    ds_load_tr16_b128 v[72:75], v92 offset:384
+; GCN-NEXT:    ds_load_tr16_b128 v[76:79], v92 offset:448
+; GCN-NEXT:    ds_load_tr16_b128 v[80:83], v92 offset:640
+; GCN-NEXT:    ds_load_tr16_b128 v[84:87], v92 offset:704
+; GCN-NEXT:    ds_load_tr16_b128 v[88:91], v92 offset:896
+; GCN-NEXT:    ds_load_tr16_b128 v[92:95], v92 offset:960
+; GCN-NEXT:    s_cselect_b32 s0, -1, 0
+; GCN-NEXT:    s_wait_dscnt 0xe
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[0:7], v[96:103], 0
+; GCN-NEXT:    s_xor_b32 s0, s0, -1
+; GCN-NEXT:    s_wait_dscnt 0xc
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[40:47], v[96:103], 0
+; GCN-NEXT:    s_wait_dscnt 0xa
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[48:55], v[96:103], 0
+; GCN-NEXT:    s_wait_dscnt 0x8
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[56:63], v[96:103], 0
+; GCN-NEXT:    s_wait_dscnt 0x6
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[64:71], v[96:103], v[32:39]
+; GCN-NEXT:    s_wait_dscnt 0x4
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[72:79], v[96:103], v[24:31]
+; GCN-NEXT:    s_wait_dscnt 0x2
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[80:87], v[96:103], v[16:23]
+; GCN-NEXT:    s_wait_dscnt 0x0
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[88:95], v[96:103], v[8:15]
+; GCN-NEXT:  .LBB4_1: ; %loop
+; GCN-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[0:7], v[64:71], v[32:39]
+; GCN-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s0
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[40:47], v[72:79], v[24:31]
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[48:55], v[80:87], v[16:23]
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[56:63], v[88:95], v[8:15]
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[0:7], v[64:71], v[32:39]
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[40:47], v[72:79], v[24:31]
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[48:55], v[80:87], v[16:23]
+; GCN-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[56:63], v[88:95], v[8:15]
+; GCN-NEXT:    s_cbranch_vccnz .LBB4_1
+; GCN-NEXT:  ; %bb.2: ; %end
+; GCN-NEXT:    s_load_b64 s[0:1], s[4:5], 0x8 nv
+; GCN-NEXT:    v_nop
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_wait_kmcnt 0x0
+; GCN-NEXT:    s_clause 0x7
+; GCN-NEXT:    global_store_b128 v0, v[36:39], s[0:1] offset:16
+; GCN-NEXT:    global_store_b128 v0, v[32:35], s[0:1]
+; GCN-NEXT:    global_store_b128 v0, v[28:31], s[0:1] offset:144
+; GCN-NEXT:    global_store_b128 v0, v[24:27], s[0:1] offset:128
+; GCN-NEXT:    global_store_b128 v0, v[20:23], s[0:1] offset:272
+; GCN-NEXT:    global_store_b128 v0, v[16:19], s[0:1] offset:256
+; GCN-NEXT:    global_store_b128 v0, v[12:15], s[0:1] offset:400
+; GCN-NEXT:    global_store_b128 v0, v[8:11], s[0:1] offset:384
+; GCN-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GCN-NEXT:    s_endpgm
+entry:
+
+  %p0 = getelementptr inbounds nuw i8, ptr addrspace(3) %base, i32 0
+  %p1 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 64
+  %p2 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 128
+  %p3 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 192
+  %p4 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 256
+  %p5 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 320
+  %p6 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 384
+  %p7 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 448
+  %p8 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 512
+  %p9 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 576
+  %p10 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 640
+  %p11 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 704
+  %p12 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 768
+  %p13 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 832
+  %p14 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 896
+  %p15 = getelementptr inbounds nuw i8, ptr addrspace(3) %p0, i32 960
+  %l0 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) %p0)
+  %l1 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p1)
+  %l2 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p2)
+  %l3 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p3)
+  %l4 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p4)
+  %l5 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p5)
+  %l6 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p6)
+  %l7 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p7)
+  %l8 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p8)
+  %l9 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p9)
+  %l10 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p10)
+  %l11 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p11)
+  %l12 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p12)
+  %l13 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p13)
+  %l14 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p14)
+  %l15 = tail call <8 x half> @llvm.amdgcn.ds.load.tr16.b128.v8f16(ptr addrspace(3) nonnull %p15)
+  %vec0 = shufflevector <8 x half> %l0, <8 x half> %l1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec1 = shufflevector <8 x half> %l2, <8 x half> %l3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec2 = shufflevector <8 x half> %l4, <8 x half> %l5, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec3 = shufflevector <8 x half> %l6, <8 x half> %l7, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec4 = shufflevector <8 x half> %l8, <8 x half> %l9, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec5 = shufflevector <8 x half> %l10, <8 x half> %l11, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec6 = shufflevector <8 x half> %l12, <8 x half> %l13, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %vec7 = shufflevector <8 x half> %l14, <8 x half> %l15, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %incwmma00 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec0, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> zeroinitializer, i1 false, i1 false)
+  %incwmma01 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec1, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> %incwmma00, i1 false, i1 false)
+  %incwmma10 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec2, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> zeroinitializer, i1 false, i1 false)
+  %incwmma11 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec3, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> %incwmma10, i1 false, i1 false)
+  %incwmma20 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec4, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> zeroinitializer, i1 false, i1 false)
+  %incwmma21 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec5, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> %incwmma20, i1 false, i1 false)
+  %incwmma30 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec6, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> zeroinitializer, i1 false, i1 false)
+  %incwmma31 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec7, i1 false, <16 x half> zeroinitializer, i16 0, <8 x float> %incwmma30, i1 false, i1 false)
+
+
+
+  br label %loop
+
+loop:
+  %baseOff = phi i32 [ 0, %entry ], [ %newBaseOff, %loop ]
+  %wvec0 = phi <8 x float> [ %incwmma01, %entry ], [ %wmma01,  %loop ]
+  %wvec1 = phi <8 x float> [ %incwmma11, %entry ], [ %wmma11,  %loop ]
+  %wvec2 = phi <8 x float> [ %incwmma21, %entry ], [ %wmma21,  %loop ]
+  %wvec3 = phi <8 x float> [ %incwmma31, %entry ], [ %wmma31,  %loop ]
+  %wmma00 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec0, i1 false, <16 x half> %vec1, i16 0, <8 x float> %wvec0, i1 false, i1 false)
+  %wmma01 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec0, i1 false, <16 x half> %vec1, i16 0, <8 x float> %wmma00, i1 false, i1 false)
+  %wmma10 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec2, i1 false, <16 x half> %vec3, i16 0, <8 x float> %wvec1, i1 false, i1 false)
+  %wmma11 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec2, i1 false, <16 x half> %vec3, i16 0, <8 x float> %wmma10, i1 false, i1 false)
+  %wmma20 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec4, i1 false, <16 x half> %vec5, i16 0, <8 x float> %wvec2, i1 false, i1 false)
+  %wmma21 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec4, i1 false, <16 x half> %vec5, i16 0, <8 x float> %wmma20, i1 false, i1 false)
+  %wmma30 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec6, i1 false, <16 x half> %vec7, i16 0, <8 x float> %wvec3, i1 false, i1 false)
+  %wmma31 = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x32.f16.v8f32.v16f16(i1 false, <16 x half> %vec6, i1 false, <16 x half> %vec7, i16 0, <8 x float> %wmma30, i1 false, i1 false)
+  %newBaseOff = or disjoint i32 %baseOff, %delta
+  br i1 %br0, label %loop, label %end
+
+end:
+  %out1 = getelementptr inbounds nuw i8, ptr addrspace(1) %out, i32 128
+  %out2 = getelementptr inbounds nuw i8, ptr addrspace(1) %out, i32 256
+  %out3 = getelementptr inbounds nuw i8, ptr addrspace(1) %out, i32 384
+  store <8 x float> %wmma01, ptr addrspace(1) %out, align 16
+  store <8 x float> %wmma11, ptr addrspace(1) %out1, align 16
+  store <8 x float> %wmma21, ptr addrspace(1) %out2, align 16
+  store <8 x float> %wmma31, ptr addrspace(1) %out3, align 16
+  ret void
+}
+
+
 attributes #0 = { "amdgpu-flat-work-group-size"="32,32" "amdgpu-waves-per-eu"="1,1" }

>From f4180a72024b30efc50d2e44fb0a8f925c1e6725 Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Tue, 24 Mar 2026 15:54:23 -0700
Subject: [PATCH 2/3] Claude Code review

Change-Id: Iab06de2981b27667cc29a56931dd378ecf7a1b0c
---
 .../AMDGPU/AMDGPUCoExecSchedStrategy.cpp      |  42 ++--
 .../Target/AMDGPU/AMDGPUCoExecSchedStrategy.h |   5 +
 llvm/test/CodeGen/AMDGPU/coexec-scheduler.ll  | 216 +++++++++---------
 3 files changed, 132 insertions(+), 131 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.cpp
index db19581779211..af773a50b8345 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.cpp
@@ -176,14 +176,12 @@ CandidateHeuristics::getHWUIFromFlavor(InstructionFlavor Flavor) {
   return nullptr;
 }
 
-unsigned CandidateHeuristics::getHWUICyclesForSU(SUnit *SU) {
-  assert(SchedModel && SchedModel->hasInstrSchedModel());
-  MachineInstr *MI = SU->getInstr();
+unsigned CandidateHeuristics::getMaxBlockingCycles(const MCSchedClassDesc *SC, const MachineInstr *MI) {
   // Loads and stores are not pipelined
   if (MI->mayLoadOrStore())
     return SchedModel->computeInstrLatency(MI, false);
+
   unsigned ReleaseAtCycle = 0;
-  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
   for (TargetSchedModel::ProcResIter PI = SchedModel->getWriteProcResBegin(SC),
                                      PE = SchedModel->getWriteProcResEnd(SC);
        PI != PE; ++PI) {
@@ -192,27 +190,22 @@ unsigned CandidateHeuristics::getHWUICyclesForSU(SUnit *SU) {
   return ReleaseAtCycle;
 }
 
-void CandidateHeuristics::updateForScheduling(SUnit *SU) {
-  HardwareUnitInfo *HWUI =
-      getHWUIFromFlavor(classifyFlavor(*SU->getInstr(), *SII));
-  assert(HWUI);
-  HWUI->markScheduled(SU, getHWUICyclesForSU(SU));
+unsigned CandidateHeuristics::getHWUICyclesForSU(SUnit *SU) {
+  assert(SchedModel && SchedModel->hasInstrSchedModel());
+  return getMaxBlockingCycles(DAG->getSchedClass(SU), SU->getInstr());
+
 }
 
 unsigned CandidateHeuristics::getHWUICyclesForMI(MachineInstr *MI) {
   assert(SchedModel && SchedModel->hasInstrSchedModel());
-  // Loads and stores are not pipelined
-  if (MI->mayLoadOrStore())
-    return SchedModel->computeInstrLatency(MI, false);
+  return getMaxBlockingCycles(SchedModel->resolveSchedClass(MI), MI);
+}
 
-  unsigned ReleaseAtCycle = 0;
-  const MCSchedClassDesc *SC = SchedModel->resolveSchedClass(MI);
-  for (TargetSchedModel::ProcResIter PI = SchedModel->getWriteProcResBegin(SC),
-                                     PE = SchedModel->getWriteProcResEnd(SC);
-       PI != PE; ++PI) {
-    ReleaseAtCycle = std::max(ReleaseAtCycle, (unsigned)PI->ReleaseAtCycle);
-  }
-  return ReleaseAtCycle;
+void CandidateHeuristics::updateForScheduling(SUnit *SU) {
+  HardwareUnitInfo *HWUI =
+      getHWUIFromFlavor(classifyFlavor(*SU->getInstr(), *SII));
+  assert(HWUI);
+  HWUI->markScheduled(SU, getHWUICyclesForSU(SU));
 }
 
 void CandidateHeuristics::initialize(ScheduleDAGMI *SchedDAG,
@@ -375,7 +368,7 @@ unsigned CandidateHeuristics::getStructuralStallCycles(SchedBoundary &Zone,
   }
 
   // Query HazardRecognizer for sequence-dependent hazard penalties.
-  if (Zone.HazardRec && Zone.HazardRec->isEnabled()) {
+  if (!DAG->hasVRegLiveness() && Zone.HazardRec && Zone.HazardRec->isEnabled()) {
     auto *HR = static_cast<GCNHazardRecognizer *>(Zone.HazardRec);
     Stall = std::max(Stall, HR->getHazardWaitStates(MI));
   }
@@ -383,7 +376,6 @@ unsigned CandidateHeuristics::getStructuralStallCycles(SchedBoundary &Zone,
   return Stall;
 }
 
-
 bool CandidateHeuristics::tryEffectiveStall(
     GenericSchedulerBase::SchedCandidate &Cand,
     GenericSchedulerBase::SchedCandidate &TryCand, SchedBoundary &Zone) {
@@ -404,10 +396,8 @@ bool CandidateHeuristics::tryEffectiveStall(
     Costs.Ready = ReadyCycle > CurrCycle ? ReadyCycle - CurrCycle : 0;
     Costs.Structural = getStructuralStallCycles(Zone, SU);
     Costs.Latency = Zone.getLatencyStallCycles(SU);
-    unsigned TryCarriedLatency = CarriedLatencies.contains(TryCand.SU->getInstr())
-                                   ? CarriedLatencies[TryCand.SU->getInstr()]
-                                   : 0;
-    Costs.Carried = TryCarriedLatency > CurrCycle ? TryCarriedLatency - CurrCycle : 0;
+    unsigned CarriedLatency = CarriedLatencies.lookup_or(SU->getInstr(), 0);
+    Costs.Carried = CarriedLatency > CurrCycle ? CarriedLatency - CurrCycle : 0;
 
     Costs.Effective = std::max({Costs.Ready, Costs.Structural, Costs.Latency, Costs.Carried});
     return Costs;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.h b/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.h
index 3d9b09a36b259..831f97e60395e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.h
@@ -256,6 +256,11 @@ class CandidateHeuristics {
   /// heuristics.
   void collectRegionSummary();
 
+
+  /// \returns the maximum blocking cycles according to the SchedModel for a given
+  /// MCSchedClassDesc \p SC
+  unsigned getMaxBlockingCycles(const MCSchedClassDesc *SC, const MachineInstr *MI);
+
   /// Compute the blocking cycles for the appropriate HardwareUnit given an \p
   /// SU
   unsigned getHWUICyclesForSU(SUnit *SU);
diff --git a/llvm/test/CodeGen/AMDGPU/coexec-scheduler.ll b/llvm/test/CodeGen/AMDGPU/coexec-scheduler.ll
index 8fc2947bbf3c1..b596608bb22a9 100644
--- a/llvm/test/CodeGen/AMDGPU/coexec-scheduler.ll
+++ b/llvm/test/CodeGen/AMDGPU/coexec-scheduler.ll
@@ -285,72 +285,72 @@ define amdgpu_kernel void @ds_wmma_permute(ptr addrspace(3) %base, ptr addrspace
 ; COEXEC-NEXT:    v_mov_b32_e32 v31, v0
 ; COEXEC-NEXT:  .LBB1_1: ; %loop
 ; COEXEC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; COEXEC-NEXT:    s_and_b32 vcc_lo, exec_lo, s0
 ; COEXEC-NEXT:    s_add_co_i32 s7, s2, s6
-; COEXEC-NEXT:    s_add_co_i32 s8, s3, s6
-; COEXEC-NEXT:    s_add_co_i32 s6, s6, s1
 ; COEXEC-NEXT:    v_nop
 ; COEXEC-NEXT:    v_nop
 ; COEXEC-NEXT:    v_nop
 ; COEXEC-NEXT:    v_nop
-; COEXEC-NEXT:    v_mov_b32_e32 v124, s7
-; COEXEC-NEXT:    s_and_b32 vcc_lo, exec_lo, s0
-; COEXEC-NEXT:    v_mov_b32_e32 v156, s8
-; COEXEC-NEXT:    ds_load_tr16_b128 v[32:35], v124
-; COEXEC-NEXT:    ds_load_tr16_b128 v[36:39], v124 offset:64
-; COEXEC-NEXT:    ds_load_tr16_b128 v[40:43], v156
-; COEXEC-NEXT:    ds_load_tr16_b128 v[44:47], v156 offset:64
-; COEXEC-NEXT:    ds_load_tr16_b128 v[48:51], v124 offset:256
-; COEXEC-NEXT:    ds_load_tr16_b128 v[56:59], v156 offset:256
-; COEXEC-NEXT:    ds_load_tr16_b128 v[52:55], v124 offset:320
-; COEXEC-NEXT:    ds_load_tr16_b128 v[60:63], v156 offset:320
-; COEXEC-NEXT:    ds_load_tr16_b128 v[64:67], v124 offset:512
-; COEXEC-NEXT:    ds_load_tr16_b128 v[72:75], v156 offset:512
-; COEXEC-NEXT:    ds_load_tr16_b128 v[68:71], v124 offset:576
-; COEXEC-NEXT:    ds_load_tr16_b128 v[76:79], v156 offset:576
-; COEXEC-NEXT:    ds_load_tr16_b128 v[80:83], v124 offset:768
-; COEXEC-NEXT:    ds_load_tr16_b128 v[88:91], v156 offset:768
-; COEXEC-NEXT:    ds_load_tr16_b128 v[84:87], v124 offset:832
-; COEXEC-NEXT:    ds_load_tr16_b128 v[92:95], v156 offset:832
-; COEXEC-NEXT:    ds_load_tr16_b128 v[96:99], v124 offset:128
-; COEXEC-NEXT:    ds_load_tr16_b128 v[104:107], v124 offset:384
-; COEXEC-NEXT:    ds_load_tr16_b128 v[112:115], v124 offset:640
-; COEXEC-NEXT:    ds_load_tr16_b128 v[120:123], v124 offset:896
+; COEXEC-NEXT:    v_mov_b32_e32 v92, s7
+; COEXEC-NEXT:    ds_load_tr16_b128 v[32:35], v92
+; COEXEC-NEXT:    ds_load_tr16_b128 v[36:39], v92 offset:64
+; COEXEC-NEXT:    ds_load_tr16_b128 v[40:43], v92 offset:128
+; COEXEC-NEXT:    ds_load_tr16_b128 v[48:51], v92 offset:256
+; COEXEC-NEXT:    ds_load_tr16_b128 v[56:59], v92 offset:384
+; COEXEC-NEXT:    ds_load_tr16_b128 v[64:67], v92 offset:512
+; COEXEC-NEXT:    ds_load_tr16_b128 v[72:75], v92 offset:640
+; COEXEC-NEXT:    ds_load_tr16_b128 v[80:83], v92 offset:768
+; COEXEC-NEXT:    ds_load_tr16_b128 v[88:91], v92 offset:896
+; COEXEC-NEXT:    ds_load_tr16_b128 v[44:47], v92 offset:192
+; COEXEC-NEXT:    ds_load_tr16_b128 v[52:55], v92 offset:320
+; COEXEC-NEXT:    ds_load_tr16_b128 v[60:63], v92 offset:448
+; COEXEC-NEXT:    ds_load_tr16_b128 v[68:71], v92 offset:576
+; COEXEC-NEXT:    ds_load_tr16_b128 v[76:79], v92 offset:704
+; COEXEC-NEXT:    ds_load_tr16_b128 v[84:87], v92 offset:832
+; COEXEC-NEXT:    ds_load_tr16_b128 v[92:95], v92 offset:960
+; COEXEC-NEXT:    s_add_co_i32 s7, s3, s6
+; COEXEC-NEXT:    s_add_co_i32 s6, s6, s1
+; COEXEC-NEXT:    v_mov_b32_e32 v156, s7
+; COEXEC-NEXT:    ds_load_tr16_b128 v[96:99], v156
+; COEXEC-NEXT:    ds_load_tr16_b128 v[100:103], v156 offset:64
+; COEXEC-NEXT:    ds_load_tr16_b128 v[104:107], v156 offset:256
+; COEXEC-NEXT:    ds_load_tr16_b128 v[108:111], v156 offset:320
+; COEXEC-NEXT:    ds_load_tr16_b128 v[112:115], v156 offset:512
+; COEXEC-NEXT:    ds_load_tr16_b128 v[116:119], v156 offset:576
+; COEXEC-NEXT:    ds_load_tr16_b128 v[120:123], v156 offset:768
+; COEXEC-NEXT:    ds_load_tr16_b128 v[124:127], v156 offset:832
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[128:131], v156 offset:128
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[136:139], v156 offset:384
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[144:147], v156 offset:640
-; COEXEC-NEXT:    s_wait_dscnt 0x13
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[32:39], v[40:47], v[24:31]
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[152:155], v156 offset:896
-; COEXEC-NEXT:    ds_load_tr16_b128 v[100:103], v124 offset:192
-; COEXEC-NEXT:    ds_load_tr16_b128 v[108:111], v124 offset:448
-; COEXEC-NEXT:    ds_load_tr16_b128 v[116:119], v124 offset:704
-; COEXEC-NEXT:    ds_load_tr16_b128 v[124:127], v124 offset:960
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[132:135], v156 offset:192
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[140:143], v156 offset:448
-; COEXEC-NEXT:    s_wait_dscnt 0x16
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[48:55], v[56:63], v[16:23]
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[148:151], v156 offset:704
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[156:159], v156 offset:960
-; COEXEC-NEXT:    s_wait_dscnt 0x14
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[64:71], v[72:79], v[8:15]
-; COEXEC-NEXT:    s_wait_dscnt 0x10
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[80:87], v[88:95], v[0:7]
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[32:39], v[40:47], v[24:31]
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[48:55], v[56:63], v[16:23]
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[64:71], v[72:79], v[8:15]
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[80:87], v[88:95], v[0:7]
+; COEXEC-NEXT:    s_wait_dscnt 0xe
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[32:39], v[96:103], v[24:31]
+; COEXEC-NEXT:    s_wait_dscnt 0xc
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[48:55], v[104:111], v[16:23]
+; COEXEC-NEXT:    s_wait_dscnt 0xa
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[64:71], v[112:119], v[8:15]
+; COEXEC-NEXT:    s_wait_dscnt 0x8
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[80:87], v[120:127], v[0:7]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[32:39], v[96:103], v[24:31]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[48:55], v[104:111], v[16:23]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[64:71], v[112:119], v[8:15]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[80:87], v[120:127], v[0:7]
 ; COEXEC-NEXT:    s_wait_dscnt 0x3
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[96:103], v[128:135], v[24:31]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[40:47], v[128:135], v[24:31]
 ; COEXEC-NEXT:    s_wait_dscnt 0x2
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[104:111], v[136:143], v[16:23]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[56:63], v[136:143], v[16:23]
 ; COEXEC-NEXT:    s_wait_dscnt 0x1
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[112:119], v[144:151], v[8:15]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[72:79], v[144:151], v[8:15]
 ; COEXEC-NEXT:    s_wait_dscnt 0x0
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[120:127], v[152:159], v[0:7]
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[96:103], v[128:135], v[24:31]
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[104:111], v[136:143], v[16:23]
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[112:119], v[144:151], v[8:15]
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[120:127], v[152:159], v[0:7]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[88:95], v[152:159], v[0:7]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[24:31], v[40:47], v[128:135], v[24:31]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[16:23], v[56:63], v[136:143], v[16:23]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[8:15], v[72:79], v[144:151], v[8:15]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[88:95], v[152:159], v[0:7]
 ; COEXEC-NEXT:    s_cbranch_vccnz .LBB1_1
 ; COEXEC-NEXT:  ; %bb.2: ; %end
 ; COEXEC-NEXT:    s_load_b64 s[0:1], s[4:5], 0x8 nv
@@ -627,15 +627,15 @@ define amdgpu_kernel void @ds_wmma_block_carried(ptr addrspace(3) %base, ptr add
 ; COEXEC-NEXT:    s_bitcmp1_b32 s0, 0
 ; COEXEC-NEXT:    v_mov_b32_e32 v4, v0
 ; COEXEC-NEXT:    s_cselect_b32 s3, -1, 0
-; COEXEC-NEXT:    ds_load_tr16_b128 v[24:27], v6
-; COEXEC-NEXT:    ds_load_tr16_b128 v[32:35], v6 offset:128
-; COEXEC-NEXT:    ds_load_tr16_b128 v[8:11], v6 offset:256
-; COEXEC-NEXT:    ds_load_tr16_b128 v[16:19], v6 offset:384
-; COEXEC-NEXT:    ds_load_tr16_b128 v[28:31], v6 offset:64
-; COEXEC-NEXT:    ds_load_tr16_b128 v[36:39], v6 offset:192
-; COEXEC-NEXT:    ds_load_tr16_b128 v[12:15], v6 offset:320
+; COEXEC-NEXT:    ds_load_tr16_b128 v[8:11], v6
+; COEXEC-NEXT:    ds_load_tr16_b128 v[16:19], v6 offset:128
+; COEXEC-NEXT:    ds_load_tr16_b128 v[24:27], v6 offset:256
+; COEXEC-NEXT:    ds_load_tr16_b128 v[32:35], v6 offset:384
+; COEXEC-NEXT:    ds_load_tr16_b128 v[12:15], v6 offset:64
+; COEXEC-NEXT:    ds_load_tr16_b128 v[20:23], v6 offset:192
+; COEXEC-NEXT:    ds_load_tr16_b128 v[28:31], v6 offset:320
 ; COEXEC-NEXT:    v_mov_b32_e32 v5, v0
-; COEXEC-NEXT:    ds_load_tr16_b128 v[20:23], v6 offset:448
+; COEXEC-NEXT:    ds_load_tr16_b128 v[36:39], v6 offset:448
 ; COEXEC-NEXT:    s_add_co_i32 s0, s2, s1
 ; COEXEC-NEXT:    s_xor_b32 s2, s3, -1
 ; COEXEC-NEXT:    v_dual_mov_b32 v6, v0 :: v_dual_mov_b32 v7, v0
@@ -654,12 +654,9 @@ define amdgpu_kernel void @ds_wmma_block_carried(ptr addrspace(3) %base, ptr add
 ; COEXEC-NEXT:    s_wait_dscnt 0x0
 ; COEXEC-NEXT:  .LBB2_1: ; %loop
 ; COEXEC-NEXT:    ; =>This Inner Loop Header: Depth=1
-; COEXEC-NEXT:    s_wait_dscnt 0x4
-; COEXEC-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(TRANS32_DEP_2)
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[24:31], v[32:39], v[56:63]
-; COEXEC-NEXT:    s_wait_dscnt 0x0
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[8:15], v[16:23], v[48:55]
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[24:31], v[32:39], v[56:63]
+; COEXEC-NEXT:    v_nop
+; COEXEC-NEXT:    v_nop
+; COEXEC-NEXT:    v_nop
 ; COEXEC-NEXT:    v_nop
 ; COEXEC-NEXT:    v_mov_b64_e32 v[70:71], s[14:15]
 ; COEXEC-NEXT:    v_mov_b64_e32 v[68:69], s[12:13]
@@ -667,25 +664,31 @@ define amdgpu_kernel void @ds_wmma_block_carried(ptr addrspace(3) %base, ptr add
 ; COEXEC-NEXT:    v_mov_b64_e32 v[64:65], s[8:9]
 ; COEXEC-NEXT:    v_mov_b32_e32 v72, s0
 ; COEXEC-NEXT:    s_add_co_i32 s0, s0, s1
-; COEXEC-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; COEXEC-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(TRANS32_DEP_1)
 ; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[40:47], v[64:71], v[64:71], v[40:47]
-; COEXEC-NEXT:    ds_load_tr16_b128 v[24:27], v72
-; COEXEC-NEXT:    ds_load_tr16_b128 v[32:35], v72 offset:128
-; COEXEC-NEXT:    ds_load_tr16_b128 v[28:31], v72 offset:64
-; COEXEC-NEXT:    ds_load_tr16_b128 v[36:39], v72 offset:192
 ; COEXEC-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s2
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[64:71], v[64:71], v[0:7]
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[8:15], v[16:23], v[48:55]
-; COEXEC-NEXT:    ds_load_tr16_b128 v[8:11], v72 offset:256
-; COEXEC-NEXT:    ds_load_tr16_b128 v[16:19], v72 offset:384
-; COEXEC-NEXT:    ds_load_tr16_b128 v[12:15], v72 offset:320
-; COEXEC-NEXT:    ds_load_tr16_b128 v[20:23], v72 offset:448
 ; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[40:47], v[64:71], v[64:71], v[40:47]
+; COEXEC-NEXT:    s_wait_dscnt 0x4
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[8:15], v[16:23], v[56:63]
+; COEXEC-NEXT:    s_wait_dscnt 0x0
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[24:31], v[32:39], v[48:55]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[64:71], v[64:71], v[0:7]
+; COEXEC-NEXT:    s_delay_alu instid0(TRANS32_DEP_3)
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[8:15], v[16:23], v[56:63]
+; COEXEC-NEXT:    ds_load_tr16_b128 v[8:11], v72
+; COEXEC-NEXT:    ds_load_tr16_b128 v[16:19], v72 offset:128
+; COEXEC-NEXT:    ds_load_tr16_b128 v[12:15], v72 offset:64
+; COEXEC-NEXT:    ds_load_tr16_b128 v[20:23], v72 offset:192
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[24:31], v[32:39], v[48:55]
+; COEXEC-NEXT:    ds_load_tr16_b128 v[24:27], v72 offset:256
+; COEXEC-NEXT:    ds_load_tr16_b128 v[32:35], v72 offset:384
+; COEXEC-NEXT:    ds_load_tr16_b128 v[28:31], v72 offset:320
+; COEXEC-NEXT:    ds_load_tr16_b128 v[36:39], v72 offset:448
 ; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[0:7], v[64:71], v[64:71], v[0:7]
 ; COEXEC-NEXT:    s_cbranch_vccnz .LBB2_1
 ; COEXEC-NEXT:  ; %bb.2: ; %end
 ; COEXEC-NEXT:    s_load_b64 s[0:1], s[4:5], 0x8 nv
-; COEXEC-NEXT:    s_wait_dscnt 0x3
+; COEXEC-NEXT:    s_wait_dscnt 0x7
 ; COEXEC-NEXT:    v_nop
 ; COEXEC-NEXT:    v_nop
 ; COEXEC-NEXT:    v_mov_b32_e32 v8, 0
@@ -924,47 +927,44 @@ define amdgpu_kernel void @ds_wmma_loop_carried(ptr addrspace(3) %base, ptr addr
 ; COEXEC-NEXT:    s_bitcmp1_b32 s0, 0
 ; COEXEC-NEXT:    s_cselect_b32 s3, -1, 0
 ; COEXEC-NEXT:    s_add_co_i32 s0, s2, s1
-; COEXEC-NEXT:    ds_load_tr16_b128 v[8:11], v40
-; COEXEC-NEXT:    ds_load_tr16_b128 v[12:15], v40 offset:64
-; COEXEC-NEXT:    ds_load_tr16_b128 v[0:3], v40 offset:256
-; COEXEC-NEXT:    ds_load_tr16_b128 v[4:7], v40 offset:320
+; COEXEC-NEXT:    ds_load_tr16_b128 v[0:3], v40
+; COEXEC-NEXT:    ds_load_tr16_b128 v[4:7], v40 offset:64
+; COEXEC-NEXT:    ds_load_tr16_b128 v[8:11], v40 offset:256
+; COEXEC-NEXT:    ds_load_tr16_b128 v[12:15], v40 offset:320
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[32:35], v40 offset:512
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[36:39], v40 offset:576
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[72:75], v40 offset:768
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[76:79], v40 offset:832
-; COEXEC-NEXT:    ds_load_tr16_b128 v[24:27], v40 offset:128
-; COEXEC-NEXT:    ds_load_tr16_b128 v[28:31], v40 offset:192
-; COEXEC-NEXT:    ds_load_tr16_b128 v[16:19], v40 offset:384
+; COEXEC-NEXT:    ds_load_tr16_b128 v[16:19], v40 offset:128
+; COEXEC-NEXT:    ds_load_tr16_b128 v[20:23], v40 offset:192
+; COEXEC-NEXT:    ds_load_tr16_b128 v[24:27], v40 offset:384
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[80:83], v40 offset:640
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[88:91], v40 offset:896
-; COEXEC-NEXT:    ds_load_tr16_b128 v[20:23], v40 offset:448
+; COEXEC-NEXT:    ds_load_tr16_b128 v[28:31], v40 offset:448
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[84:87], v40 offset:704
 ; COEXEC-NEXT:    ds_load_tr16_b128 v[92:95], v40 offset:960
 ; COEXEC-NEXT:    s_xor_b32 s2, s3, -1
 ; COEXEC-NEXT:    s_wait_dscnt 0xe
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[8:15], v[64:71], 0
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[0:7], v[64:71], 0
 ; COEXEC-NEXT:    s_wait_dscnt 0xc
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[0:7], v[64:71], 0
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[8:15], v[64:71], 0
 ; COEXEC-NEXT:    s_wait_dscnt 0xa
 ; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[40:47], v[32:39], v[64:71], 0
 ; COEXEC-NEXT:    s_wait_dscnt 0x8
 ; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[72:79], v[64:71], 0
 ; COEXEC-NEXT:    s_wait_dscnt 0x6
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[24:31], v[64:71], v[56:63]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[16:23], v[64:71], v[56:63]
 ; COEXEC-NEXT:    s_wait_dscnt 0x2
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[16:23], v[64:71], v[48:55]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[24:31], v[64:71], v[48:55]
 ; COEXEC-NEXT:    s_wait_dscnt 0x1
 ; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[40:47], v[80:87], v[64:71], v[40:47]
 ; COEXEC-NEXT:    s_wait_dscnt 0x0
 ; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[88:95], v[64:71], v[32:39]
 ; COEXEC-NEXT:  .LBB3_1: ; %loop
 ; COEXEC-NEXT:    ; =>This Inner Loop Header: Depth=1
-; COEXEC-NEXT:    s_wait_dscnt 0x4
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[8:15], v[24:31], v[56:63]
-; COEXEC-NEXT:    s_wait_dscnt 0x0
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[0:7], v[16:23], v[48:55]
-; COEXEC-NEXT:    s_delay_alu instid0(TRANS32_DEP_2)
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[8:15], v[24:31], v[56:63]
+; COEXEC-NEXT:    v_nop
+; COEXEC-NEXT:    v_nop
+; COEXEC-NEXT:    v_nop
 ; COEXEC-NEXT:    v_nop
 ; COEXEC-NEXT:    v_mov_b64_e32 v[70:71], s[14:15]
 ; COEXEC-NEXT:    v_mov_b64_e32 v[68:69], s[12:13]
@@ -972,25 +972,31 @@ define amdgpu_kernel void @ds_wmma_loop_carried(ptr addrspace(3) %base, ptr addr
 ; COEXEC-NEXT:    v_mov_b64_e32 v[64:65], s[8:9]
 ; COEXEC-NEXT:    v_mov_b32_e32 v72, s0
 ; COEXEC-NEXT:    s_add_co_i32 s0, s0, s1
-; COEXEC-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; COEXEC-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(TRANS32_DEP_1)
 ; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[40:47], v[64:71], v[64:71], v[40:47]
-; COEXEC-NEXT:    ds_load_tr16_b128 v[8:11], v72
-; COEXEC-NEXT:    ds_load_tr16_b128 v[24:27], v72 offset:128
-; COEXEC-NEXT:    ds_load_tr16_b128 v[12:15], v72 offset:64
-; COEXEC-NEXT:    ds_load_tr16_b128 v[28:31], v72 offset:192
 ; COEXEC-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s2
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[64:71], v[64:71], v[32:39]
-; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[0:7], v[16:23], v[48:55]
-; COEXEC-NEXT:    ds_load_tr16_b128 v[0:3], v72 offset:256
-; COEXEC-NEXT:    ds_load_tr16_b128 v[16:19], v72 offset:384
-; COEXEC-NEXT:    ds_load_tr16_b128 v[4:7], v72 offset:320
-; COEXEC-NEXT:    ds_load_tr16_b128 v[20:23], v72 offset:448
 ; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[40:47], v[64:71], v[64:71], v[40:47]
+; COEXEC-NEXT:    s_wait_dscnt 0x4
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[0:7], v[16:23], v[56:63]
+; COEXEC-NEXT:    s_wait_dscnt 0x0
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[8:15], v[24:31], v[48:55]
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[64:71], v[64:71], v[32:39]
+; COEXEC-NEXT:    s_delay_alu instid0(TRANS32_DEP_3)
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[56:63], v[0:7], v[16:23], v[56:63]
+; COEXEC-NEXT:    ds_load_tr16_b128 v[0:3], v72
+; COEXEC-NEXT:    ds_load_tr16_b128 v[16:19], v72 offset:128
+; COEXEC-NEXT:    ds_load_tr16_b128 v[4:7], v72 offset:64
+; COEXEC-NEXT:    ds_load_tr16_b128 v[20:23], v72 offset:192
+; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[48:55], v[8:15], v[24:31], v[48:55]
+; COEXEC-NEXT:    ds_load_tr16_b128 v[8:11], v72 offset:256
+; COEXEC-NEXT:    ds_load_tr16_b128 v[24:27], v72 offset:384
+; COEXEC-NEXT:    ds_load_tr16_b128 v[12:15], v72 offset:320
+; COEXEC-NEXT:    ds_load_tr16_b128 v[28:31], v72 offset:448
 ; COEXEC-NEXT:    v_wmma_f32_16x16x32_f16 v[32:39], v[64:71], v[64:71], v[32:39]
 ; COEXEC-NEXT:    s_cbranch_vccnz .LBB3_1
 ; COEXEC-NEXT:  ; %bb.2: ; %end
 ; COEXEC-NEXT:    s_load_b64 s[0:1], s[4:5], 0x8 nv
-; COEXEC-NEXT:    s_wait_dscnt 0x3
+; COEXEC-NEXT:    s_wait_dscnt 0x7
 ; COEXEC-NEXT:    v_nop
 ; COEXEC-NEXT:    v_nop
 ; COEXEC-NEXT:    v_mov_b32_e32 v0, 0

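For reference, the blocking-cycle query that PATCH 2 factors out of the per-SU and per-MI paths reduces to the following pattern once the scheduler plumbing is stripped away. This is an illustrative restatement only, not additional patch code; the TargetSchedModel iterator API is the one already used above, while the free-function framing and parameter names are assumptions made for the sketch:

  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/TargetSchedule.h"
  #include <algorithm>

  using namespace llvm;

  // Unpipelined loads/stores occupy their hardware unit for the full
  // instruction latency; everything else occupies it for the largest
  // ReleaseAtCycle among the write proc resources of its sched class.
  static unsigned maxBlockingCycles(const TargetSchedModel &SM,
                                    const MCSchedClassDesc *SC,
                                    const MachineInstr *MI) {
    if (MI->mayLoadOrStore())
      return SM.computeInstrLatency(MI, /*UseDefaultDefLatency=*/false);
    unsigned Cycles = 0;
    for (auto PI = SM.getWriteProcResBegin(SC), PE = SM.getWriteProcResEnd(SC);
         PI != PE; ++PI)
      Cycles = std::max(Cycles, unsigned(PI->ReleaseAtCycle));
    return Cycles;
  }

The only difference between the SU and MI entry points is how SC is resolved: DAG->getSchedClass(SU) when an SUnit is available, SchedModel->resolveSchedClass(MI) otherwise.
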
>From 2a74c82905905c85ad517358e56239092e2e1387 Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Tue, 24 Mar 2026 15:55:22 -0700
Subject: [PATCH 3/3] Formatting

Change-Id: I3d89fba145471141ef945b1de15330caa245e82d
---
 llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.cpp | 7 ++++---
 llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.h   | 8 ++++----
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.cpp
index af773a50b8345..9e070dbdea16b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.cpp
@@ -176,7 +176,8 @@ CandidateHeuristics::getHWUIFromFlavor(InstructionFlavor Flavor) {
   return nullptr;
 }
 
-unsigned CandidateHeuristics::getMaxBlockingCycles(const MCSchedClassDesc *SC, const MachineInstr *MI) {
+unsigned CandidateHeuristics::getMaxBlockingCycles(const MCSchedClassDesc *SC,
+                                                   const MachineInstr *MI) {
   // Loads and stores are not pipelined
   if (MI->mayLoadOrStore())
     return SchedModel->computeInstrLatency(MI, false);
@@ -193,7 +194,6 @@ unsigned CandidateHeuristics::getMaxBlockingCycles(const MCSchedClassDesc *SC, c
 unsigned CandidateHeuristics::getHWUICyclesForSU(SUnit *SU) {
   assert(SchedModel && SchedModel->hasInstrSchedModel());
   return getMaxBlockingCycles(DAG->getSchedClass(SU), SU->getInstr());
-
 }
 
 unsigned CandidateHeuristics::getHWUICyclesForMI(MachineInstr *MI) {
@@ -368,7 +368,8 @@ unsigned CandidateHeuristics::getStructuralStallCycles(SchedBoundary &Zone,
   }
 
   // Query HazardRecognizer for sequence-dependent hazard penalties.
-  if (!DAG->hasVRegLiveness() && Zone.HazardRec && Zone.HazardRec->isEnabled()) {
+  if (!DAG->hasVRegLiveness() && Zone.HazardRec &&
+      Zone.HazardRec->isEnabled()) {
     auto *HR = static_cast<GCNHazardRecognizer *>(Zone.HazardRec);
     Stall = std::max(Stall, HR->getHazardWaitStates(MI));
   }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.h b/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.h
index 831f97e60395e..363bf6d767f19 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCoExecSchedStrategy.h
@@ -256,10 +256,10 @@ class CandidateHeuristics {
   /// heuristics.
   void collectRegionSummary();
 
-
-  /// \returns the maximum blocking cycles according to the SchedModel for a given
-  /// MCSchedClassDesc \p SC
-  unsigned getMaxBlockingCycles(const MCSchedClassDesc *SC, const MachineInstr *MI);
+  /// \returns the maximum blocking cycles according to the SchedModel for a
+  /// given MCSchedClassDesc \p SC
+  unsigned getMaxBlockingCycles(const MCSchedClassDesc *SC,
+                                const MachineInstr *MI);
 
   /// Compute the blocking cycles for the appropriate HardwareUnit given an \p
   /// SU

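As background for the tryEffectiveStall hunk in PATCH 2: a candidate's stall estimate is the worst of four independently computed components, with the block-carried term sitting alongside the ready, structural, and latency terms. The sketch below is a standalone restatement of that cost shape; the Costs field names mirror the patch, but the free function and its parameters are assumptions made for illustration:

  #include <algorithm>

  // The components overlap in time, so the effective stall is the worst
  // single component, not their sum.
  struct StallCosts {
    unsigned Ready = 0;      // cycles until the candidate is ready to issue
    unsigned Structural = 0; // hardware-unit occupancy and hazard wait states
    unsigned Latency = 0;    // latency stall reported by the scheduling zone
    unsigned Carried = 0;    // block-carried latency from defs outside the region
    unsigned Effective = 0;
  };

  static StallCosts computeStall(unsigned CurrCycle, unsigned ReadyCycle,
                                 unsigned Structural, unsigned Latency,
                                 unsigned CarriedReadyCycle) {
    StallCosts C;
    C.Ready = ReadyCycle > CurrCycle ? ReadyCycle - CurrCycle : 0;
    C.Structural = Structural;
    C.Latency = Latency;
    C.Carried = CarriedReadyCycle > CurrCycle ? CarriedReadyCycle - CurrCycle : 0;
    C.Effective = std::max({C.Ready, C.Structural, C.Latency, C.Carried});
    return C;
  }

The same hunk also switches the carried-latency lookup to CarriedLatencies.lookup_or(SU->getInstr(), 0), so each candidate is costed against its own recorded carried latency (defaulting to zero) rather than always against TryCand's.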

