[llvm] 1c8d7ea - [AMDGPU] Implement pipeline solver for non-trivial pipelines

Jeffrey Byrnes via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 17 16:22:35 PDT 2022


Author: Jeffrey Byrnes
Date: 2022-08-17T16:21:59-07:00
New Revision: 1c8d7ea973290aac4d14f5464f944c4918191cb5

URL: https://github.com/llvm/llvm-project/commit/1c8d7ea973290aac4d14f5464f944c4918191cb5
DIFF: https://github.com/llvm/llvm-project/commit/1c8d7ea973290aac4d14f5464f944c4918191cb5.diff

LOG: [AMDGPU] Implement pipeline solver for non-trivial pipelines

Requested SchedGroup pipelines may be non-trivial to satisfy. A minimal example is if the requested pipeline is {2 VMEM, 2 VALU, 2 VMEM} and the original order of SUnits is {VMEM, VALU, VMEM, VALU, VMEM}. Because of existing dependencies, the choice of which SchedGroup the middle VMEM goes into impacts how closely we are able to match the requested pipeline. Minimizing the degree of misfit (as measured by the number of edges we can't add) w.r.t. the instruction -> SchedGroup mapping appears to be an NP-hard problem. This patch implements the PipelineSolver class, which produces a solution for the defined problem for the sched_group_barrier mutation. The solver has both an exponential time exact algorithm and a greedy algorithm. The patch includes controls that allow the user to select between the greedy and exact algorithms.
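
To make the search concrete, below is a minimal standalone sketch of the
branch-and-bound idea behind the exact solver. It does not use the patch's
SchedGroup/SUnit API -- all names in it are hypothetical -- and it simplifies
one point: in the real solver the cost of a placement depends on the edges
added by earlier placements (recomputed via addEdges/removeEdges), whereas
here Cost[i][g] is a fixed table of edges missed when instruction i lands in
its g-th candidate group. Each instruction either takes a candidate group or
is left unassigned for MissPenalty, and any branch whose running cost already
meets the best known cost is pruned:

  #include <climits>
  #include <cstdio>
  #include <vector>

  // Hypothetical input: Cost[i][g] = edges we would fail to add if
  // conflicted instruction i is placed in its g-th candidate group.
  using CostRow = std::vector<int>;

  static void solveExactSketch(const std::vector<CostRow> &Cost,
                               int MissPenalty, size_t Inst, int CurrCost,
                               int &BestCost) {
    if (CurrCost >= BestCost)
      return; // Prune: this branch cannot beat the best known assignment.
    if (Inst == Cost.size()) {
      BestCost = CurrCost; // Every instruction was placed or skipped.
      return;
    }
    // Try each candidate group (the real solver visits them cheapest-first).
    for (int GroupCost : Cost[Inst])
      solveExactSketch(Cost, MissPenalty, Inst + 1, CurrCost + GroupCost,
                       BestCost);
    // Also try leaving the instruction out of the pipeline entirely.
    solveExactSketch(Cost, MissPenalty, Inst + 1, CurrCost + MissPenalty,
                     BestCost);
  }

  int main() {
    // Two conflicted instructions, two candidate groups each.
    std::vector<CostRow> Cost = {{2, 0}, {1, 3}};
    int BestCost = INT_MAX;
    solveExactSketch(Cost, /*MissPenalty=*/2, /*Inst=*/0, /*CurrCost=*/0,
                     BestCost);
    printf("best cost: %d\n", BestCost); // prints 1
  }

The solver controls can be exercised from the command line; e.g., to force
the exact solver regardless of problem size (foo.ll is a placeholder; the
flags are the ones added by this patch and used in the test RUN lines):

  llc -march=amdgcn -mcpu=gfx90a -amdgpu-igrouplp=1 \
      -amdgpu-igrouplp-exact-solver foo.ll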

Differential Revision: https://reviews.llvm.org/D130797

Added: 
    llvm/test/CodeGen/AMDGPU/igrouplp-dag-mutation.ll
    llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
    llvm/test/CodeGen/AMDGPU/igrouplp-dag-mutation.mir
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll
    llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
index fdf9f457c3e77..360fc65e63a74 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
@@ -27,7 +27,7 @@
 
 using namespace llvm;
 
-#define DEBUG_TYPE "machine-scheduler"
+#define DEBUG_TYPE "igrouplp"
 
 namespace {
 
@@ -37,29 +37,35 @@ static cl::opt<bool>
                             "their ordering for scheduling"),
                    cl::init(false));
 
-static cl::opt<Optional<unsigned>>
-    VMEMGroupMaxSize("amdgpu-igrouplp-vmem-group-size", cl::init(None),
-                     cl::Hidden,
-                     cl::desc("The maximum number of instructions to include "
-                              "in VMEM group."));
-
-static cl::opt<Optional<unsigned>>
-    MFMAGroupMaxSize("amdgpu-igrouplp-mfma-group-size", cl::init(None),
-                     cl::Hidden,
-                     cl::desc("The maximum number of instructions to include "
-                              "in MFMA group."));
-
-static cl::opt<Optional<unsigned>>
-    LDRGroupMaxSize("amdgpu-igrouplp-ldr-group-size", cl::init(None),
-                    cl::Hidden,
-                    cl::desc("The maximum number of instructions to include "
-                             "in lds/gds read group."));
-
-static cl::opt<Optional<unsigned>>
-    LDWGroupMaxSize("amdgpu-igrouplp-ldw-group-size", cl::init(None),
-                    cl::Hidden,
-                    cl::desc("The maximum number of instructions to include "
-                             "in lds/gds write group."));
+static cl::opt<bool> EnableExactSolver(
+    "amdgpu-igrouplp-exact-solver", cl::Hidden,
+    cl::desc("Whether to use the exponential time solver to fit "
+             "the instructions to the pipeline as closely as "
+             "possible."),
+    cl::init(false));
+
+static cl::opt<unsigned> CutoffForExact(
+    "amdgpu-igrouplp-exact-solver-cutoff", cl::init(0), cl::Hidden,
+    cl::desc("The maximum number of scheduling group conflicts "
+             "which we attempt to solve with the exponential time "
+             "exact solver. Problem sizes greater than this will "
+             "be solved by the less accurate greedy algorithm. Selecting "
+             "solver by size is superseded by manually selecting "
+             "the solver (e.g. by amdgpu-igrouplp-exact-solver)."));
+
+static cl::opt<uint64_t> MaxBranchesExplored(
+    "amdgpu-igrouplp-exact-solver-max-branches", cl::init(0), cl::Hidden,
+    cl::desc("The number of branches that we are willing to explore with "
+             "the exact algorithm before giving up."));
+
+static cl::opt<bool> UseCostHeur(
+    "amdgpu-igrouplp-exact-solver-cost-heur", cl::init(true), cl::Hidden,
+    cl::desc("Whether to use the cost heuristic to make choices as we "
+             "traverse the search space using the exact solver. Defaulted "
+             "to on, and if turned off, we will use the node order -- "
+             "attempting to put the later nodes in the later sched groups. "
+             "Experimentally, results are mixed, so this should be set on a "
+             "case-by-case basis."));
 
// Components of the mask that determines which instruction types may be
 // classified into a SchedGroup.
@@ -80,6 +86,8 @@ enum class SchedGroupMask {
   LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ ALL)
 };
 
+typedef DenseMap<SUnit *, SmallVector<int, 4>> SUnitsToCandidateSGsMap;
+
 // Classify instructions into groups to enable fine tuned control over the
 // scheduler. These groups may be more specific than current SchedModel
 // instruction classes.
@@ -97,8 +105,8 @@ class SchedGroup {
   // SyncID.
   int SyncID = 0;
 
-  // Collection of SUnits that are classified as members of this group.
-  SmallVector<SUnit *, 32> Collection;
+  // SGID is used to map instructions to candidate SchedGroups
+  int SGID;
 
   ScheduleDAGInstrs *DAG;
 
@@ -111,58 +119,625 @@ class SchedGroup {
   // SchedGroup object.
   bool canAddMI(const MachineInstr &MI) const;
 
+public:
+  // Collection of SUnits that are classified as members of this group.
+  SmallVector<SUnit *, 32> Collection;
+
   // Returns true if SU can be added to this SchedGroup.
   bool canAddSU(SUnit &SU) const;
 
-  // Returns true if no more instructions may be added to this group.
-  bool isFull() const;
-
-  // Add SU to the SchedGroup.
-  void add(SUnit &SU) {
-    LLVM_DEBUG(dbgs() << "For SchedGroup with mask "
-                      << format_hex((int)SGMask, 10, true) << " adding "
-                      << *SU.getInstr());
-    Collection.push_back(&SU);
-  }
-
-public:
   // Add DAG dependencies from all SUnits in this SchedGroup and this SU. If
   // MakePred is true, SU will be a predecessor of the SUnits in this
   // SchedGroup, otherwise SU will be a successor.
   void link(SUnit &SU, bool MakePred = false);
 
-  // Add DAG dependencies from all SUnits in this SchedGroup and this SU. Use
-  // the predicate to determine whether SU should be a predecessor (P = true)
-  // or a successor (P = false) of this SchedGroup.
+  // Add DAG dependencies, tracking which edges are added in AddedEdges, and
+  // return the count of missed edges.
+  int link(SUnit &SU, bool MakePred,
+           std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges);
+
+  // Add DAG dependencies from all SUnits in this SchedGroup and this SU.
+  // Use the predicate to determine whether SU should be a predecessor (P =
+  // true) or a successor (P = false) of this SchedGroup.
   void link(SUnit &SU, function_ref<bool(const SUnit *A, const SUnit *B)> P);
 
   // Add DAG dependencies such that SUnits in this group shall be ordered
   // before SUnits in OtherGroup.
   void link(SchedGroup &OtherGroup);
 
+  // Returns true if no more instructions may be added to this group.
+  bool isFull() const { return MaxSize && Collection.size() >= *MaxSize; }
+
+  // Add SU to the SchedGroup.
+  void add(SUnit &SU) {
+    LLVM_DEBUG(dbgs() << "For SchedGroup with mask "
+                      << format_hex((int)SGMask, 10, true) << " adding "
+                      << *SU.getInstr());
+    Collection.push_back(&SU);
+  }
+
+  // Remove last element in the SchedGroup
+  void pop() { Collection.pop_back(); }
+
   // Identify and add all relevant SUs from the DAG to this SchedGroup.
   void initSchedGroup();
 
   // Add instructions to the SchedGroup bottom up starting from RIter.
-  // ConflictedInstrs is a set of instructions that should not be added to the
+  // PipelineInstrs is a set of instructions that should not be added to the
   // SchedGroup even when the other conditions for adding it are satisfied.
   // RIter will be added to the SchedGroup as well, and dependencies will be
   // added so that RIter will always be scheduled at the end of the group.
   void initSchedGroup(std::vector<SUnit>::reverse_iterator RIter,
-                      DenseSet<SUnit *> &ConflictedInstrs);
+                      SUnitsToCandidateSGsMap &SyncedInstrs);
+
+  void initSchedGroup(SUnitsToCandidateSGsMap &SyncedInstrs);
 
   int getSyncID() { return SyncID; }
 
+  int getSGID() { return SGID; }
+
+  SchedGroupMask getMask() { return SGMask; }
+
   SchedGroup(SchedGroupMask SGMask, Optional<unsigned> MaxSize,
              ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
       : SGMask(SGMask), MaxSize(MaxSize), DAG(DAG), TII(TII) {}
 
   SchedGroup(SchedGroupMask SGMask, Optional<unsigned> MaxSize, int SyncID,
-             ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
-      : SGMask(SGMask), MaxSize(MaxSize), SyncID(SyncID), DAG(DAG), TII(TII) {}
+             int SGID, ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
+      : SGMask(SGMask), MaxSize(MaxSize), SyncID(SyncID), SGID(SGID), DAG(DAG),
+        TII(TII) {}
 };
 
+// Remove all existing edges from a SCHED_BARRIER or SCHED_GROUP_BARRIER.
+static void resetEdges(SUnit &SU, ScheduleDAGInstrs *DAG) {
+  assert(SU.getInstr()->getOpcode() == AMDGPU::SCHED_BARRIER ||
+         SU.getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER);
+
+  while (!SU.Preds.empty())
+    for (auto &P : SU.Preds)
+      SU.removePred(P);
+
+  while (!SU.Succs.empty())
+    for (auto &S : SU.Succs)
+      for (auto &SP : S.getSUnit()->Preds)
+        if (SP.getSUnit() == &SU)
+          S.getSUnit()->removePred(SP);
+}
+
+typedef std::pair<SUnit *, SmallVector<int, 4>> SUToCandSGsPair;
+typedef SmallVector<SUToCandSGsPair, 4> SUsToCandSGsVec;
+
+// The PipelineSolver is used to assign SUnits to SchedGroups in a pipeline
+// in non-trivial cases. For example, if the requested pipeline is
+// {VMEM_READ, VALU, MFMA, VMEM_READ} and we encounter a VMEM_READ instruction
+// in the DAG, then we will have an instruction that cannot be trivially
+// assigned to a SchedGroup. The PipelineSolver class implements two algorithms
+// to find a good solution to the pipeline -- a greedy algorithm and an exact
+// algorithm. The exact algorithm has an exponential time complexity and should
+// only be used for small sized problems or medium sized problems where an exact
+// solution is highly desired.
+class PipelineSolver {
+  ScheduleDAGMI *DAG;
+
+  // Instructions that can be assigned to multiple SchedGroups
+  DenseMap<int, SUnitsToCandidateSGsMap> SyncedInstrs;
+  SmallVector<SUsToCandSGsVec, 4> PipelineInstrs;
+  DenseMap<int, SmallVector<SchedGroup, 4>> SyncedSchedGroups;
+  // The current working pipeline
+  SmallVector<SmallVector<SchedGroup, 4>, 4> CurrPipeline;
+  // The pipeline that has the best solution found so far
+  SmallVector<SmallVector<SchedGroup, 4>, 4> BestPipeline;
+
+  // Whether or not we actually have any SyncedInstrs to try to solve.
+  bool NeedsSolver = false;
+
+  // Compute an estimate of the size of the search tree -- the true size is
+  // the product of each conflictedInst.Matches.size() across all SyncPipelines
+  unsigned computeProblemSize();
+
+  // The cost penalty of not assigning a SU to a SchedGroup
+  int MissPenalty = 0;
+
+  // Costs in terms of the number of edges we are unable to add
+  int BestCost = -1;
+  int CurrCost = 0;
+
+  // Index pointing to the conflicting instruction that is currently being
+  // fitted
+  int CurrConflInstNo = 0;
+  // Index to the pipeline that is currently being fitted
+  int CurrSyncGroupIdx = 0;
+  // The first non-trivial pipeline
+  int BeginSyncGroupIdx = 0;
+
+  // How many branches we have explored
+  uint64_t BranchesExplored = 0;
+
+  // Update indices to fit next conflicting instruction
+  void advancePosition();
+  // Recede indices to attempt to find better fit for previous conflicting
+  // instruction
+  void retreatPosition();
+
+  // The exponential time algorithm which finds the provably best fit
+  bool solveExact();
+  // The polynomial time algorithm which attempts to find a good fit
+  bool solveGreedy();
+  // Whether or not the current solution is optimal
+  bool checkOptimal();
+  // Populate the ready list, prioritizing fewest missed edges first
+  void populateReadyList(SUToCandSGsPair &CurrSU,
+                         SmallVectorImpl<std::pair<int, int>> &ReadyList,
+                         SmallVectorImpl<SchedGroup> &SyncPipeline);
+  // Add edges corresponding to the SchedGroups as assigned by solver
+  void makePipeline();
+  // Add the edges from the SU to the other SchedGroups in pipeline, and
+  // return the number of edges missed.
+  int addEdges(SmallVectorImpl<SchedGroup> &SyncPipeline, SUnit *SU, int SGID,
+               std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges);
+  // Remove the edges passed via AddedEdges
+  void removeEdges(const std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges);
+  // Convert the passed in maps to arrays for bidirectional iterators
+  void convertSyncMapsToArrays();
+
+  void reset();
+
+public:
+  // Invoke the solver to map instructions to instruction groups. A problem
+  // size heuristic and the command line options determine whether the exact
+  // or the greedy algorithm is used.
+  void solve();
+
+  PipelineSolver(DenseMap<int, SmallVector<SchedGroup, 4>> &SyncedSchedGroups,
+                 DenseMap<int, SUnitsToCandidateSGsMap> &SyncedInstrs,
+                 ScheduleDAGMI *DAG)
+      : DAG(DAG), SyncedInstrs(SyncedInstrs),
+        SyncedSchedGroups(SyncedSchedGroups) {
+
+    for (auto &PipelineInstrs : SyncedInstrs) {
+      if (PipelineInstrs.second.size() > 0) {
+        NeedsSolver = true;
+        break;
+      }
+    }
+
+    if (!NeedsSolver)
+      return;
+
+    convertSyncMapsToArrays();
+
+    CurrPipeline = BestPipeline;
+
+    while (static_cast<size_t>(BeginSyncGroupIdx) < PipelineInstrs.size() &&
+           PipelineInstrs[BeginSyncGroupIdx].size() == 0)
+      ++BeginSyncGroupIdx;
+
+    if (static_cast<size_t>(BeginSyncGroupIdx) >= PipelineInstrs.size())
+      return;
+  }
+};
+
+void PipelineSolver::reset() {
+
+  for (auto &SyncPipeline : CurrPipeline) {
+    for (auto &SG : SyncPipeline) {
+      SmallVector<SUnit *, 32> TempCollection = SG.Collection;
+      SG.Collection.clear();
+      auto SchedBarr = std::find_if(
+          TempCollection.begin(), TempCollection.end(), [](SUnit *SU) {
+            return SU->getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER;
+          });
+      if (SchedBarr != TempCollection.end())
+        SG.Collection.push_back(*SchedBarr);
+    }
+  }
+
+  CurrSyncGroupIdx = BeginSyncGroupIdx;
+  CurrConflInstNo = 0;
+  CurrCost = 0;
+}
+
+void PipelineSolver::convertSyncMapsToArrays() {
+  for (auto &SyncPipe : SyncedSchedGroups) {
+    BestPipeline.insert(BestPipeline.begin(), SyncPipe.second);
+  }
+
+  int PipelineIDx = SyncedInstrs.size() - 1;
+  PipelineInstrs.resize(SyncedInstrs.size());
+  for (auto &SyncInstrMap : SyncedInstrs) {
+    for (auto &SUsToCandSGs : SyncInstrMap.second) {
+      if (PipelineInstrs[PipelineIDx].size() == 0) {
+        PipelineInstrs[PipelineIDx].push_back(
+            std::make_pair(SUsToCandSGs.first, SUsToCandSGs.second));
+        continue;
+      }
+      auto SortPosition = PipelineInstrs[PipelineIDx].begin();
+      // Insert them in sorted order -- this allows for good parsing order in
+      // the greedy algorithm
+      while (SortPosition != PipelineInstrs[PipelineIDx].end() &&
+             SUsToCandSGs.first->NodeNum > SortPosition->first->NodeNum)
+        ++SortPosition;
+      PipelineInstrs[PipelineIDx].insert(
+          SortPosition,
+          std::make_pair(SUsToCandSGs.first, SUsToCandSGs.second));
+    }
+    --PipelineIDx;
+  }
+}
+
+void PipelineSolver::makePipeline() {
+  // Preserve the order of the barriers for subsequent SchedGroupBarrier mutations
+  for (auto &SyncPipeline : BestPipeline) {
+    for (auto &SG : SyncPipeline) {
+      SUnit *SGBarr = nullptr;
+      for (auto &SU : SG.Collection) {
+        if (SU->getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER)
+          SGBarr = SU;
+      }
+      // Command-line-requested IGroupLP pipelines don't have an SGBarr
+      if (!SGBarr)
+        continue;
+      resetEdges(*SGBarr, DAG);
+      SG.link(*SGBarr, false);
+    }
+  }
+
+  for (auto &SyncPipeline : BestPipeline) {
+    auto I = SyncPipeline.rbegin();
+    auto E = SyncPipeline.rend();
+    for (; I != E; ++I) {
+      auto &GroupA = *I;
+      for (auto J = std::next(I); J != E; ++J) {
+        auto &GroupB = *J;
+        GroupA.link(GroupB);
+      }
+    }
+  }
+}
+
+int PipelineSolver::addEdges(
+    SmallVectorImpl<SchedGroup> &SyncPipeline, SUnit *SU, int SGID,
+    std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges) {
+  int AddedCost = 0;
+  bool MakePred = false;
+
+  // The groups in the pipeline are in reverse order. Thus,
+  // by traversing them from last to first, we are traversing
+  // them in the order in which they were introduced in the code. After we
+  // pass the group the SU is being assigned to, the SU should be
+  // linked as a predecessor of the subsequent SchedGroups
+  auto GroupNo = (int)SyncPipeline.size() - 1;
+  for (; GroupNo >= 0; GroupNo--) {
+    if (SyncPipeline[GroupNo].getSGID() == SGID) {
+      MakePred = true;
+      continue;
+    }
+    auto Group = &SyncPipeline[GroupNo];
+    AddedCost += Group->link(*SU, MakePred, AddedEdges);
+    assert(AddedCost >= 0);
+  }
+
+  return AddedCost;
+}
+
+void PipelineSolver::removeEdges(
+    const std::vector<std::pair<SUnit *, SUnit *>> &EdgesToRemove) {
+  // Only remove the edges that we have added when testing
+  // the fit.
+  for (auto &PredSuccPair : EdgesToRemove) {
+    SUnit *Pred = PredSuccPair.first;
+    SUnit *Succ = PredSuccPair.second;
+
+    auto Match =
+        std::find_if(Succ->Preds.begin(), Succ->Preds.end(),
+                     [&Pred](SDep &P) { return P.getSUnit() == Pred; });
+    if (Match != Succ->Preds.end()) {
+      assert(Match->isArtificial());
+      Succ->removePred(*Match);
+    }
+  }
+}
+
+void PipelineSolver::advancePosition() {
+  ++CurrConflInstNo;
+
+  if (static_cast<size_t>(CurrConflInstNo) >=
+      PipelineInstrs[CurrSyncGroupIdx].size()) {
+    CurrConflInstNo = 0;
+    ++CurrSyncGroupIdx;
+    // Advance to next non-trivial pipeline
+    while (static_cast<size_t>(CurrSyncGroupIdx) < PipelineInstrs.size() &&
+           PipelineInstrs[CurrSyncGroupIdx].size() == 0)
+      ++CurrSyncGroupIdx;
+  }
+}
+
+void PipelineSolver::retreatPosition() {
+  assert(CurrConflInstNo >= 0);
+  assert(CurrSyncGroupIdx >= 0);
+
+  if (CurrConflInstNo > 0) {
+    --CurrConflInstNo;
+    return;
+  }
+
+  if (CurrConflInstNo == 0) {
+    // If we return to the starting position, we have explored
+    // the entire tree
+    if (CurrSyncGroupIdx == BeginSyncGroupIdx)
+      return;
+
+    --CurrSyncGroupIdx;
+    // Go to previous non-trivial pipeline
+    while (PipelineInstrs[CurrSyncGroupIdx].size() == 0)
+      --CurrSyncGroupIdx;
+
+    CurrConflInstNo = PipelineInstrs[CurrSyncGroupIdx].size() - 1;
+  }
+}
+
+bool PipelineSolver::checkOptimal() {
+  if (static_cast<size_t>(CurrSyncGroupIdx) == PipelineInstrs.size()) {
+    if (BestCost == -1 || CurrCost < BestCost) {
+      BestPipeline = CurrPipeline;
+      BestCost = CurrCost;
+      LLVM_DEBUG(dbgs() << "Found Fit with cost " << BestCost << "\n");
+    }
+    assert(BestCost >= 0);
+  }
+
+  bool DoneExploring = false;
+  if (MaxBranchesExplored > 0 && BranchesExplored >= MaxBranchesExplored)
+    DoneExploring = true;
+
+  return (DoneExploring || BestCost == 0);
+}
+
+void PipelineSolver::populateReadyList(
+    SUToCandSGsPair &CurrSU, SmallVectorImpl<std::pair<int, int>> &ReadyList,
+    SmallVectorImpl<SchedGroup> &SyncPipeline) {
+  assert(CurrSU.second.size() >= 1);
+  auto I = CurrSU.second.rbegin();
+  auto E = CurrSU.second.rend();
+  for (; I != E; ++I) {
+    std::vector<std::pair<SUnit *, SUnit *>> AddedEdges;
+    int CandSGID = *I;
+    SchedGroup *Match;
+    for (auto &SG : SyncPipeline) {
+      if (SG.getSGID() == CandSGID)
+        Match = &SG;
+    }
+
+    if (UseCostHeur) {
+      if (Match->isFull()) {
+        ReadyList.push_back(std::make_pair(*I, MissPenalty));
+        continue;
+      }
+
+      int TempCost = addEdges(SyncPipeline, CurrSU.first, CandSGID, AddedEdges);
+      ReadyList.push_back(std::make_pair(*I, TempCost));
+      removeEdges(AddedEdges);
+    } else
+      ReadyList.push_back(std::make_pair(*I, -1));
+  }
+
+  if (UseCostHeur) {
+    std::sort(ReadyList.begin(), ReadyList.end(),
+              [](std::pair<int, int> A, std::pair<int, int> B) {
+                return A.second < B.second;
+              });
+  }
+
+  assert(ReadyList.size() == CurrSU.second.size());
+}
+
+bool PipelineSolver::solveExact() {
+  if (checkOptimal())
+    return true;
+
+  if (static_cast<size_t>(CurrSyncGroupIdx) == PipelineInstrs.size())
+    return false;
+
+  assert(static_cast<size_t>(CurrSyncGroupIdx) < PipelineInstrs.size());
+  assert(static_cast<size_t>(CurrConflInstNo) <
+         PipelineInstrs[CurrSyncGroupIdx].size());
+  SUToCandSGsPair CurrSU = PipelineInstrs[CurrSyncGroupIdx][CurrConflInstNo];
+  LLVM_DEBUG(dbgs() << "Fitting SU(" << CurrSU.first->NodeNum
+                    << ") in Pipeline # " << CurrSyncGroupIdx << "\n");
+
+  // SchedGroup -> Cost pairs
+  SmallVector<std::pair<int, int>, 4> ReadyList;
+  // Prioritize the candidate sched groups in terms of lowest cost first
+  populateReadyList(CurrSU, ReadyList, CurrPipeline[CurrSyncGroupIdx]);
+
+  auto I = ReadyList.begin();
+  auto E = ReadyList.end();
+  for (; I != E; ++I) {
+    // If we are trying SGs in least cost order, and the current SG is cost
+    // infeasible, then all subsequent SGs will also be cost infeasible, so we
+    // can prune.
+    if (BestCost != -1 && (CurrCost + I->second > BestCost))
+      return false;
+
+    int CandSGID = I->first;
+    int AddedCost = 0;
+    std::vector<std::pair<SUnit *, SUnit *>> AddedEdges;
+    auto &SyncPipeline = CurrPipeline[CurrSyncGroupIdx];
+    SchedGroup *Match;
+    for (auto &SG : SyncPipeline) {
+      if (SG.getSGID() == CandSGID)
+        Match = &SG;
+    }
+
+    if (Match->isFull())
+      continue;
+
+    LLVM_DEBUG(dbgs() << "Assigning to SchedGroup with Mask "
+                      << (int)Match->getMask() << " and ID " << CandSGID
+                      << "\n");
+    Match->add(*CurrSU.first);
+    AddedCost = addEdges(SyncPipeline, CurrSU.first, CandSGID, AddedEdges);
+    LLVM_DEBUG(dbgs() << "Cost of Assignment: " << AddedCost << "\n");
+    CurrCost += AddedCost;
+    advancePosition();
+    ++BranchesExplored;
+    bool FinishedExploring = false;
+    // Only recurse if the cost after adding edges may still beat the best
+    // known solution; otherwise, backtrack.
+    if (CurrCost < BestCost || BestCost == -1) {
+      if (solveExact()) {
+        FinishedExploring = BestCost != 0;
+        if (!FinishedExploring)
+          return true;
+      }
+    }
+
+    retreatPosition();
+    CurrCost -= AddedCost;
+    removeEdges(AddedEdges);
+    Match->pop();
+    CurrPipeline[CurrSyncGroupIdx] = SyncPipeline;
+    if (FinishedExploring)
+      return true;
+  }
+
+  // Try the pipeline where the current instruction is omitted
+  // Potentially if we omit a problematic instruction from the pipeline,
+  // all the other instructions can nicely fit.
+  CurrCost += MissPenalty;
+  advancePosition();
+
+  LLVM_DEBUG(dbgs() << "NOT Assigned (" << CurrSU.first->NodeNum << ")\n");
+
+  bool FinishedExploring = false;
+  if (CurrCost < BestCost || BestCost == -1) {
+    if (solveExact()) {
+      FinishedExploring = BestCost != 0;
+      if (!FinishedExploring)
+        return true;
+    }
+  }
+
+  retreatPosition();
+  CurrCost -= MissPenalty;
+  return FinishedExploring;
+}
+
+bool PipelineSolver::solveGreedy() {
+  BestCost = 0;
+  std::vector<std::pair<SUnit *, SUnit *>> AddedEdges;
+
+  while (static_cast<size_t>(CurrSyncGroupIdx) < PipelineInstrs.size()) {
+    SUToCandSGsPair CurrSU = PipelineInstrs[CurrSyncGroupIdx][CurrConflInstNo];
+    int BestNodeCost = -1;
+    int TempCost;
+    SchedGroup *BestGroup = nullptr;
+    int BestGroupID = -1;
+    auto &SyncPipeline = CurrPipeline[CurrSyncGroupIdx];
+    LLVM_DEBUG(dbgs() << "Fitting SU(" << CurrSU.first->NodeNum
+                      << ") in Pipeline # " << CurrSyncGroupIdx << "\n");
+
+    // Since we have added the potential SchedGroups from bottom up, but
+    // traversed the DAG from top down, parse over the groups from last to
+    // first. If we fail to do this for the greedy algorithm, the solution will
+    // likely not be good in more complex cases.
+    auto I = CurrSU.second.rbegin();
+    auto E = CurrSU.second.rend();
+    for (; I != E; ++I) {
+      std::vector<std::pair<SUnit *, SUnit *>> AddedEdges;
+      int CandSGID = *I;
+      SchedGroup *Match;
+      for (auto &SG : SyncPipeline) {
+        if (SG.getSGID() == CandSGID)
+          Match = &SG;
+      }
+
+      LLVM_DEBUG(dbgs() << "Trying SGID # " << CandSGID << " with Mask "
+                        << (int)Match->getMask() << "\n");
+
+      if (Match->isFull()) {
+        LLVM_DEBUG(dbgs() << "SGID # " << CandSGID << " is full\n");
+        continue;
+      }
+      TempCost = addEdges(SyncPipeline, CurrSU.first, CandSGID, AddedEdges);
+      LLVM_DEBUG(dbgs() << "Cost of Group " << TempCost << "\n");
+      if (TempCost < BestNodeCost || BestNodeCost == -1) {
+        BestGroup = Match;
+        BestNodeCost = TempCost;
+        BestGroupID = CandSGID;
+      }
+      removeEdges(AddedEdges);
+      if (BestNodeCost == 0)
+        break;
+    }
+
+    if (BestGroupID != -1) {
+      BestGroup->add(*CurrSU.first);
+      addEdges(SyncPipeline, CurrSU.first, BestGroupID, AddedEdges);
+      LLVM_DEBUG(dbgs() << "Best Group has ID: " << BestGroupID << " and Mask"
+                        << (int)BestGroup->getMask() << "\n");
+      BestCost += TempCost;
+    } else
+      BestCost += MissPenalty;
+
+    CurrPipeline[CurrSyncGroupIdx] = SyncPipeline;
+    advancePosition();
+  }
+  BestPipeline = CurrPipeline;
+  removeEdges(AddedEdges);
+  return false;
+}
+
+unsigned PipelineSolver::computeProblemSize() {
+  unsigned ProblemSize = 0;
+  for (auto &PipeConflicts : PipelineInstrs) {
+    ProblemSize += PipeConflicts.size();
+  }
+
+  return ProblemSize;
+}
+
+void PipelineSolver::solve() {
+  if (!NeedsSolver)
+    return;
+
+  unsigned ProblemSize = computeProblemSize();
+  assert(ProblemSize > 0);
+
+  bool BelowCutoff = (CutoffForExact > 0) && ProblemSize <= CutoffForExact;
+  MissPenalty = (ProblemSize / 2) + 1;
+
+  LLVM_DEBUG(DAG->dump());
+  if (EnableExactSolver || BelowCutoff) {
+    LLVM_DEBUG(dbgs() << "Starting Greedy pipeline solver\n");
+    solveGreedy();
+    reset();
+    LLVM_DEBUG(dbgs() << "Greedy produced best cost of " << BestCost << "\n");
+    if (BestCost > 0) {
+      LLVM_DEBUG(dbgs() << "Starting EXACT pipeline solver\n");
+      solveExact();
+      LLVM_DEBUG(dbgs() << "Exact produced best cost of " << BestCost << "\n");
+    }
+  } else { // Use the Greedy Algorithm by default
+    LLVM_DEBUG(dbgs() << "Starting GREEDY pipeline solver\n");
+    solveGreedy();
+  }
+
+  makePipeline();
+}
+
 class IGroupLPDAGMutation : public ScheduleDAGMutation {
+private:
+  // Organize lists of SchedGroups by their SyncID. SchedGroups /
+  // SCHED_GROUP_BARRIERs with different SyncIDs will have no edges added
+  // between them.
+  DenseMap<int, SmallVector<SchedGroup, 4>> SyncedSchedGroups;
+
+  // The number of created sched groups -- also used as SGID
+  int NumCreatedSchedGroups = 0;
+
+  // Used to track instructions that can be mapped to multiple sched groups
+  DenseMap<int, SUnitsToCandidateSGsMap> SyncedInstrs;
+
 public:
   const SIInstrInfo *TII;
   ScheduleDAGMI *DAG;
@@ -183,11 +758,13 @@ class SchedBarrierDAGMutation : public ScheduleDAGMutation {
   // Organize lists of SchedGroups by their SyncID. SchedGroups /
   // SCHED_GROUP_BARRIERs with different SyncIDs will have no edges added
   // between them.
-  DenseMap<int, SmallVector<SchedGroup, 4>> SyncedSchedGroupsMap;
+  DenseMap<int, SmallVector<SchedGroup, 4>> SyncedSchedGroups;
 
-  // Used to track instructions that are already to added to a different
-  // SchedGroup with the same SyncID.
-  DenseMap<int, DenseSet<SUnit *>> SyncedInstrsMap;
+  // The number of created sched groups -- also used as SGID
+  int NumCreatedSchedGroups = 0;
+
+  // Used to track instructions that can be mapped to multiple sched groups
+  DenseMap<int, SUnitsToCandidateSGsMap> SyncedInstrs;
 
   // Add DAG edges that enforce SCHED_BARRIER ordering.
   void addSchedBarrierEdges(SUnit &SU);
@@ -204,11 +781,8 @@ class SchedBarrierDAGMutation : public ScheduleDAGMutation {
   SchedGroupMask invertSchedBarrierMask(SchedGroupMask Mask) const;
 
   // Create SchedGroups for a SCHED_GROUP_BARRIER.
-  void initSchedGroupBarrier(std::vector<SUnit>::reverse_iterator RIter);
-
-  // Add DAG edges that try to enforce ordering defined by SCHED_GROUP_BARRIER
-  // instructions.
-  void addSchedGroupBarrierEdges();
+  void initSchedGroupBarrierPipelineStage(
+      std::vector<SUnit>::reverse_iterator RIter);
 
 public:
   void apply(ScheduleDAGInstrs *DAGInstrs) override;
@@ -219,9 +793,6 @@ class SchedBarrierDAGMutation : public ScheduleDAGMutation {
 bool SchedGroup::tryAddEdge(SUnit *A, SUnit *B) {
   if (A != B && DAG->canAddEdge(B, A)) {
     DAG->addEdge(B, SDep(A, SDep::Artificial));
-    LLVM_DEBUG(dbgs() << "Adding edge...\n"
-                      << "from: SU(" << A->NodeNum << ") " << *A->getInstr()
-                      << "to: SU(" << B->NodeNum << ") " << *B->getInstr());
     return true;
   }
   return false;
@@ -281,9 +852,35 @@ bool SchedGroup::canAddMI(const MachineInstr &MI) const {
   return Result;
 }
 
+int SchedGroup::link(SUnit &SU, bool MakePred,
+                     std::vector<std::pair<SUnit *, SUnit *>> &AddedEdges) {
+  int MissedEdges = 0;
+  for (auto A : Collection) {
+    SUnit *B = &SU;
+    if (A == B || A->getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER)
+      continue;
+    if (MakePred)
+      std::swap(A, B);
+
+    if (DAG->IsReachable(B, A))
+      continue;
+    // tryAddEdge returns false if there is a dependency that makes adding
+    // the A->B edge impossible; otherwise it returns true.
+    bool Added = tryAddEdge(A, B);
+    if (Added)
+      AddedEdges.push_back(std::make_pair(A, B));
+    else
+      ++MissedEdges;
+  }
+
+  return MissedEdges;
+}
+
 void SchedGroup::link(SUnit &SU, bool MakePred) {
   for (auto A : Collection) {
     SUnit *B = &SU;
+    if (A->getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER)
+      continue;
     if (MakePred)
       std::swap(A, B);
 
@@ -307,10 +904,6 @@ void SchedGroup::link(SchedGroup &OtherGroup) {
     link(*B);
 }
 
-bool SchedGroup::isFull() const {
-  return MaxSize && Collection.size() >= *MaxSize;
-}
-
 bool SchedGroup::canAddSU(SUnit &SU) const {
   MachineInstr &MI = *SU.getInstr();
   if (MI.getOpcode() != TargetOpcode::BUNDLE)
@@ -336,27 +929,16 @@ void SchedGroup::initSchedGroup() {
   }
 }
 
-static bool canFitIntoPipeline(SUnit &SU, ScheduleDAGInstrs *DAG,
-                               DenseSet<SUnit *> &ConflictedInstrs) {
-  return llvm::all_of(ConflictedInstrs, [DAG, &SU](SUnit *SuccSU) {
-    return DAG->canAddEdge(SuccSU, &SU);
-  });
-}
-
 void SchedGroup::initSchedGroup(std::vector<SUnit>::reverse_iterator RIter,
-                                DenseSet<SUnit *> &ConflictedInstrs) {
+                                SUnitsToCandidateSGsMap &SyncedInstrs) {
   SUnit &InitSU = *RIter;
   for (auto E = DAG->SUnits.rend(); RIter != E; ++RIter) {
     auto &SU = *RIter;
     if (isFull())
       break;
 
-    if (canAddSU(SU) && !ConflictedInstrs.count(&SU) &&
-        canFitIntoPipeline(SU, DAG, ConflictedInstrs)) {
-      add(SU);
-      ConflictedInstrs.insert(&SU);
-      tryAddEdge(&SU, &InitSU);
-    }
+    if (canAddSU(SU))
+      SyncedInstrs[&SU].push_back(SGID);
   }
 
   add(InitSU);
@@ -364,31 +946,16 @@ void SchedGroup::initSchedGroup(std::vector<SUnit>::reverse_iterator RIter,
   (*MaxSize)++;
 }
 
-// Create a pipeline from the SchedGroups in PipelineOrderGroups such that we
-// try to enforce the relative ordering of instructions in each group.
-static void makePipeline(SmallVectorImpl<SchedGroup> &PipelineOrderGroups) {
-  auto I = PipelineOrderGroups.begin();
-  auto E = PipelineOrderGroups.end();
+void SchedGroup::initSchedGroup(SUnitsToCandidateSGsMap &SyncedInstrs) {
+  auto I = DAG->SUnits.rbegin();
+  auto E = DAG->SUnits.rend();
   for (; I != E; ++I) {
-    auto &GroupA = *I;
-    for (auto J = std::next(I); J != E; ++J) {
-      auto &GroupB = *J;
-      GroupA.link(GroupB);
-    }
-  }
-}
+    auto &SU = *I;
+    if (isFull())
+      break;
 
-// Same as makePipeline but with reverse ordering.
-static void
-makeReversePipeline(SmallVectorImpl<SchedGroup> &PipelineOrderGroups) {
-  auto I = PipelineOrderGroups.rbegin();
-  auto E = PipelineOrderGroups.rend();
-  for (; I != E; ++I) {
-    auto &GroupA = *I;
-    for (auto J = std::next(I); J != E; ++J) {
-      auto &GroupB = *J;
-      GroupA.link(GroupB);
-    }
+    if (canAddSU(SU))
+      SyncedInstrs[&SU].push_back(SGID);
   }
 }
 
@@ -396,6 +963,17 @@ void IGroupLPDAGMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
   const GCNSubtarget &ST = DAGInstrs->MF.getSubtarget<GCNSubtarget>();
   TII = ST.getInstrInfo();
   DAG = static_cast<ScheduleDAGMI *>(DAGInstrs);
+
+  // IGroupLP and sched_group_barrier are mutually exclusive mutations.
+  // Check for sched_group_barriers as that mutation gets priority.
+  for (auto R = DAG->SUnits.rbegin(), E = DAG->SUnits.rend(); R != E; ++R) {
+    if (R->getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER) {
+      return;
+    }
+  }
+
+  SyncedSchedGroups.clear();
+  SyncedInstrs.clear();
   const TargetSchedModel *TSchedModel = DAGInstrs->getSchedModel();
   if (!TSchedModel || DAG->SUnits.empty())
     return;
@@ -406,32 +984,36 @@ void IGroupLPDAGMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
   // order in which edges will be added. In other words, given the
   // present ordering, we will try to make each VMEMRead instruction
   // a predecessor of each DSRead instruction, and so on.
-  SmallVector<SchedGroup, 4> PipelineOrderGroups = {
-      SchedGroup(SchedGroupMask::VMEM, VMEMGroupMaxSize, DAG, TII),
-      SchedGroup(SchedGroupMask::DS_READ, LDRGroupMaxSize, DAG, TII),
-      SchedGroup(SchedGroupMask::MFMA, MFMAGroupMaxSize, DAG, TII),
-      SchedGroup(SchedGroupMask::DS_WRITE, LDWGroupMaxSize, DAG, TII)};
 
-  for (auto &SG : PipelineOrderGroups)
-    SG.initSchedGroup();
+  struct SGParams {
+    SchedGroupMask Mask;
+    Optional<unsigned> Size;
+    int SyncID;
 
-  makePipeline(PipelineOrderGroups);
-}
+    SGParams(SchedGroupMask Mask, Optional<unsigned> Size, int SyncID)
+        : Mask(Mask), Size(Size), SyncID(SyncID) {}
+  };
 
-// Remove all existing edges from a SCHED_BARRIER or SCHED_GROUP_BARRIER.
-static void resetEdges(SUnit &SU, ScheduleDAGInstrs *DAG) {
-  assert(SU.getInstr()->getOpcode() == AMDGPU::SCHED_BARRIER ||
-         SU.getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER);
+  SmallVector<SGParams, 16> PipelineOrderGroups;
 
-  while (!SU.Preds.empty())
-    for (auto &P : SU.Preds)
-      SU.removePred(P);
+  for (size_t i = 0; i < DAG->SUnits.size() / 4; i++) {
+    PipelineOrderGroups.push_back({SchedGroupMask::DS_READ, 8, 0});
+    PipelineOrderGroups.push_back({SchedGroupMask::MFMA, 1, 0});
+    PipelineOrderGroups.push_back({SchedGroupMask::DS_WRITE, 8, 0});
+  }
 
-  while (!SU.Succs.empty())
-    for (auto &S : SU.Succs)
-      for (auto &SP : S.getSUnit()->Preds)
-        if (SP.getSUnit() == &SU)
-          S.getSUnit()->removePred(SP);
+  auto I = PipelineOrderGroups.rbegin();
+  auto E = PipelineOrderGroups.rend();
+  for (; I < E; I++) {
+    auto &SG = SyncedSchedGroups[I->SyncID].emplace_back(
+        I->Mask, I->Size, I->SyncID, NumCreatedSchedGroups++, DAG, TII);
+    SG.initSchedGroup(SyncedInstrs[SG.getSyncID()]);
+  }
+
+  PipelineSolver PS(SyncedSchedGroups, SyncedInstrs, DAG);
+  // PipelineSolver performs the mutation by adding the edges it
+  // determined to be the best.
+  PS.solve();
 }
 
 void SchedBarrierDAGMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
@@ -443,19 +1025,20 @@ void SchedBarrierDAGMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
   const GCNSubtarget &ST = DAGInstrs->MF.getSubtarget<GCNSubtarget>();
   TII = ST.getInstrInfo();
   DAG = static_cast<ScheduleDAGMI *>(DAGInstrs);
-  SyncedInstrsMap.clear();
-  SyncedSchedGroupsMap.clear();
+  SyncedSchedGroups.clear();
+  SyncedInstrs.clear();
   for (auto R = DAG->SUnits.rbegin(), E = DAG->SUnits.rend(); R != E; ++R) {
     if (R->getInstr()->getOpcode() == AMDGPU::SCHED_BARRIER)
       addSchedBarrierEdges(*R);
 
     else if (R->getInstr()->getOpcode() == AMDGPU::SCHED_GROUP_BARRIER)
-      initSchedGroupBarrier(R);
+      initSchedGroupBarrierPipelineStage(R);
   }
 
-  // SCHED_GROUP_BARRIER edges can only be added after we have found and
-  // initialized all of the SCHED_GROUP_BARRIER SchedGroups.
-  addSchedGroupBarrierEdges();
+  PipelineSolver PS(SyncedSchedGroups, SyncedInstrs, DAG);
+  // PipelineSolver performs the mutation by adding the edges it
+  // determined to be the best.
+  PS.solve();
 }
 
 void SchedBarrierDAGMutation::addSchedBarrierEdges(SUnit &SchedBarrier) {
@@ -510,7 +1093,7 @@ SchedBarrierDAGMutation::invertSchedBarrierMask(SchedGroupMask Mask) const {
   return InvertedMask;
 }
 
-void SchedBarrierDAGMutation::initSchedGroupBarrier(
+void SchedBarrierDAGMutation::initSchedGroupBarrierPipelineStage(
     std::vector<SUnit>::reverse_iterator RIter) {
   // Remove all existing edges from the SCHED_GROUP_BARRIER that were added due
   // to the instruction having side effects.
@@ -520,23 +1103,11 @@ void SchedBarrierDAGMutation::initSchedGroupBarrier(
   int32_t SGMask = SGB.getOperand(0).getImm();
   int32_t Size = SGB.getOperand(1).getImm();
   int32_t SyncID = SGB.getOperand(2).getImm();
-  // Create a new SchedGroup and add it to a list that is mapped to the SyncID.
-  // SchedGroups only enforce ordering between SchedGroups with the same SyncID.
-  auto &SG = SyncedSchedGroupsMap[SyncID].emplace_back((SchedGroupMask)SGMask,
-                                                       Size, SyncID, DAG, TII);
-
-  // SyncedInstrsMap is used here is used to avoid adding the same SUs in
-  // multiple SchedGroups that have the same SyncID. This only matters for
-  // SCHED_GROUP_BARRIER and not SCHED_BARRIER.
-  SG.initSchedGroup(RIter, SyncedInstrsMap[SG.getSyncID()]);
-}
 
-void SchedBarrierDAGMutation::addSchedGroupBarrierEdges() {
-  // Since we traversed the DAG in reverse order when initializing
-  // SCHED_GROUP_BARRIERs we need to reverse the order in the vector to maintain
-  // user intentions and program order.
-  for (auto &SchedGroups : SyncedSchedGroupsMap)
-    makeReversePipeline(SchedGroups.second);
+  auto &SG = SyncedSchedGroups[SyncID].emplace_back(
+      (SchedGroupMask)SGMask, Size, SyncID, NumCreatedSchedGroups++, DAG, TII);
+
+  SG.initSchedGroup(RIter, SyncedInstrs[SG.getSyncID()]);
 }
 
 } // namespace

diff --git a/llvm/test/CodeGen/AMDGPU/igrouplp-dag-mutation.ll b/llvm/test/CodeGen/AMDGPU/igrouplp-dag-mutation.ll
new file mode 100644
index 0000000000000..743f03179f3f0
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/igrouplp-dag-mutation.ll
@@ -0,0 +1,277 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=gfx90a -amdgpu-igrouplp=1 < %s | FileCheck -check-prefix=GREEDY %s
+; RUN: llc -march=amdgcn -mcpu=gfx90a -amdgpu-igrouplp-exact-solver -amdgpu-igrouplp=1 < %s | FileCheck -check-prefix=EXACT %s
+
+define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(<32 x float> addrspace(3)* noalias %in, <32 x float> addrspace(3)* noalias %out) #0 {
+; GREEDY-LABEL: test_sched_group_barrier_pipeline_MFMA_interleave:
+; GREEDY:       ; %bb.0: ; %entry
+; GREEDY-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GREEDY-NEXT:    v_lshlrev_b32_e32 v33, 7, v0
+; GREEDY-NEXT:    v_mov_b32_e32 v34, 1.0
+; GREEDY-NEXT:    v_mov_b32_e32 v35, 2.0
+; GREEDY-NEXT:    s_waitcnt lgkmcnt(0)
+; GREEDY-NEXT:    v_add_u32_e32 v32, s0, v33
+; GREEDY-NEXT:    ds_read_b128 v[28:31], v32 offset:112
+; GREEDY-NEXT:    ds_read_b128 v[24:27], v32 offset:96
+; GREEDY-NEXT:    ds_read_b128 v[20:23], v32 offset:80
+; GREEDY-NEXT:    ds_read_b128 v[16:19], v32 offset:64
+; GREEDY-NEXT:    ds_read_b128 v[0:3], v32
+; GREEDY-NEXT:    ds_read_b128 v[4:7], v32 offset:16
+; GREEDY-NEXT:    ds_read_b128 v[8:11], v32 offset:32
+; GREEDY-NEXT:    ds_read_b128 v[12:15], v32 offset:48
+; GREEDY-NEXT:    v_add_u32_e32 v33, s1, v33
+; GREEDY-NEXT:    s_waitcnt lgkmcnt(0)
+; GREEDY-NEXT:    v_mfma_f32_32x32x1f32 v[0:31], v34, v35, v[0:31]
+; GREEDY-NEXT:    s_nop 7
+; GREEDY-NEXT:    s_nop 7
+; GREEDY-NEXT:    s_nop 2
+; GREEDY-NEXT:    ds_write_b128 v33, v[28:31] offset:112
+; GREEDY-NEXT:    ds_write_b128 v33, v[24:27] offset:96
+; GREEDY-NEXT:    ds_write_b128 v33, v[20:23] offset:80
+; GREEDY-NEXT:    ds_write_b128 v33, v[16:19] offset:64
+; GREEDY-NEXT:    ds_write_b128 v33, v[12:15] offset:48
+; GREEDY-NEXT:    ds_write_b128 v33, v[8:11] offset:32
+; GREEDY-NEXT:    ds_write_b128 v33, v[4:7] offset:16
+; GREEDY-NEXT:    ds_write_b128 v33, v[0:3]
+; GREEDY-NEXT:    ds_read_b128 v[64:67], v32 offset:8304
+; GREEDY-NEXT:    ds_read_b128 v[60:63], v32 offset:8288
+; GREEDY-NEXT:    ds_read_b128 v[56:59], v32 offset:8272
+; GREEDY-NEXT:    ds_read_b128 v[52:55], v32 offset:8256
+; GREEDY-NEXT:    ds_read_b128 v[48:51], v32 offset:8240
+; GREEDY-NEXT:    ds_read_b128 v[44:47], v32 offset:8224
+; GREEDY-NEXT:    ds_read_b128 v[40:43], v32 offset:8208
+; GREEDY-NEXT:    ds_read_b128 v[36:39], v32 offset:8192
+; GREEDY-NEXT:    v_mov_b32_e32 v0, s1
+; GREEDY-NEXT:    v_add_u32_e32 v1, 0x6000, v32
+; GREEDY-NEXT:    s_waitcnt lgkmcnt(0)
+; GREEDY-NEXT:    v_mfma_f32_32x32x1f32 v[36:67], v34, v35, v[36:67]
+; GREEDY-NEXT:    s_nop 7
+; GREEDY-NEXT:    s_nop 7
+; GREEDY-NEXT:    s_nop 2
+; GREEDY-NEXT:    ds_write_b128 v0, v[60:63] offset:8288
+; GREEDY-NEXT:    ds_write_b128 v0, v[64:67] offset:8304
+; GREEDY-NEXT:    ds_write_b128 v0, v[52:55] offset:8256
+; GREEDY-NEXT:    ds_write_b128 v0, v[56:59] offset:8272
+; GREEDY-NEXT:    ds_write_b128 v0, v[44:47] offset:8224
+; GREEDY-NEXT:    ds_write_b128 v0, v[48:51] offset:8240
+; GREEDY-NEXT:    ds_write_b128 v0, v[36:39] offset:8192
+; GREEDY-NEXT:    ds_write_b128 v0, v[40:43] offset:8208
+; GREEDY-NEXT:    ds_read_b128 v[64:67], v32 offset:24688
+; GREEDY-NEXT:    ds_read_b128 v[60:63], v32 offset:24672
+; GREEDY-NEXT:    ds_read_b128 v[56:59], v32 offset:24656
+; GREEDY-NEXT:    ds_read_b128 v[52:55], v32 offset:24640
+; GREEDY-NEXT:    ds_read_b128 v[48:51], v32 offset:24624
+; GREEDY-NEXT:    ds_read_b128 v[44:47], v32 offset:24608
+; GREEDY-NEXT:    ds_read_b128 v[40:43], v32 offset:24592
+; GREEDY-NEXT:    ds_read_b128 v[36:39], v32 offset:24576
+; GREEDY-NEXT:    s_waitcnt lgkmcnt(0)
+; GREEDY-NEXT:    v_mfma_f32_32x32x1f32 v[36:67], v34, v35, v[36:67]
+; GREEDY-NEXT:    s_nop 7
+; GREEDY-NEXT:    s_nop 7
+; GREEDY-NEXT:    s_nop 2
+; GREEDY-NEXT:    ds_write_b128 v0, v[60:63] offset:16480
+; GREEDY-NEXT:    ds_write_b128 v0, v[64:67] offset:16496
+; GREEDY-NEXT:    ds_write_b128 v0, v[52:55] offset:16448
+; GREEDY-NEXT:    ds_write_b128 v0, v[56:59] offset:16464
+; GREEDY-NEXT:    ds_write_b128 v0, v[44:47] offset:16416
+; GREEDY-NEXT:    ds_write_b128 v0, v[48:51] offset:16432
+; GREEDY-NEXT:    ds_write_b128 v0, v[36:39] offset:16384
+; GREEDY-NEXT:    ds_write_b128 v0, v[40:43] offset:16400
+; GREEDY-NEXT:    ds_read_b128 v[64:67], v32 offset:49264
+; GREEDY-NEXT:    ds_read_b128 v[60:63], v32 offset:49248
+; GREEDY-NEXT:    ds_read_b128 v[56:59], v32 offset:49232
+; GREEDY-NEXT:    ds_read_b128 v[52:55], v32 offset:49216
+; GREEDY-NEXT:    ds_read_b128 v[48:51], v32 offset:49200
+; GREEDY-NEXT:    ds_read_b128 v[44:47], v32 offset:49184
+; GREEDY-NEXT:    ds_read_b128 v[40:43], v32 offset:49168
+; GREEDY-NEXT:    ds_read_b128 v[36:39], v32 offset:49152
+; GREEDY-NEXT:    s_waitcnt lgkmcnt(0)
+; GREEDY-NEXT:    v_mfma_f32_32x32x1f32 v[36:67], v34, v35, v[36:67]
+; GREEDY-NEXT:    s_nop 7
+; GREEDY-NEXT:    s_nop 7
+; GREEDY-NEXT:    s_nop 2
+; GREEDY-NEXT:    ds_write_b128 v0, v[60:63] offset:24672
+; GREEDY-NEXT:    ds_write_b128 v0, v[64:67] offset:24688
+; GREEDY-NEXT:    ds_write_b128 v0, v[52:55] offset:24640
+; GREEDY-NEXT:    ds_write_b128 v0, v[56:59] offset:24656
+; GREEDY-NEXT:    ds_write_b128 v0, v[44:47] offset:24608
+; GREEDY-NEXT:    ds_write_b128 v0, v[48:51] offset:24624
+; GREEDY-NEXT:    ds_write_b128 v0, v[36:39] offset:24576
+; GREEDY-NEXT:    ds_write_b128 v0, v[40:43] offset:24592
+; GREEDY-NEXT:    ds_read_b128 v[30:33], v1 offset:57456
+; GREEDY-NEXT:    ds_read_b128 v[26:29], v1 offset:57440
+; GREEDY-NEXT:    ds_read_b128 v[22:25], v1 offset:57424
+; GREEDY-NEXT:    ds_read_b128 v[18:21], v1 offset:57408
+; GREEDY-NEXT:    ds_read_b128 v[2:5], v1 offset:57344
+; GREEDY-NEXT:    ds_read_b128 v[6:9], v1 offset:57360
+; GREEDY-NEXT:    ds_read_b128 v[10:13], v1 offset:57376
+; GREEDY-NEXT:    ds_read_b128 v[14:17], v1 offset:57392
+; GREEDY-NEXT:    s_waitcnt lgkmcnt(0)
+; GREEDY-NEXT:    v_mfma_f32_32x32x1f32 v[2:33], v34, v35, v[2:33]
+; GREEDY-NEXT:    s_nop 7
+; GREEDY-NEXT:    s_nop 7
+; GREEDY-NEXT:    s_nop 2
+; GREEDY-NEXT:    ds_write_b128 v0, v[26:29] offset:32864
+; GREEDY-NEXT:    ds_write_b128 v0, v[30:33] offset:32880
+; GREEDY-NEXT:    ds_write_b128 v0, v[18:21] offset:32832
+; GREEDY-NEXT:    ds_write_b128 v0, v[22:25] offset:32848
+; GREEDY-NEXT:    ds_write_b128 v0, v[10:13] offset:32800
+; GREEDY-NEXT:    ds_write_b128 v0, v[14:17] offset:32816
+; GREEDY-NEXT:    ds_write_b128 v0, v[2:5] offset:32768
+; GREEDY-NEXT:    ds_write_b128 v0, v[6:9] offset:32784
+; GREEDY-NEXT:    s_endpgm
+;
+; EXACT-LABEL: test_sched_group_barrier_pipeline_MFMA_interleave:
+; EXACT:       ; %bb.0: ; %entry
+; EXACT-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; EXACT-NEXT:    v_lshlrev_b32_e32 v33, 7, v0
+; EXACT-NEXT:    v_mov_b32_e32 v34, 1.0
+; EXACT-NEXT:    v_mov_b32_e32 v35, 2.0
+; EXACT-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACT-NEXT:    v_add_u32_e32 v32, s0, v33
+; EXACT-NEXT:    ds_read_b128 v[28:31], v32 offset:112
+; EXACT-NEXT:    ds_read_b128 v[24:27], v32 offset:96
+; EXACT-NEXT:    ds_read_b128 v[20:23], v32 offset:80
+; EXACT-NEXT:    ds_read_b128 v[16:19], v32 offset:64
+; EXACT-NEXT:    ds_read_b128 v[0:3], v32
+; EXACT-NEXT:    ds_read_b128 v[4:7], v32 offset:16
+; EXACT-NEXT:    ds_read_b128 v[8:11], v32 offset:32
+; EXACT-NEXT:    ds_read_b128 v[12:15], v32 offset:48
+; EXACT-NEXT:    v_add_u32_e32 v33, s1, v33
+; EXACT-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACT-NEXT:    v_mfma_f32_32x32x1f32 v[0:31], v34, v35, v[0:31]
+; EXACT-NEXT:    s_nop 7
+; EXACT-NEXT:    s_nop 7
+; EXACT-NEXT:    s_nop 2
+; EXACT-NEXT:    ds_write_b128 v33, v[28:31] offset:112
+; EXACT-NEXT:    ds_write_b128 v33, v[24:27] offset:96
+; EXACT-NEXT:    ds_write_b128 v33, v[20:23] offset:80
+; EXACT-NEXT:    ds_write_b128 v33, v[16:19] offset:64
+; EXACT-NEXT:    ds_write_b128 v33, v[12:15] offset:48
+; EXACT-NEXT:    ds_write_b128 v33, v[8:11] offset:32
+; EXACT-NEXT:    ds_write_b128 v33, v[4:7] offset:16
+; EXACT-NEXT:    ds_write_b128 v33, v[0:3]
+; EXACT-NEXT:    ds_read_b128 v[64:67], v32 offset:8304
+; EXACT-NEXT:    ds_read_b128 v[60:63], v32 offset:8288
+; EXACT-NEXT:    ds_read_b128 v[56:59], v32 offset:8272
+; EXACT-NEXT:    ds_read_b128 v[52:55], v32 offset:8256
+; EXACT-NEXT:    ds_read_b128 v[48:51], v32 offset:8240
+; EXACT-NEXT:    ds_read_b128 v[44:47], v32 offset:8224
+; EXACT-NEXT:    ds_read_b128 v[40:43], v32 offset:8208
+; EXACT-NEXT:    ds_read_b128 v[36:39], v32 offset:8192
+; EXACT-NEXT:    v_mov_b32_e32 v0, s1
+; EXACT-NEXT:    v_add_u32_e32 v1, 0x6000, v32
+; EXACT-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACT-NEXT:    v_mfma_f32_32x32x1f32 v[36:67], v34, v35, v[36:67]
+; EXACT-NEXT:    s_nop 7
+; EXACT-NEXT:    s_nop 7
+; EXACT-NEXT:    s_nop 2
+; EXACT-NEXT:    ds_write_b128 v0, v[60:63] offset:8288
+; EXACT-NEXT:    ds_write_b128 v0, v[64:67] offset:8304
+; EXACT-NEXT:    ds_write_b128 v0, v[52:55] offset:8256
+; EXACT-NEXT:    ds_write_b128 v0, v[56:59] offset:8272
+; EXACT-NEXT:    ds_write_b128 v0, v[44:47] offset:8224
+; EXACT-NEXT:    ds_write_b128 v0, v[48:51] offset:8240
+; EXACT-NEXT:    ds_write_b128 v0, v[36:39] offset:8192
+; EXACT-NEXT:    ds_write_b128 v0, v[40:43] offset:8208
+; EXACT-NEXT:    ds_read_b128 v[64:67], v32 offset:24688
+; EXACT-NEXT:    ds_read_b128 v[60:63], v32 offset:24672
+; EXACT-NEXT:    ds_read_b128 v[56:59], v32 offset:24656
+; EXACT-NEXT:    ds_read_b128 v[52:55], v32 offset:24640
+; EXACT-NEXT:    ds_read_b128 v[48:51], v32 offset:24624
+; EXACT-NEXT:    ds_read_b128 v[44:47], v32 offset:24608
+; EXACT-NEXT:    ds_read_b128 v[40:43], v32 offset:24592
+; EXACT-NEXT:    ds_read_b128 v[36:39], v32 offset:24576
+; EXACT-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACT-NEXT:    v_mfma_f32_32x32x1f32 v[36:67], v34, v35, v[36:67]
+; EXACT-NEXT:    s_nop 7
+; EXACT-NEXT:    s_nop 7
+; EXACT-NEXT:    s_nop 2
+; EXACT-NEXT:    ds_write_b128 v0, v[60:63] offset:16480
+; EXACT-NEXT:    ds_write_b128 v0, v[64:67] offset:16496
+; EXACT-NEXT:    ds_write_b128 v0, v[52:55] offset:16448
+; EXACT-NEXT:    ds_write_b128 v0, v[56:59] offset:16464
+; EXACT-NEXT:    ds_write_b128 v0, v[44:47] offset:16416
+; EXACT-NEXT:    ds_write_b128 v0, v[48:51] offset:16432
+; EXACT-NEXT:    ds_write_b128 v0, v[36:39] offset:16384
+; EXACT-NEXT:    ds_write_b128 v0, v[40:43] offset:16400
+; EXACT-NEXT:    ds_read_b128 v[64:67], v32 offset:49264
+; EXACT-NEXT:    ds_read_b128 v[60:63], v32 offset:49248
+; EXACT-NEXT:    ds_read_b128 v[56:59], v32 offset:49232
+; EXACT-NEXT:    ds_read_b128 v[52:55], v32 offset:49216
+; EXACT-NEXT:    ds_read_b128 v[48:51], v32 offset:49200
+; EXACT-NEXT:    ds_read_b128 v[44:47], v32 offset:49184
+; EXACT-NEXT:    ds_read_b128 v[40:43], v32 offset:49168
+; EXACT-NEXT:    ds_read_b128 v[36:39], v32 offset:49152
+; EXACT-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACT-NEXT:    v_mfma_f32_32x32x1f32 v[36:67], v34, v35, v[36:67]
+; EXACT-NEXT:    s_nop 7
+; EXACT-NEXT:    s_nop 7
+; EXACT-NEXT:    s_nop 2
+; EXACT-NEXT:    ds_write_b128 v0, v[60:63] offset:24672
+; EXACT-NEXT:    ds_write_b128 v0, v[64:67] offset:24688
+; EXACT-NEXT:    ds_write_b128 v0, v[52:55] offset:24640
+; EXACT-NEXT:    ds_write_b128 v0, v[56:59] offset:24656
+; EXACT-NEXT:    ds_write_b128 v0, v[44:47] offset:24608
+; EXACT-NEXT:    ds_write_b128 v0, v[48:51] offset:24624
+; EXACT-NEXT:    ds_write_b128 v0, v[36:39] offset:24576
+; EXACT-NEXT:    ds_write_b128 v0, v[40:43] offset:24592
+; EXACT-NEXT:    ds_read_b128 v[30:33], v1 offset:57456
+; EXACT-NEXT:    ds_read_b128 v[26:29], v1 offset:57440
+; EXACT-NEXT:    ds_read_b128 v[22:25], v1 offset:57424
+; EXACT-NEXT:    ds_read_b128 v[18:21], v1 offset:57408
+; EXACT-NEXT:    ds_read_b128 v[2:5], v1 offset:57344
+; EXACT-NEXT:    ds_read_b128 v[6:9], v1 offset:57360
+; EXACT-NEXT:    ds_read_b128 v[10:13], v1 offset:57376
+; EXACT-NEXT:    ds_read_b128 v[14:17], v1 offset:57392
+; EXACT-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACT-NEXT:    v_mfma_f32_32x32x1f32 v[2:33], v34, v35, v[2:33]
+; EXACT-NEXT:    s_nop 7
+; EXACT-NEXT:    s_nop 7
+; EXACT-NEXT:    s_nop 2
+; EXACT-NEXT:    ds_write_b128 v0, v[26:29] offset:32864
+; EXACT-NEXT:    ds_write_b128 v0, v[30:33] offset:32880
+; EXACT-NEXT:    ds_write_b128 v0, v[18:21] offset:32832
+; EXACT-NEXT:    ds_write_b128 v0, v[22:25] offset:32848
+; EXACT-NEXT:    ds_write_b128 v0, v[10:13] offset:32800
+; EXACT-NEXT:    ds_write_b128 v0, v[14:17] offset:32816
+; EXACT-NEXT:    ds_write_b128 v0, v[2:5] offset:32768
+; EXACT-NEXT:    ds_write_b128 v0, v[6:9] offset:32784
+; EXACT-NEXT:    s_endpgm
+entry:
+  %idx = call i32 @llvm.amdgcn.workitem.id.x()
+  %load.0.addr = getelementptr <32 x float>, <32 x float> addrspace(3)* %in, i32 %idx
+  %load.0 = load <32 x float>, <32 x float> addrspace(3)* %load.0.addr
+  %load.1.addr = getelementptr <32 x float>, <32 x float> addrspace(3)* %load.0.addr, i32 64
+  %load.1 = load <32 x float>, <32 x float> addrspace(3)* %load.1.addr
+  %load.2.addr = getelementptr <32 x float>, <32 x float> addrspace(3)* %load.1.addr, i32 128
+  %load.2 = load <32 x float>, <32 x float> addrspace(3)* %load.2.addr
+  %load.3.addr = getelementptr <32 x float>, <32 x float> addrspace(3)* %load.2.addr, i32 192
+  %load.3 = load <32 x float>, <32 x float> addrspace(3)* %load.3.addr
+  %load.4.addr = getelementptr <32 x float>, <32 x float> addrspace(3)* %load.3.addr, i32 256
+  %load.4 = load <32 x float>, <32 x float> addrspace(3)* %load.4.addr
+  %mai.0 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %load.0, i32 0, i32 0, i32 0)
+  %mai.1 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %load.1, i32 0, i32 0, i32 0)
+  %mai.2 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %load.2, i32 0, i32 0, i32 0)
+  %mai.3 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %load.3, i32 0, i32 0, i32 0)
+  %mai.4 = tail call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %load.4, i32 0, i32 0, i32 0)
+  %store.0.addr = getelementptr <32 x float>, <32 x float> addrspace(3)* %out, i32 %idx
+  store <32 x float> %mai.0, <32 x float> addrspace(3)* %store.0.addr
+  %store.1.addr = getelementptr <32 x float>, <32 x float> addrspace(3)* %out, i32 64
+  store <32 x float> %mai.1, <32 x float> addrspace(3)* %store.1.addr
+  %store.2.addr = getelementptr <32 x float>, <32 x float> addrspace(3)* %out, i32 128
+  store <32 x float> %mai.2, <32 x float> addrspace(3)* %store.2.addr
+  %store.3.addr = getelementptr <32 x float>, <32 x float> addrspace(3)* %out, i32 192
+  store <32 x float> %mai.3, <32 x float> addrspace(3)* %store.3.addr
+  %store.4.addr = getelementptr <32 x float>, <32 x float> addrspace(3)* %out, i32 256
+  store <32 x float> %mai.4, <32 x float> addrspace(3)* %store.4.addr
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #2
+declare void @llvm.amdgcn.sched.group.barrier(i32, i32, i32) #1
+declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32, i32, i32) #1
+
+attributes #0 = { nounwind "amdgpu-flat-workgroup-size"="1,256" }
+attributes #1 = { nounwind }
+attributes #2 = { nounwind readnone speculatable }

diff --git a/llvm/test/CodeGen/AMDGPU/igrouplp-dag-mutation.mir b/llvm/test/CodeGen/AMDGPU/igrouplp-dag-mutation.mir
index 4d8c011c64302..59dbea34691e7 100644
--- a/llvm/test/CodeGen/AMDGPU/igrouplp-dag-mutation.mir
+++ b/llvm/test/CodeGen/AMDGPU/igrouplp-dag-mutation.mir
@@ -1,6 +1,7 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -march=amdgcn -mcpu=gfx90a -start-before=machine-scheduler -stop-after=postmisched %s -o - 2>&1 | FileCheck -check-prefix=DEFAULT %s
 # RUN: llc -march=amdgcn -mcpu=gfx90a -start-before=machine-scheduler -stop-after=postmisched %s -o - -amdgpu-igrouplp=1 2>&1 | FileCheck -check-prefix=PIPELINE %s
+# RUN: llc -march=amdgcn -mcpu=gfx90a -start-before=machine-scheduler -stop-after=postmisched %s -o - -amdgpu-igrouplp=1 -amdgpu-igrouplp-exact-solver 2>&1 | FileCheck -check-prefix=EXACT %s
 
 ---
 name: no_pipeline
@@ -34,6 +35,19 @@ body:             |
     ; PIPELINE-NEXT: $vgpr6 = V_MUL_LO_U32_e64 killed $vgpr1, killed $sgpr0, implicit $exec
     ; PIPELINE-NEXT: $vgpr8 = V_MOV_B32_e32 0, implicit $exec
     ; PIPELINE-NEXT: $vgpr9 = V_MOV_B32_e32 9, implicit $exec
+    ; EXACT-LABEL: name: no_pipeline
+    ; EXACT: liveins: $sgpr0, $vgpr10_vgpr11
+    ; EXACT-NEXT: {{  $}}
+    ; EXACT-NEXT: $vgpr1 = V_MOV_B32_e32 1, implicit $exec
+    ; EXACT-NEXT: $vgpr0 = V_MOV_B32_e32 1, implicit $exec
+    ; EXACT-NEXT: $vgpr1 = V_ADD_F16_e32 killed $vgpr1, $vgpr0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD killed $vgpr10_vgpr11, $vgpr1, 0, 0, implicit $exec
+    ; EXACT-NEXT: $vgpr2 = V_MOV_B32_e32 1, implicit $exec
+    ; EXACT-NEXT: $vgpr3 = DS_READ_U16_gfx9 killed $vgpr2, 0, 0, implicit $exec
+    ; EXACT-NEXT: $vgpr5 = V_XOR_B32_e32 $vgpr1, killed $vgpr0, implicit $exec
+    ; EXACT-NEXT: $vgpr6 = V_MUL_LO_U32_e64 killed $vgpr1, killed $sgpr0, implicit $exec
+    ; EXACT-NEXT: $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+    ; EXACT-NEXT: $vgpr9 = V_MOV_B32_e32 9, implicit $exec
     $vgpr1 = V_MOV_B32_e32 1, implicit $exec
     $vgpr0 = V_MOV_B32_e32 1, implicit $exec
     $vgpr8 = V_MOV_B32_e32 0, implicit $exec
@@ -127,22 +141,68 @@ body:             |
     ; PIPELINE-NEXT:   $vgpr15 = DS_READ_U16_gfx9 $vgpr7, 0, 4096, implicit $exec
     ; PIPELINE-NEXT:   $vgpr16 = DS_READ_U16_gfx9 $vgpr7, 0, 2048, implicit $exec
     ; PIPELINE-NEXT: }
+    ; PIPELINE-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
     ; PIPELINE-NEXT: DS_WRITE_B32 $vgpr3, $vgpr1, 0, 16, implicit $m0, implicit $exec
     ; PIPELINE-NEXT: BUNDLE implicit-def $vgpr19, implicit-def $vgpr19_lo16, implicit-def $vgpr19_hi16, implicit-def $vgpr20, implicit-def $vgpr20_lo16, implicit-def $vgpr20_hi16, implicit killed $vgpr26_vgpr27, implicit $exec {
     ; PIPELINE-NEXT:   $vgpr19 = GLOBAL_LOAD_USHORT $vgpr26_vgpr27, 0, 0, implicit $exec
     ; PIPELINE-NEXT:   $vgpr20 = GLOBAL_LOAD_USHORT killed $vgpr26_vgpr27, 0, 0, implicit $exec
     ; PIPELINE-NEXT: }
+    ; PIPELINE-NEXT: BUNDLE implicit $vgpr0, implicit killed $vgpr7, implicit $m0, implicit $exec, implicit killed $vgpr23, implicit $vgpr3 {
+    ; PIPELINE-NEXT:   DS_WRITE_B32 $vgpr0, killed $vgpr7, 0, 16, implicit $m0, implicit $exec
+    ; PIPELINE-NEXT:   DS_WRITE_B32 killed $vgpr23, $vgpr3, 0, 16, implicit $m0, implicit $exec
+    ; PIPELINE-NEXT: }
+    ; PIPELINE-NEXT: DS_WRITE_B32 killed $vgpr9, killed $vgpr24, 0, 16, implicit $m0, implicit $exec
     ; PIPELINE-NEXT: $agpr4_agpr5_agpr6_agpr7 = V_MFMA_F32_4X4X1F32_e64 $vgpr3, $vgpr4, killed $agpr4_agpr5_agpr6_agpr7, 0, 0, 0, implicit $mode, implicit $exec
-    ; PIPELINE-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
-    ; PIPELINE-NEXT: $agpr8_agpr9_agpr10_agpr11 = V_MFMA_F32_4X4X1F32_e64 $vgpr3, killed $vgpr4, killed $agpr8_agpr9_agpr10_agpr11, 0, 0, 0, implicit $mode, implicit $exec
+    ; PIPELINE-NEXT: $agpr8_agpr9_agpr10_agpr11 = V_MFMA_F32_4X4X1F32_e64 killed $vgpr3, killed $vgpr4, killed $agpr8_agpr9_agpr10_agpr11, 0, 0, 0, implicit $mode, implicit $exec
     ; PIPELINE-NEXT: $agpr4_agpr5_agpr6_agpr7 = V_MFMA_F32_4X4X1F32_e64 killed $vgpr5, killed $vgpr6, killed $agpr4_agpr5_agpr6_agpr7, 0, 0, 0, implicit $mode, implicit $exec
     ; PIPELINE-NEXT: $agpr16_agpr17_agpr18_agpr19 = V_MFMA_F32_4X4X1F32_e64 killed $vgpr10, killed $vgpr11, killed $agpr16_agpr17_agpr18_agpr19, 0, 0, 0, implicit $mode, implicit $exec
-    ; PIPELINE-NEXT: $agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_4X4X1F32_e64 killed $vgpr1, $vgpr0, killed $agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
-    ; PIPELINE-NEXT: BUNDLE implicit killed $vgpr0, implicit killed $vgpr7, implicit $m0, implicit $exec, implicit killed $vgpr23, implicit killed $vgpr3 {
-    ; PIPELINE-NEXT:   DS_WRITE_B32 killed $vgpr0, killed $vgpr7, 0, 16, implicit $m0, implicit $exec
-    ; PIPELINE-NEXT:   DS_WRITE_B32 killed $vgpr23, killed $vgpr3, 0, 16, implicit $m0, implicit $exec
-    ; PIPELINE-NEXT: }
-    ; PIPELINE-NEXT: DS_WRITE_B32 killed $vgpr9, killed $vgpr24, 0, 16, implicit $m0, implicit $exec
+    ; PIPELINE-NEXT: $agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_4X4X1F32_e64 killed $vgpr1, killed $vgpr0, killed $agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-LABEL: name: full_pipe
+    ; EXACT: liveins: $sgpr0, $agpr0_agpr1_agpr2_agpr3, $agpr4_agpr5_agpr6_agpr7, $agpr8_agpr9_agpr10_agpr11, $agpr12_agpr13_agpr14_agpr15, $agpr16_agpr17_agpr18_agpr19, $vgpr10_vgpr11
+    ; EXACT-NEXT: {{  $}}
+    ; EXACT-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+    ; EXACT-NEXT: $vgpr1 = V_MOV_B32_e32 1, implicit $exec
+    ; EXACT-NEXT: $vgpr2 = V_MOV_B32_e32 2, implicit $exec
+    ; EXACT-NEXT: $vgpr3 = V_MOV_B32_e32 3, implicit $exec
+    ; EXACT-NEXT: $vgpr6 = GLOBAL_LOAD_USHORT $vgpr0_vgpr1, 0, 0, implicit $exec
+    ; EXACT-NEXT: $vgpr7 = GLOBAL_LOAD_USHORT $vgpr2_vgpr3, 0, 0, implicit $exec
+    ; EXACT-NEXT: $vgpr4 = V_MOV_B32_e32 4, implicit $exec
+    ; EXACT-NEXT: $vgpr5 = V_MOV_B32_e32 5, implicit $exec
+    ; EXACT-NEXT: $vgpr8 = GLOBAL_LOAD_USHORT $vgpr4_vgpr5, 0, 0, implicit $exec
+    ; EXACT-NEXT: $vgpr1 = V_ADD_F16_e32 killed $vgpr1, $vgpr0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: $vgpr26 = V_MOV_B32_e32 1, implicit $exec
+    ; EXACT-NEXT: $vgpr27 = V_MOV_B32_e32 1, implicit $exec
+    ; EXACT-NEXT: $vgpr9 = V_MOV_B32_e32 1, implicit $exec
+    ; EXACT-NEXT: $vgpr24 = V_MOV_B32_e32 1, implicit $exec
+    ; EXACT-NEXT: $vgpr23 = V_XOR_B32_e32 $vgpr1, $vgpr0, implicit $exec
+    ; EXACT-NEXT: $vgpr22 = V_XOR_B32_e32 $vgpr1, $vgpr0, implicit $exec
+    ; EXACT-NEXT: $vgpr21 = V_MUL_LO_U32_e64 $vgpr1, killed $sgpr0, implicit $exec
+    ; EXACT-NEXT: $vgpr30 = V_MOV_B32_e32 30, implicit $exec
+    ; EXACT-NEXT: $vgpr17 = V_MOV_B32_e32 1, implicit $exec
+    ; EXACT-NEXT: $vgpr18 = V_MOV_B32_e32 1, implicit $exec
+    ; EXACT-NEXT: BUNDLE implicit-def $vgpr10, implicit-def $vgpr10_lo16, implicit-def $vgpr10_hi16, implicit-def $vgpr11, implicit-def $vgpr11_lo16, implicit-def $vgpr11_hi16, implicit-def $vgpr12, implicit-def $vgpr12_lo16, implicit-def $vgpr12_hi16, implicit-def $vgpr15, implicit-def $vgpr15_lo16, implicit-def $vgpr15_hi16, implicit-def $vgpr16, implicit-def $vgpr16_lo16, implicit-def $vgpr16_hi16, implicit $vgpr7, implicit $exec {
+    ; EXACT-NEXT:   $vgpr10 = DS_READ_U16_gfx9 $vgpr7, 0, 512, implicit $exec
+    ; EXACT-NEXT:   $vgpr11 = DS_READ_U16_gfx9 $vgpr7, 0, 2048, implicit $exec
+    ; EXACT-NEXT:   $vgpr12 = DS_READ_U16_gfx9 $vgpr7, 0, 1024, implicit $exec
+    ; EXACT-NEXT:   $vgpr15 = DS_READ_U16_gfx9 $vgpr7, 0, 4096, implicit $exec
+    ; EXACT-NEXT:   $vgpr16 = DS_READ_U16_gfx9 $vgpr7, 0, 2048, implicit $exec
+    ; EXACT-NEXT: }
+    ; EXACT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: DS_WRITE_B32 $vgpr3, $vgpr1, 0, 16, implicit $m0, implicit $exec
+    ; EXACT-NEXT: BUNDLE implicit-def $vgpr19, implicit-def $vgpr19_lo16, implicit-def $vgpr19_hi16, implicit-def $vgpr20, implicit-def $vgpr20_lo16, implicit-def $vgpr20_hi16, implicit killed $vgpr26_vgpr27, implicit $exec {
+    ; EXACT-NEXT:   $vgpr19 = GLOBAL_LOAD_USHORT $vgpr26_vgpr27, 0, 0, implicit $exec
+    ; EXACT-NEXT:   $vgpr20 = GLOBAL_LOAD_USHORT killed $vgpr26_vgpr27, 0, 0, implicit $exec
+    ; EXACT-NEXT: }
+    ; EXACT-NEXT: BUNDLE implicit $vgpr0, implicit killed $vgpr7, implicit $m0, implicit $exec, implicit killed $vgpr23, implicit $vgpr3 {
+    ; EXACT-NEXT:   DS_WRITE_B32 $vgpr0, killed $vgpr7, 0, 16, implicit $m0, implicit $exec
+    ; EXACT-NEXT:   DS_WRITE_B32 killed $vgpr23, $vgpr3, 0, 16, implicit $m0, implicit $exec
+    ; EXACT-NEXT: }
+    ; EXACT-NEXT: DS_WRITE_B32 killed $vgpr9, killed $vgpr24, 0, 16, implicit $m0, implicit $exec
+    ; EXACT-NEXT: $agpr4_agpr5_agpr6_agpr7 = V_MFMA_F32_4X4X1F32_e64 $vgpr3, $vgpr4, killed $agpr4_agpr5_agpr6_agpr7, 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: $agpr8_agpr9_agpr10_agpr11 = V_MFMA_F32_4X4X1F32_e64 killed $vgpr3, killed $vgpr4, killed $agpr8_agpr9_agpr10_agpr11, 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: $agpr4_agpr5_agpr6_agpr7 = V_MFMA_F32_4X4X1F32_e64 killed $vgpr5, killed $vgpr6, killed $agpr4_agpr5_agpr6_agpr7, 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: $agpr16_agpr17_agpr18_agpr19 = V_MFMA_F32_4X4X1F32_e64 killed $vgpr10, killed $vgpr11, killed $agpr16_agpr17_agpr18_agpr19, 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: $agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_4X4X1F32_e64 killed $vgpr1, killed $vgpr0, killed $agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
     $vgpr0 = V_MOV_B32_e32 0, implicit $exec
     $vgpr1 = V_MOV_B32_e32 1, implicit $exec
     $vgpr2 = V_MOV_B32_e32 2, implicit $exec
@@ -210,6 +270,17 @@ body:             |
     ; PIPELINE-NEXT:   $vgpr16 = DS_READ_U16_gfx9 killed $vgpr7, 0, 2048, implicit $exec
     ; PIPELINE-NEXT: }
     ; PIPELINE-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-LABEL: name: block_ends_in_bundle
+    ; EXACT: liveins: $vgpr0, $vgpr1, $vgpr7, $agpr0_agpr1_agpr2_agpr3
+    ; EXACT-NEXT: {{  $}}
+    ; EXACT-NEXT: BUNDLE implicit-def $vgpr10, implicit-def $vgpr10_lo16, implicit-def $vgpr10_hi16, implicit-def $vgpr11, implicit-def $vgpr11_lo16, implicit-def $vgpr11_hi16, implicit-def $vgpr12, implicit-def $vgpr12_lo16, implicit-def $vgpr12_hi16, implicit-def $vgpr15, implicit-def $vgpr15_lo16, implicit-def $vgpr15_hi16, implicit-def $vgpr16, implicit-def $vgpr16_lo16, implicit-def $vgpr16_hi16, implicit killed $vgpr7, implicit $exec {
+    ; EXACT-NEXT:   $vgpr10 = DS_READ_U16_gfx9 $vgpr7, 0, 512, implicit $exec
+    ; EXACT-NEXT:   $vgpr11 = DS_READ_U16_gfx9 $vgpr7, 0, 2048, implicit $exec
+    ; EXACT-NEXT:   $vgpr12 = DS_READ_U16_gfx9 $vgpr7, 0, 1024, implicit $exec
+    ; EXACT-NEXT:   $vgpr15 = DS_READ_U16_gfx9 $vgpr7, 0, 4096, implicit $exec
+    ; EXACT-NEXT:   $vgpr16 = DS_READ_U16_gfx9 killed $vgpr7, 0, 2048, implicit $exec
+    ; EXACT-NEXT: }
+    ; EXACT-NEXT: $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
       $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32_e64 $vgpr1, $vgpr0, $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
       BUNDLE implicit-def $vgpr10, implicit-def $vgpr10_lo16, implicit-def $vgpr10_hi16, implicit-def $vgpr11, implicit-def $vgpr11_lo16, implicit-def $vgpr11_hi16, implicit-def $vgpr12, implicit-def $vgpr12_lo16, implicit-def $vgpr12_hi16, implicit-def $vgpr15, implicit-def $vgpr15_lo16, implicit-def $vgpr15_hi16, implicit-def $vgpr16, implicit-def $vgpr16_lo16, implicit-def $vgpr16_hi16, implicit $vgpr7, implicit $exec {
         $vgpr10 = DS_READ_U16_gfx9 $vgpr7, 0, 512, implicit $exec

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll
index baa775fc17096..3aa921859b630 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -misched-cluster=0  < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -misched-cluster=0 -amdgpu-igrouplp-exact-solver-max-branches=250000 < %s | FileCheck -check-prefix=EXACTCUTOFF %s
 
 define amdgpu_kernel void @test_sched_group_barrier() #0 {
 ; GCN-LABEL: test_sched_group_barrier:
@@ -9,6 +10,14 @@ define amdgpu_kernel void @test_sched_group_barrier() #0 {
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000004) size(8) SyncID(16)
 ; GCN-NEXT:    ; sched_group_barrier mask(0x0000000F) size(10000) SyncID(-1)
 ; GCN-NEXT:    s_endpgm
+;
+; EXACTCUTOFF-LABEL: test_sched_group_barrier:
+; EXACTCUTOFF:       ; %bb.0: ; %entry
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000000) size(1) SyncID(2)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000001) size(2) SyncID(4)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000004) size(8) SyncID(16)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x0000000F) size(10000) SyncID(-1)
+; EXACTCUTOFF-NEXT:    s_endpgm
 entry:
   call void @llvm.amdgcn.sched.group.barrier(i32 0, i32 1, i32 2) #1
   call void @llvm.amdgcn.sched.group.barrier(i32 1, i32 2, i32 4) #1
@@ -67,7 +76,6 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_READ_VALU_WRITE(<32
 ; GCN-NEXT:    v_mul_lo_u32 v26, v26, v26
 ; GCN-NEXT:    v_mul_lo_u32 v25, v25, v25
 ; GCN-NEXT:    v_mul_lo_u32 v24, v24, v24
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(30) SyncID(0)
 ; GCN-NEXT:    global_store_dwordx4 v32, v[28:31], s[2:3] offset:112
 ; GCN-NEXT:    global_store_dwordx4 v32, v[24:27], s[2:3] offset:96
 ; GCN-NEXT:    global_store_dwordx4 v32, v[20:23], s[2:3] offset:80
@@ -76,8 +84,70 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_READ_VALU_WRITE(<32
 ; GCN-NEXT:    global_store_dwordx4 v32, v[8:11], s[2:3] offset:32
 ; GCN-NEXT:    global_store_dwordx4 v32, v[4:7], s[2:3] offset:16
 ; GCN-NEXT:    global_store_dwordx4 v32, v[0:3], s[2:3]
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(30) SyncID(0)
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000040) size(8) SyncID(0)
 ; GCN-NEXT:    s_endpgm
+;
+; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_READ_VALU_WRITE:
+; EXACTCUTOFF:       ; %bb.0:
+; EXACTCUTOFF-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; EXACTCUTOFF-NEXT:    v_lshlrev_b32_e32 v32, 7, v0
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[0:3], v32, s[0:1]
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[4:7], v32, s[0:1] offset:16
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[8:11], v32, s[0:1] offset:32
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[12:15], v32, s[0:1] offset:48
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[16:19], v32, s[0:1] offset:64
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[20:23], v32, s[0:1] offset:80
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[24:27], v32, s[0:1] offset:96
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[28:31], v32, s[0:1] offset:112
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(8) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(7)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v3, v3, v3
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v2, v2, v2
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v1, v1, v1
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v0, v0, v0
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(6)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v7, v7, v7
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v6, v6, v6
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v5, v5, v5
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v31, v31, v31
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v30, v30, v30
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v29, v29, v29
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v28, v28, v28
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v4, v4, v4
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v11, v11, v11
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v10, v10, v10
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v9, v9, v9
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v8, v8, v8
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v15, v15, v15
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v14, v14, v14
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v13, v13, v13
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v12, v12, v12
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v19, v19, v19
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v18, v18, v18
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v17, v17, v17
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v16, v16, v16
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v23, v23, v23
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v22, v22, v22
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v21, v21, v21
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v20, v20, v20
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v27, v27, v27
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v26, v26, v26
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v25, v25, v25
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v24, v24, v24
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v32, v[28:31], s[2:3] offset:112
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v32, v[24:27], s[2:3] offset:96
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v32, v[20:23], s[2:3] offset:80
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v32, v[16:19], s[2:3] offset:64
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v32, v[12:15], s[2:3] offset:48
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v32, v[8:11], s[2:3] offset:32
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v32, v[4:7], s[2:3] offset:16
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v32, v[0:3], s[2:3]
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(30) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000040) size(8) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x() #2
   %gep1 = getelementptr <32 x i32>, <32 x i32> addrspace(1)* %in, i32 %tid
   %load = load <32 x i32>, <32 x i32> addrspace(1)* %gep1
@@ -97,82 +167,162 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_alternating_READ_VA
 ; GCN-LABEL: test_sched_group_barrier_pipeline_alternating_READ_VALU:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v32, 7, v0
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    global_load_dwordx4 v[0:3], v32, s[0:1] offset:80
-; GCN-NEXT:    global_load_dwordx4 v[4:7], v32, s[0:1] offset:96
-; GCN-NEXT:    global_load_dwordx4 v[8:11], v32, s[0:1] offset:112
+; GCN-NEXT:    global_load_dwordx4 v[8:11], v32, s[0:1] offset:96
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mul_lo_u32 v9, v9, v9
+; GCN-NEXT:    global_load_dwordx4 v[0:3], v32, s[0:1]
+; GCN-NEXT:    v_mul_lo_u32 v8, v8, v8
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
-; GCN-NEXT:    s_waitcnt vmcnt(2)
+; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_mul_lo_u32 v3, v3, v3
 ; GCN-NEXT:    v_mul_lo_u32 v2, v2, v2
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
-; GCN-NEXT:    global_load_dwordx4 v[12:15], v32, s[0:1] offset:64
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; GCN-NEXT:    global_load_dwordx4 v[4:7], v32, s[0:1] offset:112
 ; GCN-NEXT:    v_mul_lo_u32 v1, v1, v1
 ; GCN-NEXT:    v_mul_lo_u32 v0, v0, v0
+; GCN-NEXT:    v_mul_lo_u32 v11, v11, v11
+; GCN-NEXT:    v_mul_lo_u32 v10, v10, v10
+; GCN-NEXT:    global_load_dwordx4 v[12:15], v32, s[0:1] offset:48
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
-; GCN-NEXT:    global_load_dwordx4 v[16:19], v32, s[0:1] offset:48
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
-; GCN-NEXT:    s_waitcnt vmcnt(3)
-; GCN-NEXT:    v_mul_lo_u32 v7, v7, v7
-; GCN-NEXT:    v_mul_lo_u32 v6, v6, v6
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
-; GCN-NEXT:    global_load_dwordx4 v[20:23], v32, s[0:1] offset:32
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; GCN-NEXT:    s_waitcnt vmcnt(1)
+; GCN-NEXT:    v_mul_lo_u32 v7, v7, v7
+; GCN-NEXT:    v_mul_lo_u32 v6, v6, v6
 ; GCN-NEXT:    v_mul_lo_u32 v5, v5, v5
 ; GCN-NEXT:    v_mul_lo_u32 v4, v4, v4
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mul_lo_u32 v13, v13, v13
+; GCN-NEXT:    v_mul_lo_u32 v15, v15, v15
+; GCN-NEXT:    global_load_dwordx4 v[16:19], v32, s[0:1] offset:80
+; GCN-NEXT:    v_mul_lo_u32 v14, v14, v14
+; GCN-NEXT:    v_mul_lo_u32 v12, v12, v12
+; GCN-NEXT:    global_load_dwordx4 v[20:23], v32, s[0:1] offset:64
+; GCN-NEXT:    global_load_dwordx4 v[24:27], v32, s[0:1] offset:32
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
-; GCN-NEXT:    global_load_dwordx4 v[24:27], v32, s[0:1] offset:16
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
-; GCN-NEXT:    s_waitcnt vmcnt(4)
-; GCN-NEXT:    v_mul_lo_u32 v11, v11, v11
-; GCN-NEXT:    v_mul_lo_u32 v10, v10, v10
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
-; GCN-NEXT:    global_load_dwordx4 v[28:31], v32, s[0:1]
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
-; GCN-NEXT:    v_mul_lo_u32 v9, v9, v9
-; GCN-NEXT:    v_mul_lo_u32 v8, v8, v8
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
-; GCN-NEXT:    global_store_dwordx4 v32, v[8:11], s[2:3] offset:112
-; GCN-NEXT:    global_store_dwordx4 v32, v[4:7], s[2:3] offset:96
-; GCN-NEXT:    global_store_dwordx4 v32, v[0:3], s[2:3] offset:80
-; GCN-NEXT:    s_waitcnt vmcnt(7)
-; GCN-NEXT:    v_mul_lo_u32 v15, v15, v15
-; GCN-NEXT:    v_mul_lo_u32 v14, v14, v14
-; GCN-NEXT:    v_mul_lo_u32 v13, v13, v13
-; GCN-NEXT:    v_mul_lo_u32 v12, v12, v12
-; GCN-NEXT:    s_waitcnt vmcnt(6)
+; GCN-NEXT:    s_waitcnt vmcnt(2)
 ; GCN-NEXT:    v_mul_lo_u32 v19, v19, v19
 ; GCN-NEXT:    v_mul_lo_u32 v18, v18, v18
-; GCN-NEXT:    s_waitcnt vmcnt(5)
-; GCN-NEXT:    v_mul_lo_u32 v11, v23, v23
-; GCN-NEXT:    v_mul_lo_u32 v10, v22, v22
-; GCN-NEXT:    v_mul_lo_u32 v9, v21, v21
-; GCN-NEXT:    s_waitcnt vmcnt(4)
-; GCN-NEXT:    v_mul_lo_u32 v7, v27, v27
-; GCN-NEXT:    v_mul_lo_u32 v6, v26, v26
-; GCN-NEXT:    v_mul_lo_u32 v5, v25, v25
-; GCN-NEXT:    s_waitcnt vmcnt(3)
-; GCN-NEXT:    v_mul_lo_u32 v3, v31, v31
-; GCN-NEXT:    v_mul_lo_u32 v2, v30, v30
-; GCN-NEXT:    v_mul_lo_u32 v1, v29, v29
-; GCN-NEXT:    v_mul_lo_u32 v0, v28, v28
-; GCN-NEXT:    v_mul_lo_u32 v4, v24, v24
-; GCN-NEXT:    v_mul_lo_u32 v8, v20, v20
+; GCN-NEXT:    s_waitcnt vmcnt(1)
+; GCN-NEXT:    v_mul_lo_u32 v23, v23, v23
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mul_lo_u32 v25, v25, v25
+; GCN-NEXT:    v_mul_lo_u32 v24, v24, v24
+; GCN-NEXT:    global_load_dwordx4 v[28:31], v32, s[0:1] offset:16
+; GCN-NEXT:    v_mul_lo_u32 v27, v27, v27
+; GCN-NEXT:    v_mul_lo_u32 v26, v26, v26
+; GCN-NEXT:    v_mul_lo_u32 v22, v22, v22
+; GCN-NEXT:    v_mul_lo_u32 v21, v21, v21
+; GCN-NEXT:    v_mul_lo_u32 v20, v20, v20
 ; GCN-NEXT:    v_mul_lo_u32 v17, v17, v17
 ; GCN-NEXT:    v_mul_lo_u32 v16, v16, v16
-; GCN-NEXT:    global_store_dwordx4 v32, v[12:15], s[2:3] offset:64
-; GCN-NEXT:    global_store_dwordx4 v32, v[16:19], s[2:3] offset:48
-; GCN-NEXT:    global_store_dwordx4 v32, v[8:11], s[2:3] offset:32
-; GCN-NEXT:    global_store_dwordx4 v32, v[4:7], s[2:3] offset:16
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mul_lo_u32 v29, v29, v29
+; GCN-NEXT:    v_mul_lo_u32 v28, v28, v28
+; GCN-NEXT:    v_mul_lo_u32 v31, v31, v31
+; GCN-NEXT:    v_mul_lo_u32 v30, v30, v30
+; GCN-NEXT:    global_store_dwordx4 v32, v[4:7], s[2:3] offset:112
+; GCN-NEXT:    global_store_dwordx4 v32, v[8:11], s[2:3] offset:96
+; GCN-NEXT:    global_store_dwordx4 v32, v[16:19], s[2:3] offset:80
+; GCN-NEXT:    global_store_dwordx4 v32, v[20:23], s[2:3] offset:64
+; GCN-NEXT:    global_store_dwordx4 v32, v[12:15], s[2:3] offset:48
+; GCN-NEXT:    global_store_dwordx4 v32, v[24:27], s[2:3] offset:32
+; GCN-NEXT:    global_store_dwordx4 v32, v[28:31], s[2:3] offset:16
 ; GCN-NEXT:    global_store_dwordx4 v32, v[0:3], s[2:3]
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000040) size(8) SyncID(0)
 ; GCN-NEXT:    s_endpgm
+;
+; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_alternating_READ_VALU:
+; EXACTCUTOFF:       ; %bb.0:
+; EXACTCUTOFF-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; EXACTCUTOFF-NEXT:    v_lshlrev_b32_e32 v32, 7, v0
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[8:11], v32, s[0:1] offset:96
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v9, v9, v9
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[0:3], v32, s[0:1]
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v8, v8, v8
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v3, v3, v3
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v2, v2, v2
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[4:7], v32, s[0:1] offset:112
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v1, v1, v1
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v0, v0, v0
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v11, v11, v11
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v10, v10, v10
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[12:15], v32, s[0:1] offset:48
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(1)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v7, v7, v7
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v6, v6, v6
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v5, v5, v5
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v4, v4, v4
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v13, v13, v13
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v15, v15, v15
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[16:19], v32, s[0:1] offset:80
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v14, v14, v14
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v12, v12, v12
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[20:23], v32, s[0:1] offset:64
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[24:27], v32, s[0:1] offset:32
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(2)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v19, v19, v19
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v18, v18, v18
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(1)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v23, v23, v23
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v25, v25, v25
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v24, v24, v24
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[28:31], v32, s[0:1] offset:16
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v27, v27, v27
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v26, v26, v26
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v22, v22, v22
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v21, v21, v21
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v20, v20, v20
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v17, v17, v17
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v16, v16, v16
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v29, v29, v29
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v28, v28, v28
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v31, v31, v31
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v30, v30, v30
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v32, v[4:7], s[2:3] offset:112
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v32, v[8:11], s[2:3] offset:96
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v32, v[16:19], s[2:3] offset:80
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v32, v[20:23], s[2:3] offset:64
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v32, v[12:15], s[2:3] offset:48
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v32, v[24:27], s[2:3] offset:32
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v32, v[28:31], s[2:3] offset:16
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v32, v[0:3], s[2:3]
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000040) size(8) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x() #2
   %gep1 = getelementptr <32 x i32>, <32 x i32> addrspace(1)* %in, i32 %tid
   %load = load <32 x i32>, <32 x i32> addrspace(1)* %gep1
@@ -220,90 +370,178 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_alternating_READ_VA
 ; GCN-LABEL: test_sched_group_barrier_pipeline_alternating_READ_VALU_WRITE:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
 ; GCN-NEXT:    v_lshlrev_b32_e32 v16, 7, v0
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    global_load_dwordx4 v[0:3], v16, s[0:1] offset:80
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    global_load_dwordx4 v[0:3], v16, s[0:1]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_mul_lo_u32 v3, v3, v3
 ; GCN-NEXT:    v_mul_lo_u32 v2, v2, v2
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
-; GCN-NEXT:    global_load_dwordx4 v[4:7], v16, s[0:1] offset:64
-; GCN-NEXT:    global_load_dwordx4 v[8:11], v16, s[0:1] offset:96
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
 ; GCN-NEXT:    v_mul_lo_u32 v1, v1, v1
 ; GCN-NEXT:    v_mul_lo_u32 v0, v0, v0
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; GCN-NEXT:    global_store_dwordx4 v16, v[0:3], s[2:3]
+; GCN-NEXT:    global_load_dwordx4 v[0:3], v16, s[0:1] offset:112
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mul_lo_u32 v3, v3, v3
+; GCN-NEXT:    v_mul_lo_u32 v2, v2, v2
+; GCN-NEXT:    v_mul_lo_u32 v1, v1, v1
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, v0
+; GCN-NEXT:    global_store_dwordx4 v16, v[0:3], s[2:3] offset:112
+; GCN-NEXT:    global_load_dwordx4 v[0:3], v16, s[0:1] offset:96
+; GCN-NEXT:    s_nop 0
+; GCN-NEXT:    global_load_dwordx4 v[4:7], v16, s[0:1] offset:48
 ; GCN-NEXT:    s_waitcnt vmcnt(1)
+; GCN-NEXT:    v_mul_lo_u32 v3, v3, v3
+; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_mul_lo_u32 v7, v7, v7
-; GCN-NEXT:    v_mul_lo_u32 v6, v6, v6
 ; GCN-NEXT:    v_mul_lo_u32 v5, v5, v5
 ; GCN-NEXT:    v_mul_lo_u32 v4, v4, v4
-; GCN-NEXT:    global_store_dwordx4 v16, v[4:7], s[2:3] offset:64
+; GCN-NEXT:    v_mul_lo_u32 v6, v6, v6
+; GCN-NEXT:    global_store_dwordx4 v16, v[4:7], s[2:3] offset:48
+; GCN-NEXT:    global_load_dwordx4 v[8:11], v16, s[0:1] offset:16
+; GCN-NEXT:    v_mul_lo_u32 v2, v2, v2
+; GCN-NEXT:    v_mul_lo_u32 v1, v1, v1
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, v0
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
-; GCN-NEXT:    global_load_dwordx4 v[4:7], v16, s[0:1] offset:48
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
-; GCN-NEXT:    s_waitcnt vmcnt(2)
-; GCN-NEXT:    v_mul_lo_u32 v11, v11, v11
-; GCN-NEXT:    v_mul_lo_u32 v10, v10, v10
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_mul_lo_u32 v7, v7, v7
-; GCN-NEXT:    v_mul_lo_u32 v6, v6, v6
-; GCN-NEXT:    v_mul_lo_u32 v5, v5, v5
-; GCN-NEXT:    v_mul_lo_u32 v4, v4, v4
-; GCN-NEXT:    global_store_dwordx4 v16, v[4:7], s[2:3] offset:48
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
-; GCN-NEXT:    global_load_dwordx4 v[4:7], v16, s[0:1] offset:32
-; GCN-NEXT:    s_nop 0
-; GCN-NEXT:    global_load_dwordx4 v[12:15], v16, s[0:1] offset:112
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
+; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_mul_lo_u32 v9, v9, v9
 ; GCN-NEXT:    v_mul_lo_u32 v8, v8, v8
+; GCN-NEXT:    v_mul_lo_u32 v11, v11, v11
+; GCN-NEXT:    v_mul_lo_u32 v10, v10, v10
+; GCN-NEXT:    global_store_dwordx4 v16, v[8:11], s[2:3] offset:16
+; GCN-NEXT:    global_load_dwordx4 v[8:11], v16, s[0:1] offset:80
+; GCN-NEXT:    s_nop 0
+; GCN-NEXT:    global_load_dwordx4 v[4:7], v16, s[0:1] offset:64
+; GCN-NEXT:    global_load_dwordx4 v[12:15], v16, s[0:1] offset:32
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
-; GCN-NEXT:    global_store_dwordx4 v16, v[8:11], s[2:3] offset:96
-; GCN-NEXT:    s_waitcnt vmcnt(2)
-; GCN-NEXT:    v_mul_lo_u32 v7, v7, v7
-; GCN-NEXT:    v_mul_lo_u32 v6, v6, v6
-; GCN-NEXT:    v_mul_lo_u32 v5, v5, v5
-; GCN-NEXT:    v_mul_lo_u32 v4, v4, v4
-; GCN-NEXT:    global_store_dwordx4 v16, v[4:7], s[2:3] offset:32
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
-; GCN-NEXT:    global_load_dwordx4 v[4:7], v16, s[0:1] offset:16
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
-; GCN-NEXT:    s_waitcnt vmcnt(3)
-; GCN-NEXT:    v_mul_lo_u32 v15, v15, v15
-; GCN-NEXT:    v_mul_lo_u32 v14, v14, v14
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; GCN-NEXT:    s_waitcnt vmcnt(2)
+; GCN-NEXT:    v_mul_lo_u32 v11, v11, v11
+; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_mul_lo_u32 v13, v13, v13
 ; GCN-NEXT:    v_mul_lo_u32 v12, v12, v12
-; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mul_lo_u32 v15, v15, v15
+; GCN-NEXT:    v_mul_lo_u32 v14, v14, v14
+; GCN-NEXT:    v_mul_lo_u32 v10, v10, v10
+; GCN-NEXT:    v_mul_lo_u32 v9, v9, v9
+; GCN-NEXT:    v_mul_lo_u32 v8, v8, v8
 ; GCN-NEXT:    v_mul_lo_u32 v7, v7, v7
 ; GCN-NEXT:    v_mul_lo_u32 v6, v6, v6
 ; GCN-NEXT:    v_mul_lo_u32 v5, v5, v5
 ; GCN-NEXT:    v_mul_lo_u32 v4, v4, v4
-; GCN-NEXT:    global_store_dwordx4 v16, v[4:7], s[2:3] offset:16
+; GCN-NEXT:    global_store_dwordx4 v16, v[12:15], s[2:3] offset:32
+; GCN-NEXT:    global_store_dwordx4 v16, v[8:11], s[2:3] offset:80
+; GCN-NEXT:    global_store_dwordx4 v16, v[4:7], s[2:3] offset:64
+; GCN-NEXT:    global_store_dwordx4 v16, v[0:3], s[2:3] offset:96
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
-; GCN-NEXT:    global_load_dwordx4 v[4:7], v16, s[0:1]
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_mul_lo_u32 v7, v7, v7
-; GCN-NEXT:    global_store_dwordx4 v16, v[12:15], s[2:3] offset:112
-; GCN-NEXT:    v_mul_lo_u32 v6, v6, v6
-; GCN-NEXT:    v_mul_lo_u32 v5, v5, v5
-; GCN-NEXT:    v_mul_lo_u32 v4, v4, v4
-; GCN-NEXT:    global_store_dwordx4 v16, v[0:3], s[2:3] offset:80
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
-; GCN-NEXT:    global_store_dwordx4 v16, v[4:7], s[2:3]
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
 ; GCN-NEXT:    s_endpgm
+;
+; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_alternating_READ_VALU_WRITE:
+; EXACTCUTOFF:       ; %bb.0:
+; EXACTCUTOFF-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; EXACTCUTOFF-NEXT:    v_lshlrev_b32_e32 v16, 7, v0
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[0:3], v16, s[0:1]
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v3, v3, v3
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v2, v2, v2
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v1, v1, v1
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v0, v0, v0
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v16, v[0:3], s[2:3]
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[0:3], v16, s[0:1] offset:112
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v3, v3, v3
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v2, v2, v2
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v1, v1, v1
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v0, v0, v0
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v16, v[0:3], s[2:3] offset:112
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[0:3], v16, s[0:1] offset:96
+; EXACTCUTOFF-NEXT:    s_nop 0
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[4:7], v16, s[0:1] offset:48
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(1)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v3, v3, v3
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v7, v7, v7
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v5, v5, v5
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v4, v4, v4
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v6, v6, v6
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v16, v[4:7], s[2:3] offset:48
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[8:11], v16, s[0:1] offset:16
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v2, v2, v2
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v1, v1, v1
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v0, v0, v0
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v9, v9, v9
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v8, v8, v8
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v11, v11, v11
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v10, v10, v10
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v16, v[8:11], s[2:3] offset:16
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[8:11], v16, s[0:1] offset:80
+; EXACTCUTOFF-NEXT:    s_nop 0
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[4:7], v16, s[0:1] offset:64
+; EXACTCUTOFF-NEXT:    global_load_dwordx4 v[12:15], v16, s[0:1] offset:32
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(2)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v11, v11, v11
+; EXACTCUTOFF-NEXT:    s_waitcnt vmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v13, v13, v13
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v12, v12, v12
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v15, v15, v15
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v14, v14, v14
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v10, v10, v10
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v9, v9, v9
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v8, v8, v8
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v7, v7, v7
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v6, v6, v6
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v5, v5, v5
+; EXACTCUTOFF-NEXT:    v_mul_lo_u32 v4, v4, v4
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v16, v[12:15], s[2:3] offset:32
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v16, v[8:11], s[2:3] offset:80
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v16, v[4:7], s[2:3] offset:64
+; EXACTCUTOFF-NEXT:    global_store_dwordx4 v16, v[0:3], s[2:3] offset:96
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000020) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000002) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000040) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x() #2
   %gep1 = getelementptr <32 x i32>, <32 x i32> addrspace(1)* %in, i32 %tid
   %load = load <32 x i32>, <32 x i32> addrspace(1)* %gep1
@@ -365,113 +603,228 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_cluster(<32 x
 ; GCN-LABEL: test_sched_group_barrier_pipeline_MFMA_cluster:
 ; GCN:       ; %bb.0: ; %entry
 ; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GCN-NEXT:    v_lshlrev_b32_e32 v35, 7, v0
-; GCN-NEXT:    v_mov_b32_e32 v33, 1.0
-; GCN-NEXT:    v_mov_b32_e32 v34, 2.0
+; GCN-NEXT:    v_lshlrev_b32_e32 v99, 7, v0
+; GCN-NEXT:    v_mov_b32_e32 v96, 1.0
+; GCN-NEXT:    v_mov_b32_e32 v97, 2.0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_add_u32_e32 v32, s0, v35
-; GCN-NEXT:    ds_read_b128 v[28:31], v32 offset:112
-; GCN-NEXT:    ds_read_b128 v[24:27], v32 offset:96
-; GCN-NEXT:    ds_read_b128 v[20:23], v32 offset:80
-; GCN-NEXT:    ds_read_b128 v[16:19], v32 offset:64
-; GCN-NEXT:    ds_read_b128 v[0:3], v32
-; GCN-NEXT:    ds_read_b128 v[4:7], v32 offset:16
-; GCN-NEXT:    ds_read_b128 v[8:11], v32 offset:32
-; GCN-NEXT:    ds_read_b128 v[12:15], v32 offset:48
-; GCN-NEXT:    ds_read_b128 v[64:67], v32 offset:8304
-; GCN-NEXT:    ds_read_b128 v[60:63], v32 offset:8288
-; GCN-NEXT:    ds_read_b128 v[56:59], v32 offset:8272
-; GCN-NEXT:    ds_read_b128 v[52:55], v32 offset:8256
-; GCN-NEXT:    ds_read_b128 v[48:51], v32 offset:8240
-; GCN-NEXT:    ds_read_b128 v[44:47], v32 offset:8224
-; GCN-NEXT:    ds_read_b128 v[40:43], v32 offset:8208
-; GCN-NEXT:    ds_read_b128 v[36:39], v32 offset:8192
-; GCN-NEXT:    ds_read_b128 v[96:99], v32 offset:24688
-; GCN-NEXT:    ds_read_b128 v[92:95], v32 offset:24672
-; GCN-NEXT:    ds_read_b128 v[88:91], v32 offset:24656
-; GCN-NEXT:    ds_read_b128 v[84:87], v32 offset:24640
-; GCN-NEXT:    ds_read_b128 v[80:83], v32 offset:24624
-; GCN-NEXT:    ds_read_b128 v[76:79], v32 offset:24608
-; GCN-NEXT:    ds_read_b128 v[72:75], v32 offset:24592
-; GCN-NEXT:    ds_read_b128 v[68:71], v32 offset:24576
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(40) SyncID(0)
-; GCN-NEXT:    v_add_u32_e32 v35, s1, v35
+; GCN-NEXT:    v_add_u32_e32 v98, s0, v99
+; GCN-NEXT:    ds_read_b128 v[28:31], v98 offset:112
+; GCN-NEXT:    ds_read_b128 v[24:27], v98 offset:96
+; GCN-NEXT:    ds_read_b128 v[20:23], v98 offset:80
+; GCN-NEXT:    ds_read_b128 v[16:19], v98 offset:64
+; GCN-NEXT:    ds_read_b128 v[0:3], v98
+; GCN-NEXT:    ds_read_b128 v[4:7], v98 offset:16
+; GCN-NEXT:    ds_read_b128 v[8:11], v98 offset:32
+; GCN-NEXT:    ds_read_b128 v[12:15], v98 offset:48
+; GCN-NEXT:    ds_read_b128 v[60:63], v98 offset:8304
+; GCN-NEXT:    ds_read_b128 v[56:59], v98 offset:8288
+; GCN-NEXT:    ds_read_b128 v[52:55], v98 offset:8272
+; GCN-NEXT:    ds_read_b128 v[48:51], v98 offset:8256
+; GCN-NEXT:    ds_read_b128 v[44:47], v98 offset:8240
+; GCN-NEXT:    ds_read_b128 v[40:43], v98 offset:8224
+; GCN-NEXT:    ds_read_b128 v[36:39], v98 offset:8208
+; GCN-NEXT:    ds_read_b128 v[32:35], v98 offset:8192
+; GCN-NEXT:    ds_read_b128 v[92:95], v98 offset:24688
+; GCN-NEXT:    ds_read_b128 v[88:91], v98 offset:24672
+; GCN-NEXT:    ds_read_b128 v[84:87], v98 offset:24656
+; GCN-NEXT:    ds_read_b128 v[80:83], v98 offset:24640
+; GCN-NEXT:    ds_read_b128 v[76:79], v98 offset:24624
+; GCN-NEXT:    ds_read_b128 v[72:75], v98 offset:24608
+; GCN-NEXT:    ds_read_b128 v[68:71], v98 offset:24592
+; GCN-NEXT:    ds_read_b128 v[64:67], v98 offset:24576
+; GCN-NEXT:    v_add_u32_e32 v99, s1, v99
 ; GCN-NEXT:    s_waitcnt lgkmcnt(14)
-; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[0:31], v33, v34, v[0:31]
-; GCN-NEXT:    v_add_u32_e32 v100, 0x6000, v32
-; GCN-NEXT:    s_waitcnt lgkmcnt(8)
-; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[36:67], v33, v34, v[36:67]
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[68:99], v33, v34, v[68:99]
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(5) SyncID(0)
+; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[0:31], v96, v97, v[0:31]
+; GCN-NEXT:    v_add_u32_e32 v100, 0x6000, v98
 ; GCN-NEXT:    s_nop 7
-; GCN-NEXT:    s_nop 5
-; GCN-NEXT:    ds_write_b128 v35, v[28:31] offset:112
-; GCN-NEXT:    ds_write_b128 v35, v[24:27] offset:96
-; GCN-NEXT:    ds_write_b128 v35, v[20:23] offset:80
-; GCN-NEXT:    ds_write_b128 v35, v[16:19] offset:64
-; GCN-NEXT:    ds_write_b128 v35, v[12:15] offset:48
-; GCN-NEXT:    ds_write_b128 v35, v[8:11] offset:32
-; GCN-NEXT:    ds_write_b128 v35, v[4:7] offset:16
-; GCN-NEXT:    ds_write_b128 v35, v[0:3]
-; GCN-NEXT:    ds_read_b128 v[28:31], v32 offset:49264
-; GCN-NEXT:    ds_read_b128 v[24:27], v32 offset:49248
-; GCN-NEXT:    ds_read_b128 v[20:23], v32 offset:49232
-; GCN-NEXT:    ds_read_b128 v[16:19], v32 offset:49216
-; GCN-NEXT:    ds_read_b128 v[12:15], v32 offset:49200
-; GCN-NEXT:    ds_read_b128 v[8:11], v32 offset:49184
-; GCN-NEXT:    ds_read_b128 v[4:7], v32 offset:49168
-; GCN-NEXT:    ds_read_b128 v[0:3], v32 offset:49152
-; GCN-NEXT:    v_mov_b32_e32 v32, s1
-; GCN-NEXT:    ds_write_b128 v32, v[60:63] offset:8288
-; GCN-NEXT:    ds_write_b128 v32, v[64:67] offset:8304
-; GCN-NEXT:    ds_write_b128 v32, v[52:55] offset:8256
-; GCN-NEXT:    ds_write_b128 v32, v[56:59] offset:8272
-; GCN-NEXT:    ds_write_b128 v32, v[44:47] offset:8224
-; GCN-NEXT:    ds_write_b128 v32, v[48:51] offset:8240
-; GCN-NEXT:    ds_write_b128 v32, v[36:39] offset:8192
-; GCN-NEXT:    ds_write_b128 v32, v[40:43] offset:8208
-; GCN-NEXT:    ds_read_b128 v[64:67], v100 offset:57456
-; GCN-NEXT:    ds_read_b128 v[60:63], v100 offset:57440
-; GCN-NEXT:    ds_read_b128 v[56:59], v100 offset:57424
-; GCN-NEXT:    ds_read_b128 v[52:55], v100 offset:57408
-; GCN-NEXT:    ds_read_b128 v[36:39], v100 offset:57344
-; GCN-NEXT:    ds_read_b128 v[40:43], v100 offset:57360
-; GCN-NEXT:    ds_read_b128 v[44:47], v100 offset:57376
-; GCN-NEXT:    ds_read_b128 v[48:51], v100 offset:57392
-; GCN-NEXT:    ds_write_b128 v32, v[92:95] offset:16480
-; GCN-NEXT:    ds_write_b128 v32, v[96:99] offset:16496
-; GCN-NEXT:    ds_write_b128 v32, v[84:87] offset:16448
-; GCN-NEXT:    ds_write_b128 v32, v[88:91] offset:16464
-; GCN-NEXT:    ds_write_b128 v32, v[76:79] offset:16416
-; GCN-NEXT:    ds_write_b128 v32, v[80:83] offset:16432
-; GCN-NEXT:    ds_write_b128 v32, v[68:71] offset:16384
-; GCN-NEXT:    ds_write_b128 v32, v[72:75] offset:16400
+; GCN-NEXT:    s_nop 7
+; GCN-NEXT:    s_nop 1
+; GCN-NEXT:    ds_write_b128 v99, v[28:31] offset:112
+; GCN-NEXT:    ds_write_b128 v99, v[24:27] offset:96
+; GCN-NEXT:    ds_write_b128 v99, v[20:23] offset:80
+; GCN-NEXT:    ds_write_b128 v99, v[16:19] offset:64
+; GCN-NEXT:    ds_write_b128 v99, v[12:15] offset:48
+; GCN-NEXT:    ds_write_b128 v99, v[8:11] offset:32
+; GCN-NEXT:    ds_write_b128 v99, v[4:7] offset:16
+; GCN-NEXT:    ds_write_b128 v99, v[0:3]
+; GCN-NEXT:    ds_read_b128 v[28:31], v98 offset:49264
+; GCN-NEXT:    ds_read_b128 v[24:27], v98 offset:49248
+; GCN-NEXT:    ds_read_b128 v[20:23], v98 offset:49232
+; GCN-NEXT:    ds_read_b128 v[16:19], v98 offset:49216
+; GCN-NEXT:    ds_read_b128 v[12:15], v98 offset:49200
+; GCN-NEXT:    ds_read_b128 v[8:11], v98 offset:49184
+; GCN-NEXT:    ds_read_b128 v[4:7], v98 offset:49168
+; GCN-NEXT:    ds_read_b128 v[0:3], v98 offset:49152
 ; GCN-NEXT:    s_waitcnt lgkmcnt(14)
-; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[0:31], v33, v34, v[0:31]
-; GCN-NEXT:    s_waitcnt lgkmcnt(8)
-; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[36:67], v33, v34, v[36:67]
+; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[32:63], v96, v97, v[32:63]
+; GCN-NEXT:    v_mov_b32_e32 v98, s1
 ; GCN-NEXT:    s_nop 7
 ; GCN-NEXT:    s_nop 7
-; GCN-NEXT:    s_nop 0
-; GCN-NEXT:    ds_write_b128 v32, v[24:27] offset:24672
-; GCN-NEXT:    ds_write_b128 v32, v[28:31] offset:24688
-; GCN-NEXT:    ds_write_b128 v32, v[16:19] offset:24640
-; GCN-NEXT:    ds_write_b128 v32, v[20:23] offset:24656
-; GCN-NEXT:    ds_write_b128 v32, v[8:11] offset:24608
-; GCN-NEXT:    ds_write_b128 v32, v[12:15] offset:24624
-; GCN-NEXT:    ds_write_b128 v32, v[0:3] offset:24576
-; GCN-NEXT:    ds_write_b128 v32, v[4:7] offset:24592
-; GCN-NEXT:    ds_write_b128 v32, v[60:63] offset:32864
-; GCN-NEXT:    ds_write_b128 v32, v[64:67] offset:32880
-; GCN-NEXT:    ds_write_b128 v32, v[52:55] offset:32832
-; GCN-NEXT:    ds_write_b128 v32, v[56:59] offset:32848
-; GCN-NEXT:    ds_write_b128 v32, v[44:47] offset:32800
-; GCN-NEXT:    ds_write_b128 v32, v[48:51] offset:32816
-; GCN-NEXT:    ds_write_b128 v32, v[36:39] offset:32768
-; GCN-NEXT:    ds_write_b128 v32, v[40:43] offset:32784
+; GCN-NEXT:    s_nop 1
+; GCN-NEXT:    ds_write_b128 v98, v[56:59] offset:8288
+; GCN-NEXT:    ds_write_b128 v98, v[60:63] offset:8304
+; GCN-NEXT:    ds_write_b128 v98, v[48:51] offset:8256
+; GCN-NEXT:    ds_write_b128 v98, v[52:55] offset:8272
+; GCN-NEXT:    ds_write_b128 v98, v[40:43] offset:8224
+; GCN-NEXT:    ds_write_b128 v98, v[44:47] offset:8240
+; GCN-NEXT:    ds_write_b128 v98, v[32:35] offset:8192
+; GCN-NEXT:    ds_write_b128 v98, v[36:39] offset:8208
+; GCN-NEXT:    ds_read_b128 v[60:63], v100 offset:57456
+; GCN-NEXT:    ds_read_b128 v[56:59], v100 offset:57440
+; GCN-NEXT:    ds_read_b128 v[52:55], v100 offset:57424
+; GCN-NEXT:    ds_read_b128 v[48:51], v100 offset:57408
+; GCN-NEXT:    ds_read_b128 v[32:35], v100 offset:57344
+; GCN-NEXT:    ds_read_b128 v[36:39], v100 offset:57360
+; GCN-NEXT:    ds_read_b128 v[40:43], v100 offset:57376
+; GCN-NEXT:    ds_read_b128 v[44:47], v100 offset:57392
+; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[64:95], v96, v97, v[64:95]
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(40) SyncID(0)
+; GCN-NEXT:    s_waitcnt lgkmcnt(14)
+; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[0:31], v96, v97, v[0:31]
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[32:63], v96, v97, v[32:63]
+; GCN-NEXT:    s_nop 7
+; GCN-NEXT:    s_nop 6
+; GCN-NEXT:    ds_write_b128 v98, v[88:91] offset:16480
+; GCN-NEXT:    ds_write_b128 v98, v[92:95] offset:16496
+; GCN-NEXT:    ds_write_b128 v98, v[80:83] offset:16448
+; GCN-NEXT:    ds_write_b128 v98, v[84:87] offset:16464
+; GCN-NEXT:    ds_write_b128 v98, v[72:75] offset:16416
+; GCN-NEXT:    ds_write_b128 v98, v[76:79] offset:16432
+; GCN-NEXT:    ds_write_b128 v98, v[64:67] offset:16384
+; GCN-NEXT:    ds_write_b128 v98, v[68:71] offset:16400
+; GCN-NEXT:    ds_write_b128 v98, v[24:27] offset:24672
+; GCN-NEXT:    ds_write_b128 v98, v[28:31] offset:24688
+; GCN-NEXT:    ds_write_b128 v98, v[16:19] offset:24640
+; GCN-NEXT:    ds_write_b128 v98, v[20:23] offset:24656
+; GCN-NEXT:    ds_write_b128 v98, v[8:11] offset:24608
+; GCN-NEXT:    ds_write_b128 v98, v[12:15] offset:24624
+; GCN-NEXT:    ds_write_b128 v98, v[0:3] offset:24576
+; GCN-NEXT:    ds_write_b128 v98, v[4:7] offset:24592
+; GCN-NEXT:    ds_write_b128 v98, v[56:59] offset:32864
+; GCN-NEXT:    ds_write_b128 v98, v[60:63] offset:32880
+; GCN-NEXT:    ds_write_b128 v98, v[48:51] offset:32832
+; GCN-NEXT:    ds_write_b128 v98, v[52:55] offset:32848
+; GCN-NEXT:    ds_write_b128 v98, v[40:43] offset:32800
+; GCN-NEXT:    ds_write_b128 v98, v[44:47] offset:32816
+; GCN-NEXT:    ds_write_b128 v98, v[32:35] offset:32768
+; GCN-NEXT:    ds_write_b128 v98, v[36:39] offset:32784
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(5) SyncID(0)
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(40) SyncID(0)
 ; GCN-NEXT:    s_endpgm
+;
+; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_MFMA_cluster:
+; EXACTCUTOFF:       ; %bb.0: ; %entry
+; EXACTCUTOFF-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; EXACTCUTOFF-NEXT:    v_lshlrev_b32_e32 v99, 7, v0
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v96, 1.0
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v97, 2.0
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    v_add_u32_e32 v98, s0, v99
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[28:31], v98 offset:112
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[24:27], v98 offset:96
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[20:23], v98 offset:80
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[16:19], v98 offset:64
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[0:3], v98
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[4:7], v98 offset:16
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[8:11], v98 offset:32
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[12:15], v98 offset:48
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[60:63], v98 offset:8304
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[56:59], v98 offset:8288
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[52:55], v98 offset:8272
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[48:51], v98 offset:8256
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[44:47], v98 offset:8240
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[40:43], v98 offset:8224
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[36:39], v98 offset:8208
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[32:35], v98 offset:8192
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[92:95], v98 offset:24688
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[88:91], v98 offset:24672
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[84:87], v98 offset:24656
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[80:83], v98 offset:24640
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[76:79], v98 offset:24624
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[72:75], v98 offset:24608
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[68:71], v98 offset:24592
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[64:67], v98 offset:24576
+; EXACTCUTOFF-NEXT:    v_add_u32_e32 v99, s1, v99
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(14)
+; EXACTCUTOFF-NEXT:    v_mfma_f32_32x32x1f32 v[0:31], v96, v97, v[0:31]
+; EXACTCUTOFF-NEXT:    v_add_u32_e32 v100, 0x6000, v98
+; EXACTCUTOFF-NEXT:    s_nop 7
+; EXACTCUTOFF-NEXT:    s_nop 7
+; EXACTCUTOFF-NEXT:    s_nop 1
+; EXACTCUTOFF-NEXT:    ds_write_b128 v99, v[28:31] offset:112
+; EXACTCUTOFF-NEXT:    ds_write_b128 v99, v[24:27] offset:96
+; EXACTCUTOFF-NEXT:    ds_write_b128 v99, v[20:23] offset:80
+; EXACTCUTOFF-NEXT:    ds_write_b128 v99, v[16:19] offset:64
+; EXACTCUTOFF-NEXT:    ds_write_b128 v99, v[12:15] offset:48
+; EXACTCUTOFF-NEXT:    ds_write_b128 v99, v[8:11] offset:32
+; EXACTCUTOFF-NEXT:    ds_write_b128 v99, v[4:7] offset:16
+; EXACTCUTOFF-NEXT:    ds_write_b128 v99, v[0:3]
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[28:31], v98 offset:49264
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[24:27], v98 offset:49248
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[20:23], v98 offset:49232
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[16:19], v98 offset:49216
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[12:15], v98 offset:49200
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[8:11], v98 offset:49184
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[4:7], v98 offset:49168
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[0:3], v98 offset:49152
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(14)
+; EXACTCUTOFF-NEXT:    v_mfma_f32_32x32x1f32 v[32:63], v96, v97, v[32:63]
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v98, s1
+; EXACTCUTOFF-NEXT:    s_nop 7
+; EXACTCUTOFF-NEXT:    s_nop 7
+; EXACTCUTOFF-NEXT:    s_nop 1
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[56:59] offset:8288
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[60:63] offset:8304
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[48:51] offset:8256
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[52:55] offset:8272
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[40:43] offset:8224
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[44:47] offset:8240
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[32:35] offset:8192
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[36:39] offset:8208
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[60:63], v100 offset:57456
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[56:59], v100 offset:57440
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[52:55], v100 offset:57424
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[48:51], v100 offset:57408
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[32:35], v100 offset:57344
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[36:39], v100 offset:57360
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[40:43], v100 offset:57376
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[44:47], v100 offset:57392
+; EXACTCUTOFF-NEXT:    v_mfma_f32_32x32x1f32 v[64:95], v96, v97, v[64:95]
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(40) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(14)
+; EXACTCUTOFF-NEXT:    v_mfma_f32_32x32x1f32 v[0:31], v96, v97, v[0:31]
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mfma_f32_32x32x1f32 v[32:63], v96, v97, v[32:63]
+; EXACTCUTOFF-NEXT:    s_nop 7
+; EXACTCUTOFF-NEXT:    s_nop 6
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[88:91] offset:16480
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[92:95] offset:16496
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[80:83] offset:16448
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[84:87] offset:16464
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[72:75] offset:16416
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[76:79] offset:16432
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[64:67] offset:16384
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[68:71] offset:16400
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[24:27] offset:24672
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[28:31] offset:24688
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[16:19] offset:24640
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[20:23] offset:24656
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[8:11] offset:24608
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[12:15] offset:24624
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[0:3] offset:24576
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[4:7] offset:24592
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[56:59] offset:32864
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[60:63] offset:32880
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[48:51] offset:32832
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[52:55] offset:32848
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[40:43] offset:32800
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[44:47] offset:32816
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[32:35] offset:32768
+; EXACTCUTOFF-NEXT:    ds_write_b128 v98, v[36:39] offset:32784
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(5) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(40) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_endpgm
 entry:
   %idx = call i32 @llvm.amdgcn.workitem.id.x()
   %load.0.addr = getelementptr <32 x float>, <32 x float> addrspace(3)* %in, i32 %idx
@@ -512,23 +865,11 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(<32
 ; GCN-LABEL: test_sched_group_barrier_pipeline_MFMA_interleave:
 ; GCN:       ; %bb.0: ; %entry
 ; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GCN-NEXT:    v_lshlrev_b32_e32 v35, 7, v0
-; GCN-NEXT:    v_mov_b32_e32 v33, 1.0
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
-; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
-; GCN-NEXT:    v_mov_b32_e32 v34, 2.0
+; GCN-NEXT:    v_lshlrev_b32_e32 v33, 7, v0
+; GCN-NEXT:    v_mov_b32_e32 v34, 1.0
+; GCN-NEXT:    v_mov_b32_e32 v35, 2.0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_add_u32_e32 v32, s0, v35
+; GCN-NEXT:    v_add_u32_e32 v32, s0, v33
 ; GCN-NEXT:    ds_read_b128 v[28:31], v32 offset:112
 ; GCN-NEXT:    ds_read_b128 v[24:27], v32 offset:96
 ; GCN-NEXT:    ds_read_b128 v[20:23], v32 offset:80
@@ -537,110 +878,255 @@ define amdgpu_kernel void @test_sched_group_barrier_pipeline_MFMA_interleave(<32
 ; GCN-NEXT:    ds_read_b128 v[4:7], v32 offset:16
 ; GCN-NEXT:    ds_read_b128 v[8:11], v32 offset:32
 ; GCN-NEXT:    ds_read_b128 v[12:15], v32 offset:48
+; GCN-NEXT:    v_add_u32_e32 v33, s1, v33
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
-; GCN-NEXT:    v_add_u32_e32 v35, s1, v35
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[0:31], v33, v34, v[0:31]
+; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[0:31], v34, v35, v[0:31]
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
 ; GCN-NEXT:    s_nop 7
 ; GCN-NEXT:    s_nop 7
 ; GCN-NEXT:    s_nop 2
-; GCN-NEXT:    ds_write_b128 v35, v[28:31] offset:112
-; GCN-NEXT:    ds_write_b128 v35, v[24:27] offset:96
-; GCN-NEXT:    ds_write_b128 v35, v[20:23] offset:80
-; GCN-NEXT:    ds_write_b128 v35, v[16:19] offset:64
-; GCN-NEXT:    ds_write_b128 v35, v[12:15] offset:48
-; GCN-NEXT:    ds_write_b128 v35, v[8:11] offset:32
-; GCN-NEXT:    ds_write_b128 v35, v[4:7] offset:16
-; GCN-NEXT:    ds_write_b128 v35, v[0:3]
-; GCN-NEXT:    ds_read_b128 v[28:31], v32 offset:8304
-; GCN-NEXT:    ds_read_b128 v[24:27], v32 offset:8288
-; GCN-NEXT:    ds_read_b128 v[20:23], v32 offset:8272
-; GCN-NEXT:    ds_read_b128 v[16:19], v32 offset:8256
-; GCN-NEXT:    ds_read_b128 v[12:15], v32 offset:8240
-; GCN-NEXT:    ds_read_b128 v[8:11], v32 offset:8224
-; GCN-NEXT:    ds_read_b128 v[4:7], v32 offset:8208
-; GCN-NEXT:    ds_read_b128 v[0:3], v32 offset:8192
-; GCN-NEXT:    v_mov_b32_e32 v35, s1
+; GCN-NEXT:    ds_write_b128 v33, v[28:31] offset:112
+; GCN-NEXT:    ds_write_b128 v33, v[24:27] offset:96
+; GCN-NEXT:    ds_write_b128 v33, v[20:23] offset:80
+; GCN-NEXT:    ds_write_b128 v33, v[16:19] offset:64
+; GCN-NEXT:    ds_write_b128 v33, v[12:15] offset:48
+; GCN-NEXT:    ds_write_b128 v33, v[8:11] offset:32
+; GCN-NEXT:    ds_write_b128 v33, v[4:7] offset:16
+; GCN-NEXT:    ds_write_b128 v33, v[0:3]
+; GCN-NEXT:    ds_read_b128 v[64:67], v32 offset:8304
+; GCN-NEXT:    ds_read_b128 v[60:63], v32 offset:8288
+; GCN-NEXT:    ds_read_b128 v[56:59], v32 offset:8272
+; GCN-NEXT:    ds_read_b128 v[52:55], v32 offset:8256
+; GCN-NEXT:    ds_read_b128 v[48:51], v32 offset:8240
+; GCN-NEXT:    ds_read_b128 v[44:47], v32 offset:8224
+; GCN-NEXT:    ds_read_b128 v[40:43], v32 offset:8208
+; GCN-NEXT:    ds_read_b128 v[36:39], v32 offset:8192
+; GCN-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-NEXT:    v_add_u32_e32 v1, 0x6000, v32
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[0:31], v33, v34, v[0:31]
+; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[36:67], v34, v35, v[36:67]
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
 ; GCN-NEXT:    s_nop 7
 ; GCN-NEXT:    s_nop 7
 ; GCN-NEXT:    s_nop 2
-; GCN-NEXT:    ds_write_b128 v35, v[24:27] offset:8288
-; GCN-NEXT:    ds_write_b128 v35, v[28:31] offset:8304
-; GCN-NEXT:    ds_write_b128 v35, v[16:19] offset:8256
-; GCN-NEXT:    ds_write_b128 v35, v[20:23] offset:8272
-; GCN-NEXT:    ds_write_b128 v35, v[8:11] offset:8224
-; GCN-NEXT:    ds_write_b128 v35, v[12:15] offset:8240
-; GCN-NEXT:    ds_write_b128 v35, v[0:3] offset:8192
-; GCN-NEXT:    ds_write_b128 v35, v[4:7] offset:8208
-; GCN-NEXT:    ds_read_b128 v[28:31], v32 offset:24688
-; GCN-NEXT:    ds_read_b128 v[24:27], v32 offset:24672
-; GCN-NEXT:    ds_read_b128 v[20:23], v32 offset:24656
-; GCN-NEXT:    ds_read_b128 v[16:19], v32 offset:24640
-; GCN-NEXT:    ds_read_b128 v[12:15], v32 offset:24624
-; GCN-NEXT:    ds_read_b128 v[8:11], v32 offset:24608
-; GCN-NEXT:    ds_read_b128 v[4:7], v32 offset:24592
-; GCN-NEXT:    ds_read_b128 v[0:3], v32 offset:24576
+; GCN-NEXT:    ds_write_b128 v0, v[60:63] offset:8288
+; GCN-NEXT:    ds_write_b128 v0, v[64:67] offset:8304
+; GCN-NEXT:    ds_write_b128 v0, v[52:55] offset:8256
+; GCN-NEXT:    ds_write_b128 v0, v[56:59] offset:8272
+; GCN-NEXT:    ds_write_b128 v0, v[44:47] offset:8224
+; GCN-NEXT:    ds_write_b128 v0, v[48:51] offset:8240
+; GCN-NEXT:    ds_write_b128 v0, v[36:39] offset:8192
+; GCN-NEXT:    ds_write_b128 v0, v[40:43] offset:8208
+; GCN-NEXT:    ds_read_b128 v[64:67], v32 offset:24688
+; GCN-NEXT:    ds_read_b128 v[60:63], v32 offset:24672
+; GCN-NEXT:    ds_read_b128 v[56:59], v32 offset:24656
+; GCN-NEXT:    ds_read_b128 v[52:55], v32 offset:24640
+; GCN-NEXT:    ds_read_b128 v[48:51], v32 offset:24624
+; GCN-NEXT:    ds_read_b128 v[44:47], v32 offset:24608
+; GCN-NEXT:    ds_read_b128 v[40:43], v32 offset:24592
+; GCN-NEXT:    ds_read_b128 v[36:39], v32 offset:24576
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[0:31], v33, v34, v[0:31]
+; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[36:67], v34, v35, v[36:67]
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
 ; GCN-NEXT:    s_nop 7
 ; GCN-NEXT:    s_nop 7
 ; GCN-NEXT:    s_nop 2
-; GCN-NEXT:    ds_write_b128 v35, v[24:27] offset:16480
-; GCN-NEXT:    ds_write_b128 v35, v[28:31] offset:16496
-; GCN-NEXT:    ds_write_b128 v35, v[16:19] offset:16448
-; GCN-NEXT:    ds_write_b128 v35, v[20:23] offset:16464
-; GCN-NEXT:    ds_write_b128 v35, v[8:11] offset:16416
-; GCN-NEXT:    ds_write_b128 v35, v[12:15] offset:16432
-; GCN-NEXT:    ds_write_b128 v35, v[0:3] offset:16384
-; GCN-NEXT:    ds_write_b128 v35, v[4:7] offset:16400
-; GCN-NEXT:    ds_read_b128 v[28:31], v32 offset:49264
-; GCN-NEXT:    ds_read_b128 v[24:27], v32 offset:49248
-; GCN-NEXT:    ds_read_b128 v[20:23], v32 offset:49232
-; GCN-NEXT:    ds_read_b128 v[16:19], v32 offset:49216
-; GCN-NEXT:    ds_read_b128 v[12:15], v32 offset:49200
-; GCN-NEXT:    ds_read_b128 v[8:11], v32 offset:49184
-; GCN-NEXT:    ds_read_b128 v[4:7], v32 offset:49168
-; GCN-NEXT:    ds_read_b128 v[0:3], v32 offset:49152
-; GCN-NEXT:    v_add_u32_e32 v32, 0x6000, v32
+; GCN-NEXT:    ds_write_b128 v0, v[60:63] offset:16480
+; GCN-NEXT:    ds_write_b128 v0, v[64:67] offset:16496
+; GCN-NEXT:    ds_write_b128 v0, v[52:55] offset:16448
+; GCN-NEXT:    ds_write_b128 v0, v[56:59] offset:16464
+; GCN-NEXT:    ds_write_b128 v0, v[44:47] offset:16416
+; GCN-NEXT:    ds_write_b128 v0, v[48:51] offset:16432
+; GCN-NEXT:    ds_write_b128 v0, v[36:39] offset:16384
+; GCN-NEXT:    ds_write_b128 v0, v[40:43] offset:16400
+; GCN-NEXT:    ds_read_b128 v[64:67], v32 offset:49264
+; GCN-NEXT:    ds_read_b128 v[60:63], v32 offset:49248
+; GCN-NEXT:    ds_read_b128 v[56:59], v32 offset:49232
+; GCN-NEXT:    ds_read_b128 v[52:55], v32 offset:49216
+; GCN-NEXT:    ds_read_b128 v[48:51], v32 offset:49200
+; GCN-NEXT:    ds_read_b128 v[44:47], v32 offset:49184
+; GCN-NEXT:    ds_read_b128 v[40:43], v32 offset:49168
+; GCN-NEXT:    ds_read_b128 v[36:39], v32 offset:49152
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[0:31], v33, v34, v[0:31]
+; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[36:67], v34, v35, v[36:67]
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
 ; GCN-NEXT:    s_nop 7
 ; GCN-NEXT:    s_nop 7
 ; GCN-NEXT:    s_nop 2
-; GCN-NEXT:    ds_write_b128 v35, v[24:27] offset:24672
-; GCN-NEXT:    ds_write_b128 v35, v[28:31] offset:24688
-; GCN-NEXT:    ds_write_b128 v35, v[16:19] offset:24640
-; GCN-NEXT:    ds_write_b128 v35, v[20:23] offset:24656
-; GCN-NEXT:    ds_write_b128 v35, v[8:11] offset:24608
-; GCN-NEXT:    ds_write_b128 v35, v[12:15] offset:24624
-; GCN-NEXT:    ds_write_b128 v35, v[0:3] offset:24576
-; GCN-NEXT:    ds_write_b128 v35, v[4:7] offset:24592
-; GCN-NEXT:    ds_read_b128 v[28:31], v32 offset:57456
-; GCN-NEXT:    ds_read_b128 v[24:27], v32 offset:57440
-; GCN-NEXT:    ds_read_b128 v[20:23], v32 offset:57424
-; GCN-NEXT:    ds_read_b128 v[16:19], v32 offset:57408
-; GCN-NEXT:    ds_read_b128 v[0:3], v32 offset:57344
-; GCN-NEXT:    ds_read_b128 v[4:7], v32 offset:57360
-; GCN-NEXT:    ds_read_b128 v[8:11], v32 offset:57376
-; GCN-NEXT:    ds_read_b128 v[12:15], v32 offset:57392
+; GCN-NEXT:    ds_write_b128 v0, v[60:63] offset:24672
+; GCN-NEXT:    ds_write_b128 v0, v[64:67] offset:24688
+; GCN-NEXT:    ds_write_b128 v0, v[52:55] offset:24640
+; GCN-NEXT:    ds_write_b128 v0, v[56:59] offset:24656
+; GCN-NEXT:    ds_write_b128 v0, v[44:47] offset:24608
+; GCN-NEXT:    ds_write_b128 v0, v[48:51] offset:24624
+; GCN-NEXT:    ds_write_b128 v0, v[36:39] offset:24576
+; GCN-NEXT:    ds_write_b128 v0, v[40:43] offset:24592
+; GCN-NEXT:    ds_read_b128 v[30:33], v1 offset:57456
+; GCN-NEXT:    ds_read_b128 v[26:29], v1 offset:57440
+; GCN-NEXT:    ds_read_b128 v[22:25], v1 offset:57424
+; GCN-NEXT:    ds_read_b128 v[18:21], v1 offset:57408
+; GCN-NEXT:    ds_read_b128 v[2:5], v1 offset:57344
+; GCN-NEXT:    ds_read_b128 v[6:9], v1 offset:57360
+; GCN-NEXT:    ds_read_b128 v[10:13], v1 offset:57376
+; GCN-NEXT:    ds_read_b128 v[14:17], v1 offset:57392
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[0:31], v33, v34, v[0:31]
+; GCN-NEXT:    v_mfma_f32_32x32x1f32 v[2:33], v34, v35, v[2:33]
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
 ; GCN-NEXT:    s_nop 7
 ; GCN-NEXT:    s_nop 7
 ; GCN-NEXT:    s_nop 2
-; GCN-NEXT:    ds_write_b128 v35, v[24:27] offset:32864
-; GCN-NEXT:    ds_write_b128 v35, v[28:31] offset:32880
-; GCN-NEXT:    ds_write_b128 v35, v[16:19] offset:32832
-; GCN-NEXT:    ds_write_b128 v35, v[20:23] offset:32848
-; GCN-NEXT:    ds_write_b128 v35, v[8:11] offset:32800
-; GCN-NEXT:    ds_write_b128 v35, v[12:15] offset:32816
-; GCN-NEXT:    ds_write_b128 v35, v[0:3] offset:32768
-; GCN-NEXT:    ds_write_b128 v35, v[4:7] offset:32784
+; GCN-NEXT:    ds_write_b128 v0, v[26:29] offset:32864
+; GCN-NEXT:    ds_write_b128 v0, v[30:33] offset:32880
+; GCN-NEXT:    ds_write_b128 v0, v[18:21] offset:32832
+; GCN-NEXT:    ds_write_b128 v0, v[22:25] offset:32848
+; GCN-NEXT:    ds_write_b128 v0, v[10:13] offset:32800
+; GCN-NEXT:    ds_write_b128 v0, v[14:17] offset:32816
+; GCN-NEXT:    ds_write_b128 v0, v[2:5] offset:32768
+; GCN-NEXT:    ds_write_b128 v0, v[6:9] offset:32784
 ; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
 ; GCN-NEXT:    s_endpgm
+;
+; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_MFMA_interleave:
+; EXACTCUTOFF:       ; %bb.0: ; %entry
+; EXACTCUTOFF-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; EXACTCUTOFF-NEXT:    v_lshlrev_b32_e32 v33, 7, v0
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v34, 1.0
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v35, 2.0
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    v_add_u32_e32 v32, s0, v33
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[28:31], v32 offset:112
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[24:27], v32 offset:96
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[20:23], v32 offset:80
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[16:19], v32 offset:64
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[0:3], v32
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[4:7], v32 offset:16
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[8:11], v32 offset:32
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[12:15], v32 offset:48
+; EXACTCUTOFF-NEXT:    v_add_u32_e32 v33, s1, v33
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mfma_f32_32x32x1f32 v[0:31], v34, v35, v[0:31]
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_nop 7
+; EXACTCUTOFF-NEXT:    s_nop 7
+; EXACTCUTOFF-NEXT:    s_nop 2
+; EXACTCUTOFF-NEXT:    ds_write_b128 v33, v[28:31] offset:112
+; EXACTCUTOFF-NEXT:    ds_write_b128 v33, v[24:27] offset:96
+; EXACTCUTOFF-NEXT:    ds_write_b128 v33, v[20:23] offset:80
+; EXACTCUTOFF-NEXT:    ds_write_b128 v33, v[16:19] offset:64
+; EXACTCUTOFF-NEXT:    ds_write_b128 v33, v[12:15] offset:48
+; EXACTCUTOFF-NEXT:    ds_write_b128 v33, v[8:11] offset:32
+; EXACTCUTOFF-NEXT:    ds_write_b128 v33, v[4:7] offset:16
+; EXACTCUTOFF-NEXT:    ds_write_b128 v33, v[0:3]
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[64:67], v32 offset:8304
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[60:63], v32 offset:8288
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[56:59], v32 offset:8272
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[52:55], v32 offset:8256
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[48:51], v32 offset:8240
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[44:47], v32 offset:8224
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[40:43], v32 offset:8208
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[36:39], v32 offset:8192
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v0, s1
+; EXACTCUTOFF-NEXT:    v_add_u32_e32 v1, 0x6000, v32
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mfma_f32_32x32x1f32 v[36:67], v34, v35, v[36:67]
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_nop 7
+; EXACTCUTOFF-NEXT:    s_nop 7
+; EXACTCUTOFF-NEXT:    s_nop 2
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[60:63] offset:8288
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[64:67] offset:8304
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[52:55] offset:8256
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[56:59] offset:8272
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[44:47] offset:8224
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[48:51] offset:8240
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[36:39] offset:8192
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[40:43] offset:8208
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[64:67], v32 offset:24688
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[60:63], v32 offset:24672
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[56:59], v32 offset:24656
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[52:55], v32 offset:24640
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[48:51], v32 offset:24624
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[44:47], v32 offset:24608
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[40:43], v32 offset:24592
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[36:39], v32 offset:24576
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mfma_f32_32x32x1f32 v[36:67], v34, v35, v[36:67]
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_nop 7
+; EXACTCUTOFF-NEXT:    s_nop 7
+; EXACTCUTOFF-NEXT:    s_nop 2
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[60:63] offset:16480
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[64:67] offset:16496
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[52:55] offset:16448
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[56:59] offset:16464
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[44:47] offset:16416
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[48:51] offset:16432
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[36:39] offset:16384
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[40:43] offset:16400
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[64:67], v32 offset:49264
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[60:63], v32 offset:49248
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[56:59], v32 offset:49232
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[52:55], v32 offset:49216
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[48:51], v32 offset:49200
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[44:47], v32 offset:49184
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[40:43], v32 offset:49168
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[36:39], v32 offset:49152
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mfma_f32_32x32x1f32 v[36:67], v34, v35, v[36:67]
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_nop 7
+; EXACTCUTOFF-NEXT:    s_nop 7
+; EXACTCUTOFF-NEXT:    s_nop 2
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[60:63] offset:24672
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[64:67] offset:24688
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[52:55] offset:24640
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[56:59] offset:24656
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[44:47] offset:24608
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[48:51] offset:24624
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[36:39] offset:24576
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[40:43] offset:24592
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[30:33], v1 offset:57456
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[26:29], v1 offset:57440
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[22:25], v1 offset:57424
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[18:21], v1 offset:57408
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[2:5], v1 offset:57344
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[6:9], v1 offset:57360
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[10:13], v1 offset:57376
+; EXACTCUTOFF-NEXT:    ds_read_b128 v[14:17], v1 offset:57392
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(8) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    v_mfma_f32_32x32x1f32 v[2:33], v34, v35, v[2:33]
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_nop 7
+; EXACTCUTOFF-NEXT:    s_nop 7
+; EXACTCUTOFF-NEXT:    s_nop 2
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[26:29] offset:32864
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[30:33] offset:32880
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[18:21] offset:32832
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[22:25] offset:32848
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[10:13] offset:32800
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[14:17] offset:32816
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[2:5] offset:32768
+; EXACTCUTOFF-NEXT:    ds_write_b128 v0, v[6:9] offset:32784
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(8) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_endpgm
 entry:
   %idx = call i32 @llvm.amdgcn.workitem.id.x()
   %load.0.addr = getelementptr <32 x float>, <32 x float> addrspace(3)* %in, i32 %idx

diff --git a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir
new file mode 100644
index 0000000000000..962e9947a3ba5
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir
@@ -0,0 +1,393 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=gfx908 -run-pass=machine-scheduler -o - %s | FileCheck -check-prefix=GREEDY %s
+# RUN: llc -march=amdgcn -mcpu=gfx908 -amdgpu-igrouplp-exact-solver -run-pass=machine-scheduler -o - %s | FileCheck -check-prefix=EXACT %s
+
+--- |
+  define amdgpu_kernel void @sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
+  define amdgpu_kernel void @sched_group_barrier_MFMA_VALU_and_SALU_alternating(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
+  define amdgpu_kernel void @sched_group_barrier_2_separate_pipes(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
+  define amdgpu_kernel void @sched_group_barrier_3_separate_pipes(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
+
+  !0 = distinct !{!0}
+  !1 = !{!1, !0}
+...
+
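+# The SCHED_GROUP_BARRIER operands below are (mask, size, sync_id). As a quick
+# legend, per the llvm.amdgcn.sched.group.barrier mask encoding: 0x2 VALU,
+# 0x4 SALU, 0x6 VALU+SALU ("ALU"), 0x8 MFMA, 0x10 VMEM, 0x40 VMEM write,
+# 0x100 DS read, 0x200 DS write.
+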
+---
+name: sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; GREEDY-LABEL: name: sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE
+    ; GREEDY: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+    ; GREEDY-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+    ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; GREEDY-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
+    ; GREEDY-NEXT: S_NOP 0
+    ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+    ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 10, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 5, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
+    ; GREEDY-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
+    ; EXACT-LABEL: name: sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE
+    ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
+    ; EXACT-NEXT: S_NOP 0
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 10, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 5, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
+    ; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
+    %0:sreg_64 = IMPLICIT_DEF
+    %1:vgpr_32 = IMPLICIT_DEF
+    %2:areg_128 = IMPLICIT_DEF
+    %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    %4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec
+    GLOBAL_STORE_DWORD_SADDR %1, %4, %0, 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    %5:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec
+    %6:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec
+    S_NOP 0
+    %7:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %2, 0, 0, 0, implicit $mode, implicit $exec
+    %8:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %7, 0, 0, 0, implicit $mode, implicit $exec
+    %9:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %8, 0, 0, 0, implicit $mode, implicit $exec
+    %10:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %9, 0, 0, 0, implicit $mode, implicit $exec
+    %11:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %10, 0, 0, 0, implicit $mode, implicit $exec
+    %12:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    %13:vgpr_32 = nsw V_MUL_LO_U32_e64 %12, %12, implicit $exec
+    GLOBAL_STORE_DWORD_SADDR %1, %13, %0, 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; 2 VMEM
+    SCHED_GROUP_BARRIER 16, 2, 0
+    ; 10 ALU
+    SCHED_GROUP_BARRIER 6, 10, 0
+    ; 5 MFMA
+    SCHED_GROUP_BARRIER 8, 5, 0
+    ; 2 VMEM_WRITE
+    SCHED_GROUP_BARRIER 64, 2, 0
+    S_ENDPGM 0, implicit %5, implicit %6, implicit %11
+...
+
+---
+name: sched_group_barrier_MFMA_VALU_and_SALU_alternating
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; GREEDY-LABEL: name: sched_group_barrier_MFMA_VALU_and_SALU_alternating
+    ; GREEDY: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+    ; GREEDY-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+    ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; GREEDY-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: S_NOP 0
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
+    ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
+    ; GREEDY-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
+    ; EXACT-LABEL: name: sched_group_barrier_MFMA_VALU_and_SALU_alternating
+    ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: S_NOP 0
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
+    ; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
+    %0:sreg_64 = IMPLICIT_DEF
+    %1:vgpr_32 = IMPLICIT_DEF
+    %2:areg_128 = IMPLICIT_DEF
+    %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    %4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec
+    GLOBAL_STORE_DWORD_SADDR %1, %4, %0, 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    %5:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec
+    %6:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec
+    S_NOP 0
+    %7:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %2, 0, 0, 0, implicit $mode, implicit $exec
+    %8:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %7, 0, 0, 0, implicit $mode, implicit $exec
+    %9:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %8, 0, 0, 0, implicit $mode, implicit $exec
+    %10:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %9, 0, 0, 0, implicit $mode, implicit $exec
+    %11:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %10, 0, 0, 0, implicit $mode, implicit $exec
+    %12:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    %13:vgpr_32 = nsw V_MUL_LO_U32_e64 %12, %12, implicit $exec
+    GLOBAL_STORE_DWORD_SADDR %1, %13, %0, 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; 2 VMEM
+    SCHED_GROUP_BARRIER 16, 2, 0
+    ; 1 MFMA
+    SCHED_GROUP_BARRIER 8, 1, 0
+    ; 1 VALU+SALU
+    SCHED_GROUP_BARRIER 6, 1, 0
+    ; 1 MFMA
+    SCHED_GROUP_BARRIER 8, 1, 0
+    ; 1 VALU+SALU
+    SCHED_GROUP_BARRIER 6, 1, 0
+    ; 1 MFMA
+    SCHED_GROUP_BARRIER 8, 1, 0
+    ; 1 VALU+SALU
+    SCHED_GROUP_BARRIER 6, 1, 0
+    ; 1 MFMA
+    SCHED_GROUP_BARRIER 8, 1, 0
+    ; 1 VALU+SALU
+    SCHED_GROUP_BARRIER 6, 1, 0
+    ; 1 MFMA
+    SCHED_GROUP_BARRIER 8, 1, 0
+    ; 1 VALU+SALU
+    SCHED_GROUP_BARRIER 6, 1, 0
+    ; 2 VMEM_WRITE
+    SCHED_GROUP_BARRIER 64, 2, 0
+    S_ENDPGM 0, implicit %5, implicit %6, implicit %11
+...
+
+---
+name: sched_group_barrier_2_separate_pipes
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; GREEDY-LABEL: name: sched_group_barrier_2_separate_pipes
+    ; GREEDY: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+    ; GREEDY-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+    ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; GREEDY-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
+    ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
+    ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_1]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: S_NOP 0
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 5, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 2
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 64, 2, 2
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 2
+    ; GREEDY-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MUL_LO_U32_e64_3]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
+    ; EXACT-LABEL: name: sched_group_barrier_2_separate_pipes
+    ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
+    ; EXACT-NEXT: S_NOP 0
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 5, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 2
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 2
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 2
+    ; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
+    %0:sreg_64 = IMPLICIT_DEF
+    %1:vgpr_32 = IMPLICIT_DEF
+    %2:areg_128 = IMPLICIT_DEF
+    %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    %4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec
+    GLOBAL_STORE_DWORD_SADDR %1, %4, %0, 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    %5:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec
+    %6:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec
+    S_NOP 0
+    %7:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %2, 0, 0, 0, implicit $mode, implicit $exec
+    %8:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %7, 0, 0, 0, implicit $mode, implicit $exec
+    %9:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %8, 0, 0, 0, implicit $mode, implicit $exec
+    %10:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %9, 0, 0, 0, implicit $mode, implicit $exec
+    %11:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %10, 0, 0, 0, implicit $mode, implicit $exec
+    %12:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    %13:vgpr_32 = nsw V_MUL_LO_U32_e64 %12, %12, implicit $exec
+    GLOBAL_STORE_DWORD_SADDR %1, %13, %0, 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; 2 VMEM
+    SCHED_GROUP_BARRIER 16, 2, 0
+    ; 5 ALU
+    SCHED_GROUP_BARRIER 6, 5, 0
+    ; 2 MFMA
+    SCHED_GROUP_BARRIER 8, 2, 0
+    ; 2 MFMA
+    SCHED_GROUP_BARRIER 8, 2, 2
+    ; 2 VMEM_WRITE
+    SCHED_GROUP_BARRIER 64, 2, 2
+    ; 2 MFMA
+    SCHED_GROUP_BARRIER 8, 2, 2
+    S_ENDPGM 0, implicit %5, implicit %6, implicit %11
+...
+
+---
+name: sched_group_barrier_3_separate_pipes
+tracksRegLiveness: true
+body: |
+  bb.0:
+    ; GREEDY-LABEL: name: sched_group_barrier_3_separate_pipes
+    ; GREEDY: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+    ; GREEDY-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+    ; GREEDY-NEXT: S_NOP 0
+    ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; GREEDY-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; GREEDY-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
+    ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
+    ; GREEDY-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_1]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; GREEDY-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 6, 5, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 0
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 2
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 64, 2, 2
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 2, 2
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 4, 1, 1
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 2, 1, 1
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 8, 1, 1
+    ; GREEDY-NEXT: SCHED_GROUP_BARRIER 16, 1, 1
+    ; GREEDY-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MUL_LO_U32_e64_3]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
+    ; EXACT-LABEL: name: sched_group_barrier_3_separate_pipes
+    ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
+    ; EXACT-NEXT: S_NOP 0
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 5, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 2
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 2
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 2, 2
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 4, 1, 1
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 2, 1, 1
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 1
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 1, 1
+    ; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
+    %0:sreg_64 = IMPLICIT_DEF
+    %1:vgpr_32 = IMPLICIT_DEF
+    %2:areg_128 = IMPLICIT_DEF
+    %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    %4:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %3, implicit $exec
+    GLOBAL_STORE_DWORD_SADDR %1, %4, %0, 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    %5:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec
+    %6:vgpr_32 = nsw V_MUL_LO_U32_e64 %3, %1, implicit $exec
+    S_NOP 0
+    %7:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %2, 0, 0, 0, implicit $mode, implicit $exec
+    %8:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %7, 0, 0, 0, implicit $mode, implicit $exec
+    %9:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %8, 0, 0, 0, implicit $mode, implicit $exec
+    %10:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %9, 0, 0, 0, implicit $mode, implicit $exec
+    %11:areg_128 = V_MFMA_F32_4X4X1F32_e64 %1, %3, %10, 0, 0, 0, implicit $mode, implicit $exec
+    %12:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %1, 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    %13:vgpr_32 = nsw V_MUL_LO_U32_e64 %12, %12, implicit $exec
+    GLOBAL_STORE_DWORD_SADDR %1, %13, %0, 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; 2 VMEM
+    SCHED_GROUP_BARRIER 16, 2, 0
+    ; 5 ALU
+    SCHED_GROUP_BARRIER 6, 5, 0
+    ; 2 MFMA
+    SCHED_GROUP_BARRIER 8, 2, 0
+    ; 2 MFMA
+    SCHED_GROUP_BARRIER 8, 2, 2
+    ; 2 VMEM_WRITE
+    SCHED_GROUP_BARRIER 64, 2, 2
+    ; 2 MFMA
+    SCHED_GROUP_BARRIER 8, 2, 2
+    ; 1 SALU
+    SCHED_GROUP_BARRIER 4, 1, 1
+    ; 1 VALU
+    SCHED_GROUP_BARRIER 2, 1, 1
+    ; 1 MFMA
+    SCHED_GROUP_BARRIER 8, 1, 1
+    ; 1 VMEM
+    SCHED_GROUP_BARRIER 16, 1, 1
+    S_ENDPGM 0, implicit %5, implicit %6, implicit %11
+...

diff --git a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir
index b28355c3588f8..6c10c8f09474c 100644
--- a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir
@@ -1,5 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 # RUN: llc -march=amdgcn -mcpu=gfx908 -misched-cluster=false -run-pass=machine-scheduler -verify-misched -o - %s | FileCheck %s
+# RUN: llc -march=amdgcn -mcpu=gfx908 -misched-cluster=false -run-pass=machine-scheduler -amdgpu-igrouplp-exact-solver -verify-misched -o - %s | FileCheck -check-prefix=EXACT %s
 
 --- |
   define amdgpu_kernel void @no_sched_group_barrier(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void }
@@ -35,6 +36,25 @@ body: |
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
     ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
     ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
+    ; EXACT-LABEL: name: no_sched_group_barrier
+    ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
+    ; EXACT-NEXT: S_NOP 0
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
     %0:sreg_64 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:areg_128 = IMPLICIT_DEF
@@ -65,26 +85,51 @@ body: |
     ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
     ; CHECK-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 32, 1, 0
     ; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 2, 1, 0
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 5, 0
     ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 32, 1, 0
     ; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
     ; CHECK-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
     ; CHECK-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 2, 3, 0
     ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
     ; CHECK-NEXT: S_NOP 0
     ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 32, 1, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 2, 1, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 5, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 32, 1, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 2, 3, 0
     ; CHECK-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
     ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
+    ; EXACT-LABEL: name: sched_group_barrier_1_VMEM_READ_1_VALU_5_MFMA_1_VMEM_READ_3_VALU_2_VMEM_WRITE
+    ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; EXACT-NEXT: S_NOP 0
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 32, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 2, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 5, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 32, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 2, 3, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
+    ; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
     %0:sreg_64 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:areg_128 = IMPLICIT_DEF
@@ -128,23 +173,46 @@ body: |
     ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
     ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
     ; CHECK-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
-    ; CHECK-NEXT: S_NOP 0
-    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
-    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
-    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
-    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 1, 10, 0
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
+    ; CHECK-NEXT: S_NOP 0
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 5, 0
     ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
     ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 1, 10, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 5, 0
     ; CHECK-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
     ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
+    ; EXACT-LABEL: name: sched_group_barrier_2_VMEM_1000_ALU_5_MFMA_2_VMEM_WRITE
+    ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
+    ; EXACT-NEXT: S_NOP 0
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 1, 10, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 5, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
+    ; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
     %0:sreg_64 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:areg_128 = IMPLICIT_DEF
@@ -184,31 +252,62 @@ body: |
     ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
     ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
     ; CHECK-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
     ; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
     ; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
     ; CHECK-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
     ; CHECK-NEXT: S_NOP 0
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
     ; CHECK-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
     ; CHECK-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
-    ; CHECK-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
     ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
     ; CHECK-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; CHECK-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
     ; CHECK-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
     ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
+    ; EXACT-LABEL: name: sched_group_barrier_MFMA_VALU_and_SALU_alternating
+    ; EXACT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; EXACT-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]], 512, 0, implicit $exec :: (load (s32) from %ir.in, !alias.scope !0, addrspace 1)
+    ; EXACT-NEXT: [[DEF2:%[0-9]+]]:areg_128 = IMPLICIT_DEF
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_1:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_2:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_2:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR]], [[DEF1]], implicit $exec
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_3:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: S_NOP 0
+    ; EXACT-NEXT: [[V_MFMA_F32_4X4X1F32_e64_4:%[0-9]+]]:areg_128 = V_MFMA_F32_4X4X1F32_e64 [[DEF1]], [[GLOBAL_LOAD_DWORD_SADDR]], [[V_MFMA_F32_4X4X1F32_e64_3]], 0, 0, 0, implicit $mode, implicit $exec
+    ; EXACT-NEXT: [[V_MUL_LO_U32_e64_3:%[0-9]+]]:vgpr_32 = nsw V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; EXACT-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[V_MUL_LO_U32_e64_3]], [[DEF]], 512, 0, implicit $exec :: (store (s32) into %ir.out, !noalias !0, addrspace 1)
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 16, 2, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 8, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 6, 1, 0
+    ; EXACT-NEXT: SCHED_GROUP_BARRIER 64, 2, 0
+    ; EXACT-NEXT: S_ENDPGM 0, implicit [[V_MUL_LO_U32_e64_1]], implicit [[V_MUL_LO_U32_e64_2]], implicit [[V_MFMA_F32_4X4X1F32_e64_4]]
     %0:sreg_64 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:areg_128 = IMPLICIT_DEF