[llvm-branch-commits] [llvm] [AMDGPU][Scheduler] Use MIR-level rematerializer in rematerialization stage (PR #189491)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Mon Mar 30 14:46:49 PDT 2026
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-backend-amdgpu
Author: Lucas Ramirez (lucas-rami)
<details>
<summary>Changes</summary>
This makes the scheduler's rematerialization stage use the target-independent rematerializer. Previously duplicated logic is deleted, and restrictions are put in place in the stage so that the same constraints as before apply to rematerializable registers (as the rematerializer is able to expose many more rematerialization opportunities than what the stage can track at the moment). Consequently it is not expected that this change improves performance overall, but it is a first step toward being able to use the rematerializer's more advanced capabilities during scheduling.
This is *not* an NFC for two reasons.
- Score equalities between two rematerialization candidates with otherwise equivalent score are decided by their corresponding register's index handle in the rematerializer (previously the pointer to their state object's value). This is determined by the rematerializer's register collection order, which is different from the stage's old register collection order. This is the cause of all test changes but one, and should not be detrimental to performance in real cases.
- To support rollback, the stage now uses the rematerializer's rollback listener instead of its previous ad-hoc method (setting the opcode of rematerialized MIs to a DBG_VALUE, and their registers to the sentinel). This is the source of test changes in `machine-scheduler-sink-trivial-remats-debug.mir`. The new rollback mechanism completely removes the behavior tested by `misched-remat-revert.ll` so the test is deleted.
---
Patch is 353.32 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/189491.diff
8 Files Affected:
- (modified) llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp (+101-292)
- (modified) llvm/lib/Target/AMDGPU/GCNSchedStrategy.h (+48-73)
- (modified) llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-attr.mir (+19-19)
- (modified) llvm/test/CodeGen/AMDGPU/machine-scheduler-sink-trivial-remats-debug.mir (+6-6)
- (removed) llvm/test/CodeGen/AMDGPU/misched-remat-revert.ll (-577)
- (modified) llvm/test/CodeGen/AMDGPU/sched_mfma_rewrite_copies.mir (+551-551)
- (modified) llvm/test/CodeGen/AMDGPU/sched_mfma_rewrite_cost.mir (+36-36)
- (modified) llvm/test/CodeGen/AMDGPU/sched_mfma_rewrite_diff_types.mir (+17-17)
``````````diff
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 78b450c8814d9..274513a030b33 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -37,6 +37,7 @@
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/Rematerializer.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/MC/TargetRegistry.h"
@@ -1452,23 +1453,6 @@ bool PreRARematStage::initGCNSchedStage() {
if (!GCNSchedStage::initGCNSchedStage() || DAG.Regions.size() <= 1)
return false;
- // Maps all MIs (except lone terminators, which are not part of any region) to
- // their parent region. Non-lone terminators are considered part of the region
- // they delimitate.
- DenseMap<MachineInstr *, unsigned> MIRegion(MF.getInstructionCount());
-
- // Before performing any IR modification record the parent region of each MI
- // and the parent MBB of each region.
- const unsigned NumRegions = DAG.Regions.size();
- for (unsigned I = 0; I < NumRegions; ++I) {
- RegionBoundaries Region = DAG.Regions[I];
- for (auto MI = Region.first; MI != Region.second; ++MI)
- MIRegion.insert({&*MI, I});
- MachineBasicBlock *ParentMBB = Region.first->getParent();
- if (Region.second != ParentMBB->end())
- MIRegion.insert({&*Region.second, I});
- }
-
#ifndef NDEBUG
auto PrintTargetRegions = [&]() -> void {
if (TargetRegions.none()) {
@@ -1479,31 +1463,6 @@ bool PreRARematStage::initGCNSchedStage() {
for (unsigned I : TargetRegions.set_bits())
dbgs() << REMAT_PREFIX << " [" << I << "] " << RPTargets[I] << '\n';
};
- auto PrintCandidate = [&](const ScoredRemat &Cand) -> Printable {
- return Printable([&, Cand](raw_ostream &OS) {
- // Concatenate all region numbers in which the register is unused and
- // live-through.
- const RematReg &Remat = *Cand.Remat;
- bool HasLiveThroughRegion = false;
- OS << '[' << Remat.DefRegion << " -";
- for (unsigned I = 0; I < NumRegions; ++I) {
- if (!Cand.UnpredictableRPSave[I]) {
- if (HasLiveThroughRegion) {
- OS << ',';
- } else {
- OS << "- ";
- HasLiveThroughRegion = true;
- }
- OS << I;
- }
- }
- if (HasLiveThroughRegion)
- OS << " -";
- OS << "-> " << Remat.UseRegion << "] ";
- Remat.DefMI->print(OS, /*IsStandalone=*/true, /*SkipOpers=*/false,
- /*SkipDebugLoc=*/false, /*AddNewLine=*/false);
- });
- };
#endif
// Set an objective for the stage based on current RP in each region.
@@ -1527,36 +1486,68 @@ bool PreRARematStage::initGCNSchedStage() {
PrintTargetRegions();
});
- // Collect all rematerializable registers in the function, then create a
- // corresponding scored rematerialization candidate for each one.
- if (!collectRematRegs(MIRegion)) {
+ // We need up-to-date live-out info. to query live-out register masks in
+ // regions containing rematerializable instructions.
+ DAG.RegionLiveOuts.buildLiveRegMap();
+
+ if (!Remater.analyze()) {
REMAT_DEBUG(dbgs() << "No rematerializable registers\n");
return false;
}
const ScoredRemat::FreqInfo FreqInfo(MF, DAG);
+
+ // Set of registers already marked for potential remterialization; used to
+ // avoid rematerialization chains.
+ SmallSet<Register, 4> MarkedRegs;
+ auto IsMarkedForRemat = [&MarkedRegs](const MachineOperand &MO) -> bool {
+ return MO.isReg() && MarkedRegs.contains(MO.getReg());
+ };
+
+ // Collect candidates. We have more restrictions on what we can track here
+ // compared to the rematerializer.
SmallVector<ScoredRemat, 8> Candidates;
- Candidates.reserve(RematRegs.size());
SmallVector<unsigned> CandidateOrder, NewCandidateOrder;
- for (RematReg &Remat : RematRegs) {
- ScoredRemat &Candidate = Candidates.emplace_back(&Remat, FreqInfo, DAG);
- if (Candidate.update(TargetRegions, RPTargets, FreqInfo, !TargetOcc))
+ for (unsigned RegIdx = 0, E = Remater.getNumRegs(); RegIdx < E; ++RegIdx) {
+ const Rematerializer::Reg &CandReg = Remater.getReg(RegIdx);
+
+ // Single user only.
+ unsigned NumUsers = 0;
+ for (const auto &[_, RegionUses] : CandReg.Uses)
+ NumUsers += RegionUses.size();
+ if (NumUsers != 1)
+ continue;
+
+ // We further filter the registers that we can rematerialize based on our
+ // current tracking capabilities in the stage.
+ MachineInstr *UseMI = *CandReg.Uses.begin()->getSecond().begin();
+ const MachineOperand &UseMO = UseMI->getOperand(0);
+ if (IsMarkedForRemat(UseMO) ||
+ llvm::any_of(CandReg.DefMI->operands(), IsMarkedForRemat))
+ continue;
+
+ // Do not rematerialize an instruction if it uses registers that aren't
+ // available at its use. This ensures that we are not extending any live
+ // range while rematerializing.
+ SlotIndex UseIdx = DAG.LIS->getInstructionIndex(*UseMI).getRegSlot(true);
+ if (!VirtRegAuxInfo::allUsesAvailableAt(CandReg.DefMI, UseIdx, *DAG.LIS,
+ DAG.MRI, *DAG.TII))
+ continue;
+
+ MarkedRegs.insert(CandReg.getDefReg());
+ ScoredRemat &Cand = Candidates.emplace_back(RegIdx, FreqInfo, Remater, DAG);
+ if (Cand.update(TargetRegions, RPTargets, FreqInfo, !TargetOcc))
CandidateOrder.push_back(Candidates.size() - 1);
}
- REMAT_DEBUG({
- dbgs() << "Rematerializable registers:\n";
- for (const ScoredRemat &Cand : Candidates)
- dbgs() << REMAT_PREFIX << " " << PrintCandidate(Cand) << '\n';
- dbgs() << REMAT_PREFIX << "Region frequencies\n";
- for (auto [I, Freq] : enumerate(FreqInfo.Regions)) {
- dbgs() << REMAT_PREFIX << " [" << I << "] ";
- if (Freq)
- dbgs() << Freq;
- else
- dbgs() << "unknown ";
- dbgs() << " | " << *DAG.Regions[I].first;
- }
- });
+ if (TargetOcc) {
+ // Every rematerialization we do here is likely to move the instruction
+ // into a higher frequency region, increasing the total sum latency of the
+ // instruction itself. This is acceptable if we are eliminating a spill in
+ // the process, but when the goal is increasing occupancy we get nothing
+ // out of rematerialization if occupancy is not increased in the end; in
+ // such cases we want to roll back the rematerialization.
+ Rollback = std::make_unique<RollbackSupport>(Remater);
+ }
// Rematerialize registers in successive rounds until all RP targets are
// satisifed or until we run out of rematerialization candidates.
@@ -1575,7 +1566,7 @@ bool PreRARematStage::initGCNSchedStage() {
<< "Candidates with non-null score, in rematerialization order:\n";
for (const ScoredRemat &Cand : reverse(Candidates)) {
dbgs() << REMAT_PREFIX << " " << Cand.print() << " | "
- << PrintCandidate(Cand) << '\n';
+ << Remater.printRematReg(Cand.RegIdx) << '\n';
}
PrintTargetRegions();
});
@@ -1585,6 +1576,8 @@ bool PreRARematStage::initGCNSchedStage() {
// are no longer useful to decrease RP.
while (!CandidateOrder.empty()) {
const ScoredRemat &Cand = Candidates[CandidateOrder.back()];
+ const Rematerializer::Reg &Reg = Remater.getReg(Cand.RegIdx);
+
// When previous rematerializations in this round have already satisfied
// RP targets in all regions this rematerialization can impact, we have a
// good indication that our scores have diverged significantly from
@@ -1593,44 +1586,22 @@ bool PreRARematStage::initGCNSchedStage() {
// in at least one target region.
if (!Cand.maybeBeneficial(TargetRegions, RPTargets)) {
REMAT_DEBUG(dbgs() << "Interrupt round on stale score for "
- << Cand.print() << " | " << *Cand.Remat->DefMI);
+ << Cand.print() << " | "
+ << Remater.printRematReg(Cand.RegIdx));
break;
}
CandidateOrder.pop_back();
- RematReg &Remat = *Cand.Remat;
-
- // Remove the register from all regions where it is a live-in or live-out
- // and rematerialize it.
- REMAT_DEBUG(dbgs() << "** REMAT " << PrintCandidate(Cand) << '\n');
- removeFromLiveMaps(Remat.getReg(), Cand.LiveIn, Cand.LiveOut);
- MachineInstr *RematMI = Cand.rematerialize(DAG);
-
- // Every rematerialization we do here is likely to move the instruction
- // into a higher frequency region, increasing the total sum latency of the
- // instruction itself. This is acceptable if we are eliminating a spill in
- // the process, but when the goal is increasing occupancy we get nothing
- // out of rematerialization if occupancy is not increased in the end; in
- // such cases we want to roll back the rematerialization.
- if (TargetOcc) {
- RollbackInfo &Rollback =
- Rollbacks.emplace_back(&Remat, Cand.LiveIn, Cand.LiveOut);
- Rollback.RematMI = RematMI;
- // Make the original MI a debug value so that it does not influence
- // scheduling and replace all read registers with a sentinel register to
- // prevent operands to appear in use-lists of other MIs during LIS
- // updates. Store mappings between operand indices and original
- // registers for potential rollback.
- Remat.DefMI->setDesc(DAG.TII->get(TargetOpcode::DBG_VALUE));
- for (auto [Idx, MO] : enumerate(Remat.DefMI->operands())) {
- if (MO.isReg() && MO.readsReg()) {
- Rollback.RegMap.insert({Idx, MO.getReg()});
- MO.setReg(Register());
- }
- }
- } else {
- // Just delete the original instruction if it cannot be rolled back.
- DAG.deleteMI(Remat.DefRegion, Remat.DefMI);
+
+ // Remove the register from all regions where it is a live-in or live-out,
+ // then rematerialize the register.
+ REMAT_DEBUG(dbgs() << "** REMAT " << Remater.printRematReg(Cand.RegIdx)
+ << '\n');
+ removeFromLiveMaps(Reg.getDefReg(), Cand.LiveIn, Cand.LiveOut);
+ if (Rollback) {
+ Rollback->LiveMapUpdates.emplace_back(Cand.RegIdx, Cand.LiveIn,
+ Cand.LiveOut);
}
+ Cand.rematerialize(Remater);
// Adjust RP targets. The save is guaranteed in regions in which the
// register is live-through and unused but optimistic in all other regions
@@ -2825,82 +2796,6 @@ bool PreRARematStage::setObjective() {
return TargetRegions.any();
}
-bool PreRARematStage::collectRematRegs(
- const DenseMap<MachineInstr *, unsigned> &MIRegion) {
- // We need up-to-date live-out info. to query live-out register masks in
- // regions containing rematerializable instructions.
- DAG.RegionLiveOuts.buildLiveRegMap();
-
- // Set of registers already marked for potential remterialization; used to
- // avoid rematerialization chains.
- SmallSet<Register, 4> MarkedRegs;
- auto IsMarkedForRemat = [&MarkedRegs](const MachineOperand &MO) -> bool {
- return MO.isReg() && MarkedRegs.contains(MO.getReg());
- };
-
- // Identify rematerializable instructions in the function.
- for (unsigned I = 0, E = DAG.Regions.size(); I != E; ++I) {
- RegionBoundaries Bounds = DAG.Regions[I];
- for (auto MI = Bounds.first; MI != Bounds.second; ++MI) {
- // The instruction must be rematerializable.
- MachineInstr &DefMI = *MI;
- if (!isReMaterializable(DefMI))
- continue;
-
- // We only support rematerializing virtual registers with one
- // definition.
- Register Reg = DefMI.getOperand(0).getReg();
- if (!Reg.isVirtual() || !DAG.MRI.hasOneDef(Reg))
- continue;
-
- // We only care to rematerialize the instruction if it has a single
- // non-debug user in a different region.
- // FIXME: Allow rematerializations with multiple uses. This should be
- // relatively easy to support using the current cost model.
- MachineInstr *UseMI = DAG.MRI.getOneNonDBGUser(Reg);
- if (!UseMI)
- continue;
- auto UseRegion = MIRegion.find(UseMI);
- if (UseRegion == MIRegion.end() || UseRegion->second == I)
- continue;
-
- // Do not rematerialize an instruction if it uses or is used by an
- // instruction that we have designated for rematerialization.
- // FIXME: Allow for rematerialization chains: this requires 1. updating
- // remat points to account for uses that are rematerialized, and 2.
- // either rematerializing the candidates in careful ordering, or
- // deferring the MBB RP walk until the entire chain has been
- // rematerialized.
- const MachineOperand &UseMO = UseMI->getOperand(0);
- if (IsMarkedForRemat(UseMO) ||
- llvm::any_of(DefMI.operands(), IsMarkedForRemat))
- continue;
-
- // Do not rematerialize an instruction it it uses registers that aren't
- // available at its use. This ensures that we are not extending any live
- // range while rematerializing.
- SlotIndex UseIdx = DAG.LIS->getInstructionIndex(*UseMI).getRegSlot(true);
- if (!VirtRegAuxInfo::allUsesAvailableAt(&DefMI, UseIdx, *DAG.LIS, DAG.MRI,
- *DAG.TII))
- continue;
-
- // Add the instruction to the rematerializable list.
- MarkedRegs.insert(Reg);
- RematRegs.emplace_back(&DefMI, UseMI, DAG, MIRegion);
- }
- }
-
- return !RematRegs.empty();
-}
-
-PreRARematStage::RematReg::RematReg(
- MachineInstr *DefMI, MachineInstr *UseMI, GCNScheduleDAGMILive &DAG,
- const DenseMap<MachineInstr *, unsigned> &MIRegion)
- : DefMI(DefMI), UseMI(UseMI), DefRegion(MIRegion.at(DefMI)),
- UseRegion(MIRegion.at(UseMI)),
- Mask(DAG.RegionLiveOuts.getLiveRegsForRegionIdx(DefRegion).at(getReg())) {
-}
-
bool PreRARematStage::ScoredRemat::maybeBeneficial(
const BitVector &TargetRegions, ArrayRef<GCNRPTarget> RPTargets) const {
for (unsigned I : TargetRegions.set_bits()) {
@@ -2910,16 +2805,6 @@ bool PreRARematStage::ScoredRemat::maybeBeneficial(
return false;
}
-void PreRARematStage::ScoredRemat::insertMI(unsigned RegionIdx,
- MachineInstr *RematMI,
- GCNScheduleDAGMILive &DAG) const {
- RegionBoundaries &Bounds = DAG.Regions[RegionIdx];
- if (Bounds.first == std::next(MachineBasicBlock::iterator(RematMI)))
- Bounds.first = RematMI;
- DAG.LIS->InsertMachineInstrInMaps(*RematMI);
- DAG.LIS->createAndComputeVirtRegInterval(RematMI->getOperand(0).getReg());
-}
-
PreRARematStage::ScoredRemat::FreqInfo::FreqInfo(
MachineFunction &MF, const GCNScheduleDAGMILive &DAG) {
assert(DAG.MLI && "MLI not defined in DAG");
@@ -2951,11 +2836,16 @@ PreRARematStage::ScoredRemat::FreqInfo::FreqInfo(
}
}
-PreRARematStage::ScoredRemat::ScoredRemat(RematReg *Remat, const FreqInfo &Freq,
+PreRARematStage::ScoredRemat::ScoredRemat(RegisterIdx RegIdx,
+ const FreqInfo &Freq,
+ const Rematerializer &Remater,
GCNScheduleDAGMILive &DAG)
- : Remat(Remat), LiveIn(DAG.Regions.size()), LiveOut(DAG.Regions.size()),
+ : RegIdx(RegIdx), LiveIn(DAG.Regions.size()), LiveOut(DAG.Regions.size()),
Live(DAG.Regions.size()), UnpredictableRPSave(DAG.Regions.size()) {
- Register DefReg = Remat->getReg();
+ const Rematerializer::Reg &Reg = Remater.getReg(RegIdx);
+ Register DefReg = Reg.getDefReg();
+ assert(Reg.Uses.size() == 1 && "expected users in single region");
+ const unsigned UseRegion = Reg.Uses.begin()->first;
// Mark regions in which the rematerializable register is live.
for (unsigned I = 0, E = DAG.Regions.size(); I != E; ++I) {
@@ -2968,22 +2858,18 @@ PreRARematStage::ScoredRemat::ScoredRemat(RematReg *Remat, const FreqInfo &Freq,
// If the register is both unused and live-through in the region, the
// latter's RP is guaranteed to decrease.
- if (!LiveIn[I] || !LiveOut[I] || I == Remat->UseRegion)
+ if (!LiveIn[I] || !LiveOut[I] || I == UseRegion)
UnpredictableRPSave.set(I);
}
Live |= LiveIn;
Live |= LiveOut;
- RPSave.inc(DefReg, LaneBitmask::getNone(), Remat->Mask, DAG.MRI);
+ RPSave.inc(DefReg, LaneBitmask::getNone(), Reg.Mask, DAG.MRI);
// Get frequencies of defining and using regions. A rematerialization from the
// least frequent region to the most frequent region will yield the greatest
- // latency penalty and therefore should get minimum score. Reciprocally, a
- // rematerialization in the other direction should get maximum score. Default
- // to values that will yield the worst possible score given known frequencies
// in order to penalize rematerializations from or into regions whose
- // frequency is unknown.
- int64_t DefOrMin = std::max(Freq.Regions[Remat->DefRegion], Freq.MinFreq);
- int64_t UseOrMax = Freq.Regions[Remat->UseRegion];
+ int64_t DefOrMin = std::max(Freq.Regions[Reg.DefRegion], Freq.MinFreq);
+ int64_t UseOrMax = Freq.Regions[UseRegion];
if (!UseOrMax)
UseOrMax = Freq.MaxFreq;
FreqDiff = DefOrMin - UseOrMax;
@@ -3023,19 +2909,14 @@ bool PreRARematStage::ScoredRemat::update(const BitVector &TargetRegions,
return !hasNullScore();
}
-MachineInstr *
-PreRARematStage::ScoredRemat::rematerialize(GCNScheduleDAGMILive &DAG) const {
- const SIInstrInfo *TII = DAG.MF.getSubtarget<GCNSubtarget>().getInstrInfo();
- MachineInstr &DefMI = *Remat->DefMI;
- Register Reg = DefMI.getOperand(0).getReg();
- Register NewReg = DAG.MRI.cloneVirtualRegister(Reg);
-
- // Rematerialize the register in the region where it is used.
- MachineBasicBlock::iterator InsertPos = Remat->UseMI;
- TII->reMaterialize(*InsertPos->getParent(), InsertPos, NewReg, 0, DefMI);
- MachineInstr *RematMI = &*std::prev(InsertPos);
- Remat->UseMI->substituteRegister(Reg, NewReg, 0, *DAG.TRI);
- insertMI(Remat->UseRegion, RematMI, DAG);
+void PreRARematStage::ScoredRemat::rematerialize(
+ Rematerializer &Remater) const {
+ const Rematerializer::Reg &Reg = Remater.getReg(RegIdx);
+ Rematerializer::DependencyReuseInfo DRI;
+ for (const Rematerializer::Reg::Dependency &Dep : Reg.Dependencies)
+ DRI.reuse(Dep.RegIdx);
+ unsigned UseRegion = Reg.Uses.begin()->first;
+ Remater.rematerializeToRegion(RegIdx, UseRegion, DRI);
#ifdef EXPENSIVE_CHECKS
// All uses are known to be available / live at the remat point. Thus,
@@ -3065,13 +2946,6 @@ PreRARematStage::ScoredRemat::rematerialize(GCNScheduleDAGMILive &DAG) const {
}
}
#endif
- return RematMI;
-}
-
-void PreRARematStage::commitRematerializations() const {
- REMAT_DEBUG(dbgs() << "Commiting all rematerializations\n");
- for (const RollbackInfo &Rollback : Rollbacks)
- DAG.deleteMI(Rollback.Remat->DefRegion, Rollback.Remat->DefMI);
}
void PreRARematStage::updateRPTargets(const BitVector &Regions,
@@ -3103,24 +2977,6 @@ bool PreRARematStage::updateAndVerifyRPTargets(const BitVector &Regions) {
return TooOptimistic;
}
-// Copied from MachineLICM
-bool PreRARematStage::isReMaterializable(const MachineInstr &MI) {
- if (!DAG.TII->isReMaterializable(MI))
- return false;
-
- for (const MachineOperand &MO : MI.all_uses()) {
- // We can't remat physreg uses, unless it is a constant or an ignorable
- // use (e.g. implicit exec use on VALU instructions)
- if (MO.getReg().isPhysical()) {
- if (DAG.MRI.isConstantPhysReg(MO.getReg()) || DAG.TII->isIgnorableUse(MO))
- continue;
- return false;
- }
- }
-
- return true;
-}
-
void PreRARematStage::removeFromLiveMaps(Register Reg, const BitVector &LiveIn,
const BitVector &LiveOut) {
assert(LiveIn.size() == DAG.Regions.size() && "region num mismatch");
@@ -3152,28 +3008,8 @@ void PreRARematStage::finalizeGCNSchedStage() {
// When increasing occupancy, it is possible that re-scheduling is not able to
// achieve the target occupancy in all regions, in which case re-scheduling in
// all regions should be reverted.
- if (DAG.MinOccupancy >= *TargetOcc) {
- co...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/189491
More information about the llvm-branch-commits
mailing list