[llvm] r328011 - [llvm-mca] Move the logic that computes the scheduler's queue usage to the BackendStatistics view.

Andrea Di Biagio via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 20 11:20:39 PDT 2018


Author: adibiagio
Date: Tue Mar 20 11:20:39 2018
New Revision: 328011

URL: http://llvm.org/viewvc/llvm-project?rev=328011&view=rev
Log:
[llvm-mca] Move the logic that computes the scheduler's queue usage to the BackendStatistics view.

This patch introduces two new callbacks in the event listener interface to
handle the "buffered resource reserved" event and the "buffered resource
released" event. Every time a buffered resource is reserved or released, an
event is generated.

Before this patch, the Scheduler (with the help of the ResourceManager) was
responsible for tracking the scheduler's queue usage. However, that design
forced the Scheduler to 'publish' queue pressure information through the
Backend interface.

The goal of this patch is to break the dependency between the BackendStatistics
view and the Backend. The Scheduler now knows how to notify "buffer reserved"
and "buffer released" events, and the scheduler's queue usage analysis has been
moved to the BackendStatistics view.

Differential Revision: https://reviews.llvm.org/D44686
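
As a rough illustration of the new interface (not part of the patch), a custom
listener can track per-buffer occupancy from the two callbacks alone. The class
name BufferTracker and its members below are illustrative; only
onReservedBuffers/onReleasedBuffers and the Buffer IDs passed by the Scheduler
come from this patch. The peak-tracking logic mirrors what BackendStatistics
now does.

    #include "HWEventListener.h"
    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/DenseMap.h"
    #include <algorithm>

    namespace mca {

    class BufferTracker : public HWEventListener {
      // Per-buffer counters: slots currently in use and the peak seen so far.
      struct Usage {
        unsigned SlotsInUse = 0;
        unsigned MaxUsedSlots = 0;
      };
      // Keyed by processor resource ID, as resolved by the Scheduler before it
      // notifies the Backend.
      llvm::DenseMap<unsigned, Usage> Buffers;

    public:
      void onReservedBuffers(llvm::ArrayRef<unsigned> IDs) override {
        for (unsigned ID : IDs) {
          Usage &U = Buffers[ID]; // value-initialized on first use
          U.SlotsInUse++;
          U.MaxUsedSlots = std::max(U.MaxUsedSlots, U.SlotsInUse);
        }
      }

      void onReleasedBuffers(llvm::ArrayRef<unsigned> IDs) override {
        for (unsigned ID : IDs)
          Buffers[ID].SlotsInUse--;
      }
    };

    } // namespace mca

Such a listener would be hooked up through the existing registration path,
i.e. Backend::addEventListener(&Tracker), exactly as BackendStatistics is.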

Modified:
    llvm/trunk/tools/llvm-mca/Backend.cpp
    llvm/trunk/tools/llvm-mca/Backend.h
    llvm/trunk/tools/llvm-mca/BackendStatistics.cpp
    llvm/trunk/tools/llvm-mca/BackendStatistics.h
    llvm/trunk/tools/llvm-mca/HWEventListener.h
    llvm/trunk/tools/llvm-mca/Scheduler.cpp
    llvm/trunk/tools/llvm-mca/Scheduler.h

Modified: llvm/trunk/tools/llvm-mca/Backend.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/tools/llvm-mca/Backend.cpp?rev=328011&r1=328010&r2=328011&view=diff
==============================================================================
--- llvm/trunk/tools/llvm-mca/Backend.cpp (original)
+++ llvm/trunk/tools/llvm-mca/Backend.cpp Tue Mar 20 11:20:39 2018
@@ -75,6 +75,16 @@ void Backend::notifyResourceAvailable(co
     Listener->onResourceAvailable(RR);
 }
 
+void Backend::notifyReservedBuffers(ArrayRef<unsigned> Buffers) {
+  for (HWEventListener *Listener : Listeners)
+    Listener->onReservedBuffers(Buffers);
+}
+
+void Backend::notifyReleasedBuffers(ArrayRef<unsigned> Buffers) {
+  for (HWEventListener *Listener : Listeners)
+    Listener->onReleasedBuffers(Buffers);
+}
+
 void Backend::notifyCycleEnd(unsigned Cycle) {
   DEBUG(dbgs() << "[E] Cycle end: " << Cycle << "\n\n");
   for (HWEventListener *Listener : Listeners)

Modified: llvm/trunk/tools/llvm-mca/Backend.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/tools/llvm-mca/Backend.h?rev=328011&r1=328010&r2=328011&view=diff
==============================================================================
--- llvm/trunk/tools/llvm-mca/Backend.h (original)
+++ llvm/trunk/tools/llvm-mca/Backend.h Tue Mar 20 11:20:39 2018
@@ -94,15 +94,14 @@ public:
   unsigned getMaxUsedRegisterMappings() const {
     return DU->getMaxUsedRegisterMappings();
   }
-  void getBuffersUsage(std::vector<BufferUsageEntry> &Usage) const {
-    return HWS->getBuffersUsage(Usage);
-  }
 
   void addEventListener(HWEventListener *Listener);
   void notifyCycleBegin(unsigned Cycle);
   void notifyInstructionEvent(const HWInstructionEvent &Event);
   void notifyStallEvent(const HWStallEvent &Event);
   void notifyResourceAvailable(const ResourceRef &RR);
+  void notifyReservedBuffers(llvm::ArrayRef<unsigned> Buffers);
+  void notifyReleasedBuffers(llvm::ArrayRef<unsigned> Buffers);
   void notifyCycleEnd(unsigned Cycle);
 };
 

Modified: llvm/trunk/tools/llvm-mca/BackendStatistics.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/tools/llvm-mca/BackendStatistics.cpp?rev=328011&r1=328010&r2=328011&view=diff
==============================================================================
--- llvm/trunk/tools/llvm-mca/BackendStatistics.cpp (original)
+++ llvm/trunk/tools/llvm-mca/BackendStatistics.cpp Tue Mar 20 11:20:39 2018
@@ -36,6 +36,29 @@ void BackendStatistics::onInstructionEve
   }
 }
 
+void BackendStatistics::onReservedBuffers(ArrayRef<unsigned> Buffers) {
+  for (const unsigned Buffer : Buffers) {
+    if (BufferedResources.find(Buffer) != BufferedResources.end()) {
+      BufferUsage &BU = BufferedResources[Buffer];
+      BU.SlotsInUse++;
+      BU.MaxUsedSlots = std::max(BU.MaxUsedSlots, BU.SlotsInUse);
+      continue;
+    }
+
+    BufferedResources.insert(
+        std::pair<unsigned, BufferUsage>(Buffer, {1U, 1U}));
+  }
+}
+
+void BackendStatistics::onReleasedBuffers(ArrayRef<unsigned> Buffers) {
+  for (const unsigned Buffer : Buffers) {
+    assert(BufferedResources.find(Buffer) != BufferedResources.end() &&
+           "Buffered resource not in map?");
+    BufferUsage &BU = BufferedResources[Buffer];
+    BU.SlotsInUse--;
+  }
+}
+
 void BackendStatistics::printRetireUnitStatistics(llvm::raw_ostream &OS) const {
   std::string Buffer;
   raw_string_ostream TempStream(Buffer);
@@ -126,15 +149,13 @@ void BackendStatistics::printDispatchSta
   OS << Buffer;
 }
 
-void BackendStatistics::printSchedulerUsage(
-    raw_ostream &OS, const MCSchedModel &SM,
-    const ArrayRef<BufferUsageEntry> &Usage) const {
- 
+void BackendStatistics::printSchedulerUsage(raw_ostream &OS,
+                                            const MCSchedModel &SM) const {
   std::string Buffer;
   raw_string_ostream TempStream(Buffer);
   TempStream << "\n\nScheduler's queue usage:\n";
   // Early exit if no buffered resources were consumed.
-  if (Usage.empty()) {
+  if (BufferedResources.empty()) {
     TempStream << "No scheduler resources used.\n";
     TempStream.flush();
     OS << Buffer;
@@ -143,17 +164,15 @@ void BackendStatistics::printSchedulerUs
 
   for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
     const MCProcResourceDesc &ProcResource = *SM.getProcResource(I);
-    if (!ProcResource.BufferSize)
+    if (ProcResource.BufferSize <= 0)
       continue;
 
-    for (const BufferUsageEntry &Entry : Usage)
-      if (I == Entry.first)
-        TempStream << ProcResource.Name << ",  " << Entry.second << '/'
-                   << ProcResource.BufferSize << '\n';
+    const BufferUsage &BU = BufferedResources.lookup(I);
+    TempStream << ProcResource.Name << ",  " << BU.MaxUsedSlots << '/'
+               << ProcResource.BufferSize << '\n';
   }
 
   TempStream.flush();
   OS << Buffer;
 }
-
 } // namespace mca

Modified: llvm/trunk/tools/llvm-mca/BackendStatistics.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/tools/llvm-mca/BackendStatistics.h?rev=328011&r1=328010&r2=328011&view=diff
==============================================================================
--- llvm/trunk/tools/llvm-mca/BackendStatistics.h (original)
+++ llvm/trunk/tools/llvm-mca/BackendStatistics.h Tue Mar 20 11:20:39 2018
@@ -85,6 +85,17 @@ class BackendStatistics : public View {
   // is one counter for every generic stall kind (see class HWStallEvent).
   llvm::SmallVector<unsigned, 8> HWStalls;
 
+  // Tracks the usage of a scheduler's queue.
+  struct BufferUsage {
+    unsigned SlotsInUse;
+    unsigned MaxUsedSlots;
+  };
+
+  // There is a map entry for each buffered resource in the scheduling model.
+  // Every time a buffer is consumed/freed, this view updates the corresponding
+  // entry.
+  llvm::DenseMap<unsigned, BufferUsage> BufferedResources;
+
   void updateHistograms() {
     DispatchGroupSizePerCycle[NumDispatched]++;
     IssuedPerCycle[NumIssued]++;
@@ -107,8 +118,8 @@ class BackendStatistics : public View {
                               unsigned Cycles) const;
   void printIssuePerCycle(const Histogram &IssuePerCycle,
                           unsigned TotalCycles) const;
-  void printSchedulerUsage(llvm::raw_ostream &OS, const llvm::MCSchedModel &SM,
-                           const llvm::ArrayRef<BufferUsageEntry> &Usage) const;
+  void printSchedulerUsage(llvm::raw_ostream &OS,
+                           const llvm::MCSchedModel &SM) const;
 
 public:
   BackendStatistics(const Backend &backend, const llvm::MCSubtargetInfo &sti)
@@ -126,6 +137,14 @@ public:
       HWStalls[Event.Type]++;
   }
 
+  // Increases the number of used scheduler queue slots of every buffered
+  // resource in the Buffers set.
+  void onReservedBuffers(llvm::ArrayRef<unsigned> Buffers);
+
+  // Decreases by one the number of used scheduler queue slots of every
+  // buffered resource in the Buffers set.
+  void onReleasedBuffers(llvm::ArrayRef<unsigned> Buffers);
+
   void printView(llvm::raw_ostream &OS) const override {
     printDispatchStalls(OS);
     printRATStatistics(OS, B.getTotalRegisterMappingsCreated(),
@@ -134,12 +153,9 @@ public:
     printSchedulerStatistics(OS);
     printRetireUnitStatistics(OS);
 
-    std::vector<BufferUsageEntry> Usage;
-    B.getBuffersUsage(Usage);
-    printSchedulerUsage(OS, STI.getSchedModel(), Usage);
+    printSchedulerUsage(OS, STI.getSchedModel());
   }
 };
-
 } // namespace mca
 
 #endif

Modified: llvm/trunk/tools/llvm-mca/HWEventListener.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/tools/llvm-mca/HWEventListener.h?rev=328011&r1=328010&r2=328011&view=diff
==============================================================================
--- llvm/trunk/tools/llvm-mca/HWEventListener.h (original)
+++ llvm/trunk/tools/llvm-mca/HWEventListener.h Tue Mar 20 11:20:39 2018
@@ -104,6 +104,11 @@ public:
   using ResourceRef = std::pair<uint64_t, uint64_t>;
   virtual void onResourceAvailable(const ResourceRef &RRef) {}
 
+  // Events generated by the Scheduler when buffered resources are
+  // consumed/freed.
+  virtual void onReservedBuffers(llvm::ArrayRef<unsigned> Buffers) {}
+  virtual void onReleasedBuffers(llvm::ArrayRef<unsigned> Buffers) {}
+
   virtual ~HWEventListener() {}
 
 private:

Modified: llvm/trunk/tools/llvm-mca/Scheduler.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/tools/llvm-mca/Scheduler.cpp?rev=328011&r1=328010&r2=328011&view=diff
==============================================================================
--- llvm/trunk/tools/llvm-mca/Scheduler.cpp (original)
+++ llvm/trunk/tools/llvm-mca/Scheduler.cpp Tue Mar 20 11:20:39 2018
@@ -181,7 +181,6 @@ bool ResourceManager::mustIssueImmediate
 void ResourceManager::issueInstruction(
     unsigned Index, const InstrDesc &Desc,
     SmallVectorImpl<std::pair<ResourceRef, unsigned>> &Pipes) {
-  releaseBuffers(Desc.Buffers);
   for (const std::pair<uint64_t, ResourceUsage> &R : Desc.Resources) {
     const CycleSegment &CS = R.second.CS;
     if (!CS.size()) {
@@ -252,11 +251,14 @@ void Scheduler::scheduleInstruction(unsi
   // Consume entries in the reservation stations.
   const InstrDesc &Desc = MCIS.getDesc();
 
-  // Reserve a slot in each buffered resource. Also, mark units with
-  // BufferSize=0 as reserved. Resources with a buffer size of zero will only be
-  // released after MCIS is issued, and all the ResourceCycles for those units
-  // have been consumed.
-  Resources->reserveBuffers(Desc.Buffers);
+  if (!Desc.Buffers.empty()) {
+    // Reserve a slot in each buffered resource. Also, mark units with
+    // BufferSize=0 as reserved. Resources with a buffer size of zero will only
+    // be released after MCIS is issued, and all the ResourceCycles for those
+    // units have been consumed.
+    Resources->reserveBuffers(Desc.Buffers);
+    notifyReservedBuffers(Desc.Buffers);
+  }
 
   bool MayLoad = Desc.MayLoad;
   bool MayStore = Desc.MayStore;
@@ -331,6 +333,13 @@ Scheduler::Event Scheduler::canBeDispatc
 }
 
 void Scheduler::issueInstruction(Instruction &IS, unsigned InstrIndex) {
+  const InstrDesc &D = IS.getDesc();
+
+  if (!D.Buffers.empty()) {
+    Resources->releaseBuffers(D.Buffers);
+    notifyReleasedBuffers(D.Buffers);
+  }
+
   // Issue the instruction and collect all the consumed resources
   // into a vector. That vector is then used to notify the listener.
   // Most instructions consume very few resurces (typically one or
@@ -338,8 +347,6 @@ void Scheduler::issueInstruction(Instruc
   // initialize its capacity to 4. This should address the majority of
   // the cases.
   SmallVector<std::pair<ResourceRef, unsigned>, 4> UsedResources;
-
-  const InstrDesc &D = IS.getDesc();
   Resources->issueInstruction(InstrIndex, D, UsedResources);
   // Notify the instruction that it started executing.
   // This updates the internal state of each write.
@@ -417,13 +424,14 @@ void Scheduler::updateIssuedQueue() {
 
 void Scheduler::notifyInstructionIssued(
     unsigned Index, const ArrayRef<std::pair<ResourceRef, unsigned>> &Used) {
-  DEBUG(dbgs() << "[E] Instruction Issued: " << Index << '\n';
-        for (const std::pair<ResourceRef, unsigned> &Resource
-             : Used) {
-          dbgs() << "[E] Resource Used: [" << Resource.first.first << '.'
-                 << Resource.first.second << "]\n";
-          dbgs() << "           cycles: " << Resource.second << '\n';
-        });
+  DEBUG({
+    dbgs() << "[E] Instruction Issued: " << Index << '\n';
+    for (const std::pair<ResourceRef, unsigned> &Resource : Used) {
+      dbgs() << "[E] Resource Used: [" << Resource.first.first << '.'
+             << Resource.first.second << "]\n";
+      dbgs() << "           cycles: " << Resource.second << '\n';
+    }
+  });
   Owner->notifyInstructionEvent(HWInstructionIssuedEvent(Index, Used));
 }
 
@@ -446,4 +454,20 @@ void Scheduler::notifyInstructionReady(u
 void Scheduler::notifyResourceAvailable(const ResourceRef &RR) {
   Owner->notifyResourceAvailable(RR);
 }
+
+void Scheduler::notifyReservedBuffers(ArrayRef<uint64_t> Buffers) {
+  SmallVector<unsigned, 4> BufferIDs(Buffers.begin(), Buffers.end());
+  std::transform(
+      Buffers.begin(), Buffers.end(), BufferIDs.begin(),
+      [&](uint64_t Op) { return Resources->resolveResourceMask(Op); });
+  Owner->notifyReservedBuffers(BufferIDs);
+}
+
+void Scheduler::notifyReleasedBuffers(ArrayRef<uint64_t> Buffers) {
+  SmallVector<unsigned, 4> BufferIDs(Buffers.begin(), Buffers.end());
+  std::transform(
+      Buffers.begin(), Buffers.end(), BufferIDs.begin(),
+      [&](uint64_t Op) { return Resources->resolveResourceMask(Op); });
+  Owner->notifyReleasedBuffers(BufferIDs);
+}
 } // namespace mca

Modified: llvm/trunk/tools/llvm-mca/Scheduler.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/tools/llvm-mca/Scheduler.h?rev=328011&r1=328010&r2=328011&view=diff
==============================================================================
--- llvm/trunk/tools/llvm-mca/Scheduler.h (original)
+++ llvm/trunk/tools/llvm-mca/Scheduler.h Tue Mar 20 11:20:39 2018
@@ -138,11 +138,6 @@ class ResourceState {
   // Available slots in the buffer (zero, if this is not a buffered resource).
   unsigned AvailableSlots;
 
-  // Maximum number of buffer slots seen used during one cycle.
-  // This helps tracking dynamic dispatch stalls caused by the lack of
-  // entries in the scheduler's queue.
-  unsigned MaxUsedSlots;
-
   // True if this is resource is currently unavailable.
   // An instruction may "reserve" a resource for a number of cycles.
   // During those cycles, the reserved resource cannot be used for other
@@ -182,14 +177,12 @@ public:
     ReadyMask = ResourceSizeMask;
     BufferSize = Desc.BufferSize;
     AvailableSlots = BufferSize == -1 ? 0U : static_cast<unsigned>(BufferSize);
-    MaxUsedSlots = 0;
     Unavailable = false;
   }
 
   unsigned getProcResourceID() const { return ProcResourceDescIndex; }
   uint64_t getResourceMask() const { return ResourceMask; }
   int getBufferSize() const { return BufferSize; }
-  unsigned getMaxUsedSlots() const { return MaxUsedSlots; }
 
   bool isBuffered() const { return BufferSize > 0; }
   bool isInOrder() const { return BufferSize == 1; }
@@ -244,8 +237,6 @@ public:
   void reserveBuffer() {
     if (AvailableSlots)
       AvailableSlots--;
-    unsigned UsedSlots = static_cast<unsigned>(BufferSize) - AvailableSlots;
-    MaxUsedSlots = std::max(MaxUsedSlots, UsedSlots);
   }
 
   void releaseBuffer() {
@@ -339,8 +330,12 @@ public:
 
   // Returns RS_BUFFER_AVAILABLE if buffered resources are not reserved, and if
   // there are enough available slots in the buffers.
-  ResourceStateEvent
-  canBeDispatched(const llvm::ArrayRef<uint64_t> Buffers) const;
+  ResourceStateEvent canBeDispatched(llvm::ArrayRef<uint64_t> Buffers) const;
+
+  // Return the processor resource identifier associated to this Mask.
+  unsigned resolveResourceMask(uint64_t Mask) const {
+    return Resources.find(Mask)->second->getProcResourceID();
+  }
 
   // Consume a slot in every buffered resource from array 'Buffers'. Resource
   // units that are dispatch hazards (i.e. BufferSize=0) are marked as reserved.
@@ -372,15 +367,6 @@ public:
 
   void cycleEvent(llvm::SmallVectorImpl<ResourceRef> &ResourcesFreed);
 
-  void getBuffersUsage(std::vector<BufferUsageEntry> &Usage) const {
-    for (const std::pair<uint64_t, UniqueResourceState> &Resource : Resources) {
-      const ResourceState &RS = *Resource.second;
-      if (RS.isBuffered())
-        Usage.emplace_back(std::pair<unsigned, unsigned>(RS.getProcResourceID(),
-                                                         RS.getMaxUsedSlots()));
-    }
-  }
-
 #ifndef NDEBUG
   void dump() const {
     for (const std::pair<uint64_t, UniqueResourceState> &Resource : Resources)
@@ -439,6 +425,11 @@ class Scheduler {
   void notifyInstructionReady(unsigned Index);
   void notifyResourceAvailable(const ResourceRef &RR);
 
+  // Notify the Backend that buffered resources were consumed.
+  void notifyReservedBuffers(llvm::ArrayRef<uint64_t> Buffers);
+  // Notify the Backend that buffered resources were freed.
+  void notifyReleasedBuffers(llvm::ArrayRef<uint64_t> Buffers);
+
   /// Issue instructions from the ready queue by giving priority to older
   /// instructions.
   void issue();
@@ -498,10 +489,6 @@ public:
 
   void cycleEvent(unsigned Cycle);
 
-  void getBuffersUsage(std::vector<BufferUsageEntry> &Usage) const {
-    Resources->getBuffersUsage(Usage);
-  }
-
 #ifndef NDEBUG
   void dump() const;
 #endif



