[llvm] [Hexagon] Add Hexagon Load Widening Pass (PR #116330)

via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 15 22:26:19 PST 2024


llvmbot wrote:



@llvm/pr-subscribers-backend-hexagon

Author: Yashas Andaluri (yandalur)

Changes:

Extend the existing store widening pass to widen load instructions.

This patch also borrows the alias check algorithm from AMDGPU's load/store widening pass.

The widened load instruction is inserted before the first candidate load, and the widened store instruction is inserted after the last candidate store. This placement avoids having to move uses/defs when replacing the loads/stores with their widened equivalents.
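
As an illustration, here is a minimal IR sketch of the pattern the load widening targets (function and value names are hypothetical, not taken from the patch's tests):

```llvm
; Two adjacent 4-byte loads off the same base pointer. With the first access
; 8-byte aligned, both can be covered by a single 8-byte load whose halves
; are then copied out of the register pair.
define i32 @sum_pair(ptr %p) {
  %lo = load i32, ptr %p, align 8
  %hi.addr = getelementptr i8, ptr %p, i32 4
  %hi = load i32, ptr %hi.addr, align 4
  %sum = add i32 %lo, %hi
  ret i32 %sum
}
```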

The pass has also been extended to
* Generate 64-bit widened stores
* Handle 32-bit post increment load/store
* Handle stores of non-immediate values
* Handle stores where the offset is a GlobalValue

---

Patch is 67.15 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/116330.diff


9 Files Affected:

- (modified) llvm/lib/Target/Hexagon/CMakeLists.txt (+1-1) 
- (added) llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp (+915) 
- (removed) llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp (-606) 
- (modified) llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp (+6) 
- (added) llvm/test/CodeGen/Hexagon/load-widen.ll (+43) 
- (modified) llvm/test/CodeGen/Hexagon/store-widen-aliased-load.ll (+6-10) 
- (added) llvm/test/CodeGen/Hexagon/widen-alias.ll (+97) 
- (added) llvm/test/CodeGen/Hexagon/widen-not-load.ll (+61) 
- (added) llvm/test/CodeGen/Hexagon/widen-volatile.ll (+34) 


``````````diff
diff --git a/llvm/lib/Target/Hexagon/CMakeLists.txt b/llvm/lib/Target/Hexagon/CMakeLists.txt
index e8ec93dd5ee63c..d758260a8ab5db 100644
--- a/llvm/lib/Target/Hexagon/CMakeLists.txt
+++ b/llvm/lib/Target/Hexagon/CMakeLists.txt
@@ -59,7 +59,7 @@ add_llvm_target(HexagonCodeGen
   HexagonSelectionDAGInfo.cpp
   HexagonSplitConst32AndConst64.cpp
   HexagonSplitDouble.cpp
-  HexagonStoreWidening.cpp
+  HexagonLoadStoreWidening.cpp
   HexagonSubtarget.cpp
   HexagonTargetMachine.cpp
   HexagonTargetObjectFile.cpp
diff --git a/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp b/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp
new file mode 100644
index 00000000000000..1a60d0e13057e9
--- /dev/null
+++ b/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp
@@ -0,0 +1,915 @@
+//===---HexagonLoadStoreWidening.cpp---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// HexagonStoreWidening:
+// Replace sequences of "narrow" stores to adjacent memory locations with
+// fewer "wide" stores that have the same effect.
+// For example, replace:
+//   S4_storeirb_io  %100, 0, 0   ; store-immediate-byte
+//   S4_storeirb_io  %100, 1, 0   ; store-immediate-byte
+// with
+//   S4_storeirh_io  %100, 0, 0   ; store-immediate-halfword
+// The above is the general idea.  The actual cases handled by the code
+// may be a bit more complex.
+// The purpose of this pass is to reduce the number of outstanding stores,
+// or as one could say, "reduce store queue pressure".  Also, wide stores
+// mean fewer stores, and since there are only two memory instructions allowed
+// per packet, it also means fewer packets, and ultimately fewer cycles.
+//
+// HexagonLoadWidening does the same thing as HexagonStoreWidening but
+// for loads. Here, we try to replace 4-byte loads with register-pair loads.
+// For example:
+// Replace
+//   %2:intregs = L2_loadri_io %1:intregs, 0 :: (load (s32) from %ptr1, align 8)
+//   %3:intregs = L2_loadri_io %1:intregs, 4 :: (load (s32) from %ptr2)
+// with
+//   %4:doubleregs = L2_loadrd_io %1:intregs, 0 :: (load (s64) from %ptr1)
+//   %2:intregs = COPY %4.isub_lo:doubleregs
+//   %3:intregs = COPY %4.isub_hi:doubleregs
+//
+// LoadWidening for 8 and 16-bit loads is not useful as we end up generating 2N
+// insts to replace N loads: 1 widened load, N bitwise ANDs, and N - 1 shifts.
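+// For example, widening two adjacent byte loads into one halfword load would
+// still need two ANDs and one shift to extract the original bytes: four
+// instructions where there were two.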
+
+//===---------------------------------------------------------------------===//
+
+#include "HexagonInstrInfo.h"
+#include "HexagonRegisterInfo.h"
+#include "HexagonSubtarget.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+#include <vector>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "hexagon-load-store-widening"
+
+static cl::opt<unsigned> MaxMBBSizeForLoadStoreWidening(
+    "max-bb-size-for-load-store-widening", cl::Hidden, cl::init(1000),
+    cl::desc("Limit block size to analyze in load/store widening pass"));
+
+namespace llvm {
+
+FunctionPass *createHexagonStoreWidening();
+FunctionPass *createHexagonLoadWidening();
+void initializeHexagonStoreWideningPass(PassRegistry &);
+void initializeHexagonLoadWideningPass(PassRegistry &);
+
+} // end namespace llvm
+
+namespace {
+
+struct HexagonLoadStoreWidening {
+  enum WideningMode { Store, Load };
+  const HexagonInstrInfo *TII;
+  const HexagonRegisterInfo *TRI;
+  MachineRegisterInfo *MRI;
+  AliasAnalysis *AA;
+  MachineFunction *MF;
+
+public:
+  HexagonLoadStoreWidening(const HexagonInstrInfo *TII,
+                           const HexagonRegisterInfo *TRI,
+                           MachineRegisterInfo *MRI, AliasAnalysis *AA,
+                           MachineFunction *MF, bool StoreMode)
+      : TII(TII), TRI(TRI), MRI(MRI), AA(AA), MF(MF),
+        Mode(StoreMode ? WideningMode::Store : WideningMode::Load),
+        HII(MF->getSubtarget<HexagonSubtarget>().getInstrInfo()) {}
+
+  bool run();
+
+private:
+  const bool Mode;
+  const unsigned MaxWideSize = 8;
+  const HexagonInstrInfo *HII = nullptr;
+
+  using InstrSet = SmallPtrSet<MachineInstr *, 16>;
+  using InstrGroup = SmallVector<MachineInstr *, 8>;
+  using InstrGroupList = SmallVector<InstrGroup, 8>;
+
+  InstrSet ProcessedInsts;
+
+  unsigned getBaseAddressRegister(const MachineInstr *MI);
+  int64_t getOffset(const MachineInstr *MI);
+  int64_t getPostIncrementValue(const MachineInstr *MI);
+  bool handledInstType(const MachineInstr *MI);
+
+  void createGroup(MachineInstr *BaseInst, InstrGroup &Group);
+  void createGroups(MachineBasicBlock &MBB, InstrGroupList &StoreGroups);
+  bool processBasicBlock(MachineBasicBlock &MBB);
+  bool processGroup(InstrGroup &Group);
+  bool selectInsts(InstrGroup::iterator Begin, InstrGroup::iterator End,
+                   InstrGroup &OG, unsigned &TotalSize, unsigned MaxSize);
+  bool createWideInsts(InstrGroup &OG, InstrGroup &NG, unsigned TotalSize);
+  bool createWideStores(InstrGroup &OG, InstrGroup &NG, unsigned TotalSize);
+  bool createWideLoads(InstrGroup &OG, InstrGroup &NG, unsigned TotalSize);
+  bool replaceInsts(InstrGroup &OG, InstrGroup &NG);
+  bool areAdjacent(const MachineInstr *S1, const MachineInstr *S2);
+  bool canSwapInstructions(const MachineInstr *A, const MachineInstr *B);
+};
+
+struct HexagonStoreWidening : public MachineFunctionPass {
+  static char ID;
+
+  HexagonStoreWidening() : MachineFunctionPass(ID) {
+    initializeHexagonStoreWideningPass(*PassRegistry::getPassRegistry());
+  }
+
+  StringRef getPassName() const override { return "Hexagon Store Widening"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<AAResultsWrapperPass>();
+    AU.addPreserved<AAResultsWrapperPass>();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  bool runOnMachineFunction(MachineFunction &MFn) override {
+    if (skipFunction(MFn.getFunction()))
+      return false;
+
+    auto &ST = MFn.getSubtarget<HexagonSubtarget>();
+    const HexagonInstrInfo *TII = ST.getInstrInfo();
+    const HexagonRegisterInfo *TRI = ST.getRegisterInfo();
+    MachineRegisterInfo *MRI = &MFn.getRegInfo();
+    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
+
+    return HexagonLoadStoreWidening(TII, TRI, MRI, AA, &MFn, true).run();
+  }
+};
+
+struct HexagonLoadWidening : public MachineFunctionPass {
+  static char ID;
+
+  HexagonLoadWidening() : MachineFunctionPass(ID) {
+    initializeHexagonLoadWideningPass(*PassRegistry::getPassRegistry());
+  }
+
+  StringRef getPassName() const override { return "Hexagon Load Widening"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<AAResultsWrapperPass>();
+    AU.addPreserved<AAResultsWrapperPass>();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  bool runOnMachineFunction(MachineFunction &MFn) override {
+    if (skipFunction(MFn.getFunction()))
+      return false;
+
+    auto &ST = MFn.getSubtarget<HexagonSubtarget>();
+    const HexagonInstrInfo *TII = ST.getInstrInfo();
+    const HexagonRegisterInfo *TRI = ST.getRegisterInfo();
+    MachineRegisterInfo *MRI = &MFn.getRegInfo();
+    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
+    return HexagonLoadStoreWidening(TII, TRI, MRI, AA, &MFn, false).run();
+  }
+};
+
+char HexagonStoreWidening::ID = 0;
+char HexagonLoadWidening::ID = 0;
+
+} // end anonymous namespace
+
+INITIALIZE_PASS_BEGIN(HexagonStoreWidening, "hexagon-widen-stores",
+                      "Hexagon Store Widening", false, false)
+INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_END(HexagonStoreWidening, "hexagon-widen-stores",
+                    "Hexagon Store Widening", false, false)
+
+INITIALIZE_PASS_BEGIN(HexagonLoadWidening, "hexagon-widen-loads",
+                      "Hexagon Load Widening", false, false)
+INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_END(HexagonLoadWidening, "hexagon-widen-loads",
+                    "Hexagon Load Widening", false, false)
+
+static const MachineMemOperand &getMemTarget(const MachineInstr *MI) {
+  assert(!MI->memoperands_empty() && "Expecting memory operands");
+  return **MI->memoperands_begin();
+}
+
+unsigned
+HexagonLoadStoreWidening::getBaseAddressRegister(const MachineInstr *MI) {
+  assert(HexagonLoadStoreWidening::handledInstType(MI) && "Unhandled opcode");
+  unsigned Base, Offset;
+  HII->getBaseAndOffsetPosition(*MI, Base, Offset);
+  const MachineOperand &MO = MI->getOperand(Base);
+  assert(MO.isReg() && "Expecting register operand");
+  return MO.getReg();
+}
+
+int64_t HexagonLoadStoreWidening::getOffset(const MachineInstr *MI) {
+  assert(HexagonLoadStoreWidening::handledInstType(MI) && "Unhandled opcode");
+
+  // On Hexagon, post-incs always have an offset of 0;
+  // there is no offset operand to post-incs.
+  if (HII->isPostIncrement(*MI))
+    return 0;
+
+  unsigned Base, Offset;
+
+  HII->getBaseAndOffsetPosition(*MI, Base, Offset);
+  const MachineOperand &MO = MI->getOperand(Offset);
+  switch (MO.getType()) {
+  case MachineOperand::MO_Immediate:
+    return MO.getImm();
+  case MachineOperand::MO_GlobalAddress:
+    return MO.getOffset();
+  default:
+    break;
+  }
+  llvm_unreachable("Expecting an immediate or global operand");
+}
+
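+// Returns the post-increment amount, e.g. #4 in "r0 = memw(r1++#4)".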
+inline int64_t
+HexagonLoadStoreWidening::getPostIncrementValue(const MachineInstr *MI) {
+  unsigned Base, PostIncIdx;
+  HII->getBaseAndOffsetPosition(*MI, Base, PostIncIdx);
+  const MachineOperand &MO = MI->getOperand(PostIncIdx);
+  return MO.getImm();
+}
+
+// Filtering function: any loads/stores whose opcodes are not "approved" by
+// this function will not be subjected to widening.
+inline bool HexagonLoadStoreWidening::handledInstType(const MachineInstr *MI) {
+  unsigned Opc = MI->getOpcode();
+  if (Mode == WideningMode::Store) {
+    switch (Opc) {
+    case Hexagon::S4_storeirb_io:
+    case Hexagon::S4_storeirh_io:
+    case Hexagon::S4_storeiri_io:
+    case Hexagon::S2_storeri_io:
+      // Base address must be a register. (Implement FI later.)
+      return MI->getOperand(0).isReg();
+    case Hexagon::S2_storeri_pi:
+      return MI->getOperand(1).isReg();
+    }
+  } else {
+    // LoadWidening for 8 and 16 bit loads needs 2N instructions to replace N
+    // loads, so we only widen 32 bit loads, where we don't need to select the
+    // right bits with AND and SHIFT ops.
+    switch (Opc) {
+    case Hexagon::L2_loadri_io:
+      // Base address must be a register and offset must be immediate.
+      return !MI->memoperands_empty() && MI->getOperand(1).isReg() &&
+             MI->getOperand(2).isImm();
+    case Hexagon::L2_loadri_pi:
+      return !MI->memoperands_empty() && MI->getOperand(2).isReg();
+    }
+  }
+  return false;
+}
+
+static void addDefsUsesToList(const MachineInstr *MI,
+                              DenseSet<Register> &RegDefs,
+                              DenseSet<Register> &RegUses) {
+  for (const auto &Op : MI->operands()) {
+    if (!Op.isReg())
+      continue;
+    if (Op.isDef())
+      RegDefs.insert(Op.getReg());
+    if (Op.readsReg())
+      RegUses.insert(Op.getReg());
+  }
+}
+
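+// Check whether instructions A and B can be safely reordered with respect to
+// each other: they must not access aliasing memory (unless neither may
+// store), and B must not read or write a register that A defines, nor write
+// a register that A reads.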
+bool HexagonLoadStoreWidening::canSwapInstructions(const MachineInstr *A,
+                                                   const MachineInstr *B) {
+  DenseSet<Register> ARegDefs;
+  DenseSet<Register> ARegUses;
+  addDefsUsesToList(A, ARegDefs, ARegUses);
+  if (A->mayLoadOrStore() && B->mayLoadOrStore() &&
+      (A->mayStore() || B->mayStore()) && A->mayAlias(AA, *B, true))
+    return false;
+  for (const auto &BOp : B->operands()) {
+    if (!BOp.isReg())
+      continue;
+    if ((BOp.isDef() || BOp.readsReg()) && ARegDefs.contains(BOp.getReg()))
+      return false;
+    if (BOp.isDef() && ARegUses.contains(BOp.getReg()))
+      return false;
+  }
+  return true;
+}
+
+// Inspect a machine basic block, and generate groups out of loads/stores
+// encountered in the block.
+//
+// A load/store group is a group of loads or stores that use the same base
+// register, and which can be reordered within that group without altering the
+// semantics of the program.  A single group could be widened as
+// a whole, if there existed a single load/store instruction with the same
+// semantics as the entire group.  In many cases, a single group may need more
+// than one wide load or store.
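+// For example, four byte stores to offsets 0..3 off one base register form a
+// group that a single word store can cover, while a group covering 6 bytes
+// would need two wide stores (a word plus a halfword).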
+void HexagonLoadStoreWidening::createGroups(MachineBasicBlock &MBB,
+                                            InstrGroupList &StoreGroups) {
+  // Traverse all instructions and if we encounter
+  // a load/store, then try to create a group starting at that instruction
+  // i.e. a sequence of independent loads/stores that can be widened.
+  for (auto I = MBB.begin(); I != MBB.end(); ++I) {
+    MachineInstr *MI = &(*I);
+    if (!handledInstType(MI))
+      continue;
+    if (ProcessedInsts.count(MI))
+      continue;
+
+    // Found a load/store.  Try to create a group starting here.
+    InstrGroup G;
+    createGroup(MI, G);
+    if (G.size() > 1)
+      StoreGroups.push_back(G);
+  }
+}
+
+// Create a single load/store group.  The insts need to be independent of each
+// other, and there cannot be other instructions between them that could read
+// or modify the memory they access.
+void HexagonLoadStoreWidening::createGroup(MachineInstr *BaseInst,
+                                           InstrGroup &Group) {
+  assert(handledInstType(BaseInst) && "Unexpected instruction");
+  unsigned BaseReg = getBaseAddressRegister(BaseInst);
+  InstrGroup Other;
+
+  Group.push_back(BaseInst);
+  LLVM_DEBUG(dbgs() << "BaseInst: "; BaseInst->dump());
+  auto End = BaseInst->getParent()->end();
+  auto I = BaseInst->getIterator();
+
+  while (true) {
+    I = std::next(I);
+    if (I == End)
+      break;
+    MachineInstr *MI = &(*I);
+
+    // Assume calls are aliased to everything.
+    if (MI->isCall() || MI->hasUnmodeledSideEffects() ||
+        MI->hasOrderedMemoryRef())
+      return;
+
+    if (!handledInstType(MI)) {
+      if (MI->mayLoadOrStore())
+        Other.push_back(MI);
+      continue;
+    }
+
+    // We have a handledInstType instruction.
+    // If this load/store instruction aliases with anything already in the
+    // group, terminate the group now.
+    for (auto GI : Group)
+      if (GI->mayAlias(AA, *MI, true))
+        return;
+    if (Mode == WideningMode::Load) {
+      // Check if current load MI can be moved to the first load instruction
+      // in Group. If any load instruction aliases with memory instructions in
+      // Other, terminate the group.
+      for (auto MemI : Other)
+        if (!canSwapInstructions(MI, MemI))
+          return;
+    } else {
+      // Check if store instructions in the group can be moved to current
+      // store MI. If any store instruction aliases with memory instructions
+      // in Other, terminate the group.
+      for (auto MemI : Other) {
+        if (std::distance(Group.back()->getIterator(), MemI->getIterator()) <=
+            0)
+          continue;
+        for (auto GI : Group)
+          if (!canSwapInstructions(MemI, GI))
+            return;
+      }
+    }
+
+    unsigned BR = getBaseAddressRegister(MI);
+    if (BR == BaseReg) {
+      LLVM_DEBUG(dbgs() << "Added MI to group: "; MI->dump());
+      Group.push_back(MI);
+      ProcessedInsts.insert(MI);
+    }
+  } // while
+}
+
+// Check if load/store instructions S1 and S2 are adjacent.  More precisely,
+// S2 has to access memory immediately following that accessed by S1.
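+// For example, a 4-byte access at offset 0 is adjacent to an access at
+// offset 4 off the same base register.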
+bool HexagonLoadStoreWidening::areAdjacent(const MachineInstr *S1,
+                                           const MachineInstr *S2) {
+  if (!handledInstType(S1) || !handledInstType(S2))
+    return false;
+
+  const MachineMemOperand &S1MO = getMemTarget(S1);
+
+  // Currently only handling accesses with constant offsets.
+  int Off1 = getOffset(S1);
+  int Off2 = getOffset(S2);
+
+  return (Off1 >= 0) ? Off1 + S1MO.getSize().getValue() == unsigned(Off2)
+                     : int(Off1 + S1MO.getSize().getValue()) == Off2;
+}
+
+/// Given a sequence of adjacent loads/stores, and a maximum size of a single
+/// wide inst, pick a group of insts that can be replaced by a single load/store
+/// of size not exceeding MaxSize.  The selected sequence will be recorded
+/// in OG ("old group" of instructions).
+/// OG should be empty on entry, and should be left empty if the function
+/// fails.
+bool HexagonLoadStoreWidening::selectInsts(InstrGroup::iterator Begin,
+                                           InstrGroup::iterator End,
+                                           InstrGroup &OG, unsigned &TotalSize,
+                                           unsigned MaxSize) {
+  assert(Begin != End && "No instructions to analyze");
+  assert(OG.empty() && "Old group not empty on entry");
+
+  if (std::distance(Begin, End) <= 1)
+    return false;
+
+  MachineInstr *FirstMI = *Begin;
+  assert(!FirstMI->memoperands_empty() && "Expecting some memory operands");
+  const MachineMemOperand &FirstMMO = getMemTarget(FirstMI);
+  if (!FirstMMO.getType().isValid())
+    return false;
+
+  unsigned Alignment = FirstMMO.getAlign().value();
+  unsigned SizeAccum = FirstMMO.getSize().getValue();
+  unsigned FirstOffset = getOffset(FirstMI);
+
+  // The initial value of SizeAccum should always be a power of 2.
+  assert(isPowerOf2_32(SizeAccum) && "First store size not a power of 2");
+
+  // If the size of the first load/store equals or exceeds the limit, do
+  // nothing.
+  if (SizeAccum >= MaxSize)
+    return false;
+
+  // If the size of the first load/store is greater than or equal to the
+  // alignment of the accessed address, then the inst cannot be made any wider.
+  if (SizeAccum >= Alignment) {
+    LLVM_DEBUG(dbgs() << "Size of load/store greater than or equal to its "
+                         "alignment\n");
+    return false;
+  }
+
+  // The offset of a load/store will put restrictions on how wide the inst can
+  // be.  Offsets in loads/stores of size 2^n bytes need to have the n lowest
+  // bits be 0.  If the first inst already exhausts the offset limits, quit.
+  // Test this by checking if the next wider size would exceed the limit.
+  // For post-increment instructions, the increment amount needs to follow the
+  // same rule.
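+  // For example, a 4-byte access at offset 4 cannot begin an 8-byte access:
+  // the widened offset would need its low 3 bits clear, hence the
+  // (2 * SizeAccum - 1) mask below.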
+  unsigned OffsetOrIncVal = 0;
+  if (HII->isPostIncrement(*FirstMI))
+    OffsetOrIncVal = getPostIncrementValue(FirstMI);
+  else
+    OffsetOrIncVal = FirstOffset;
+  if ((2 * SizeAccum - 1) & OffsetOrIncVal) {
+    LLVM_DEBUG(dbgs() << "Instruction cannot be widened as the offset/postinc"
+                      << " value: " << OffsetOrIncVal
+                      << " is invalid in the widened version\n");
+    return false;
+  }
+
+  OG.push_back(FirstMI);
+  MachineInstr *S1 = FirstMI;
+
+  // Pow2Num will be the largest number of elements in OG such that the sum
+  // of sizes of loads/stores 0...Pow2Num-1 will be a power of 2.
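+  // For example, with byte-sized accesses, Pow2Num advances at 1, 2, 4 and 8
+  // accumulated elements (total sizes 1, 2, 4 and 8 bytes).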
+  unsigned Pow2Num = 1;
+  unsigned Pow2Size = SizeAccum;
+  bool HavePostInc = HII->isPostIncrement(*S1);
+
+  // Be greedy: keep accumulating insts as long as they access adjacent
+  // memory locations, and as long as the total number of bytes accessed
+  // does not exceed the limit (MaxSize).
+  // Keep track of when the total size covered is a power of 2, since
+  // this is a size a single load/store can cover.
+  for (InstrGroup::iterator I = Begin + 1; I != End; ++I) {
+    MachineInstr *S2 = *I;
+    // Insts are sorted, so if S1 and S2 are not adjacent, there won't be
+    // any othe...
[truncated]

``````````
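
The diff is truncated before the widening helpers themselves. As a rough sketch of what emitting a widened load plus subregister copies looks like in MachineIR terms (assuming the includes already pulled in by the pass; the helper name and its parameters are hypothetical, not the patch's own code):

```cpp
// Hypothetical sketch: emit one 64-bit load and copy its halves into the
// registers the two original 32-bit loads defined.
static void emitWidenedLoadPair(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator InsertPt,
                                const DebugLoc &DL, const HexagonInstrInfo *HII,
                                MachineRegisterInfo *MRI, Register BaseReg,
                                int64_t Offset, MachineMemOperand *WideMMO,
                                Register LoDef, Register HiDef) {
  // One register-pair load replaces both narrow loads.
  Register WideReg = MRI->createVirtualRegister(&Hexagon::DoubleRegsRegClass);
  BuildMI(MBB, InsertPt, DL, HII->get(Hexagon::L2_loadrd_io), WideReg)
      .addReg(BaseReg)
      .addImm(Offset)
      .addMemOperand(WideMMO);
  // The original defs become subregister copies out of the pair.
  BuildMI(MBB, InsertPt, DL, HII->get(TargetOpcode::COPY), LoDef)
      .addReg(WideReg, 0, Hexagon::isub_lo);
  BuildMI(MBB, InsertPt, DL, HII->get(TargetOpcode::COPY), HiDef)
      .addReg(WideReg, 0, Hexagon::isub_hi);
}
```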



https://github.com/llvm/llvm-project/pull/116330

