[llvm] [llvm][ARM] Add a cortex-m4f alignment hazard recognizer (PR #126991)

Jon Roelofs via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 13 11:25:02 PST 2025


================
@@ -266,3 +271,159 @@ void ARMBankConflictHazardRecognizer::EmitInstruction(SUnit *SU) {
 void ARMBankConflictHazardRecognizer::AdvanceCycle() { Accesses.clear(); }
 
 void ARMBankConflictHazardRecognizer::RecedeCycle() { Accesses.clear(); }
+
+#define DEBUG_TYPE "cortex-m4-alignment-hazard-rec"
+
+STATISTIC(NumNoops, "Number of noops inserted");
+
+static cl::opt<bool> LoopsOnly(DEBUG_TYPE "-loops-only", cl::Hidden,
+                               cl::init(true),
+                               cl::desc("Emit noops only in loops"));
+
+static cl::opt<bool>
+    InnermostLoopsOnly(DEBUG_TYPE "-innermost-loops-only", cl::Hidden,
+                       cl::init(true),
+                       cl::desc("Emit noops only in innermost loops"));
+
+void ARMCortexM4AlignmentHazardRecognizer::Reset() { Offset = 0; }
+
+ARMCortexM4AlignmentHazardRecognizer::ARMCortexM4AlignmentHazardRecognizer(
+    const MCSubtargetInfo &STI)
+    : STI(STI), MBB(nullptr), MF(nullptr), Offset(0), Advanced(false),
+      EmittingNoop(false) {
+  MaxLookAhead = 1;
+}
+
+void ARMCortexM4AlignmentHazardRecognizer::EmitInstruction(SUnit *SU) {
+  if (!SU->isInstr())
+    return;
+
+  MachineInstr *MI = SU->getInstr();
+  assert(MI);
+  return EmitInstruction(MI);
+}
+
+void ARMCortexM4AlignmentHazardRecognizer::EmitInstruction(MachineInstr *MI) {
+  if (MI->isDebugInstr())
+    return;
+
+  unsigned Size = MI->getDesc().getSize();
+  Offset += Size;
+
+  // If the previous instruction triggered a hazard, then we are inserting a
+  // noop. Flag the preceding instruction so the AsmPrinter prints a comment
+  // marking the alignment hazard.
+  if (EmittingNoop)
+    if (MachineInstr *Prev = MI->getPrevNode())
+      Prev->setAsmPrinterFlag(ARM::M4F_ALIGNMENT_HAZARD);
+
+  EmittingNoop = false;
+}
+
+ScheduleHazardRecognizer::HazardType
+ARMCortexM4AlignmentHazardRecognizer::getHazardType(SUnit *SU,
+                                                    int /*Ignored*/) {
+  if (!SU->isInstr())
+    return HazardType::NoHazard;
+
+  MachineInstr *MI = SU->getInstr();
+  assert(MI);
+  return getHazardTypeAssumingOffset(MI, Offset);
+}
+
+ScheduleHazardRecognizer::HazardType
+ARMCortexM4AlignmentHazardRecognizer::getHazardTypeAssumingOffset(
+    MachineInstr *MI, size_t AssumedOffset) {
+  if (Advanced) {
+    Advanced = false;
+    return HazardType::NoHazard;
+  }
+
+  if (AssumedOffset % 4 == 0)
+    return HazardType::NoHazard;
+
+  const MCSchedModel &SCModel = STI.getSchedModel();
+  const MachineFunction *MF = MI->getParent()->getParent();
+  const ARMBaseInstrInfo &TII =
+      *static_cast<const ARMBaseInstrInfo *>(MF->getSubtarget().getInstrInfo());
+  int Latency = SCModel.computeInstrLatency<MCSubtargetInfo, MCInstrInfo,
+                                            InstrItineraryData, MachineInstr>(
+      STI, TII, *MI);
+  if (!Latency)
+    return HazardType::NoHazard;
+
+  const MCInstrDesc &MCID = MI->getDesc();
+  unsigned Domain = MCID.TSFlags & ARMII::DomainMask;
+
+  bool SingleCycleFP =
+      Latency == 1 && (Domain & (ARMII::DomainNEON | ARMII::DomainVFP));
+  if (SingleCycleFP)
+    return HazardType::NoopHazard;
+
+  if (MCID.getSize() == 4 && (MI->mayLoad() || MI->mayStore()))
----------------
jroelofs wrote:

This one is documented in the TRM (Technical Reference Manual). I added a comment.

https://github.com/llvm/llvm-project/pull/126991
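
For context, a minimal sketch of the check being discussed, assuming it falls
through to a noop hazard like the single-cycle FP case earlier in the hunk; the
comment wording below is illustrative only and is not the comment that was
added in the PR:

    // Illustrative comment (not the PR's wording): a 32-bit load/store that
    // starts at a non-word-aligned offset is treated as an alignment hazard,
    // so the recognizer requests a noop to restore 4-byte alignment.
    if (MCID.getSize() == 4 && (MI->mayLoad() || MI->mayStore()))
      return HazardType::NoopHazard;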

