[llvm] [llvm][ARM] Add a cortex-m4f alignment hazard recognizer (PR #126991)
Jon Roelofs via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 12 16:59:15 PST 2025
https://github.com/jroelofs created https://github.com/llvm/llvm-project/pull/126991
https://developer.arm.com/documentation/ka006138/latest
"A long sequence of T32 single-cycle floating-point instructions aligned on odd halfword boundaries will experience a performance drop. Specifically, one stall cycle is inserted for every three instructions executed."
To avoid this pipeline hazard, we add a new hazard recognizer that tracks instruction alignment and informs the instruction scheduler of the stalls. We also run the same hazard recognizer very late in the pass pipeline to drive nop placement, fixing up the alignment of single-cycle T32 floating-point instructions to even halfword boundaries.
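Sketch (for review convenience only, not part of the patch): the core bookkeeping is offset-parity tracking. Assuming the block starts word-aligned, walk the instructions keeping a running byte offset, and request one 16-bit nop whenever a single-cycle T32 floating-point instruction would otherwise begin at an odd halfword boundary (offset % 4 != 0). The Insn record and countRealignmentNops below are illustrative names only; the recognizer itself performs the equivalent check in getHazardTypeAssumingOffset, using the scheduling model to identify single-cycle FP/NEON instructions.

  #include <cstdio>
  #include <vector>

  struct Insn {
    unsigned SizeInBytes; // 2 for a narrow T32 encoding, 4 for a wide one
    bool SingleCycleFP;   // single-cycle FP instruction (e.g. VMOVSR)
  };

  // Count the 16-bit nops needed to keep every single-cycle FP instruction
  // word-aligned within a 4-byte-aligned block.
  static unsigned countRealignmentNops(const std::vector<Insn> &Block) {
    unsigned Offset = 0, Nops = 0;
    for (const Insn &I : Block) {
      if (I.SingleCycleFP && Offset % 4 != 0) {
        ++Nops;      // one T2 NOP (2 bytes)...
        Offset += 2; // ...realigns the FP instruction to a word boundary
      }
      Offset += I.SizeInBytes;
    }
    return Nops;
  }

  int main() {
    // A narrow 2-byte shift followed by three wide single-cycle FP
    // instructions: without one leading nop, the whole FP run would sit on
    // odd halfword boundaries and pay roughly one stall per three
    // instructions.
    std::vector<Insn> Block = {{2, false}, {4, true}, {4, true}, {4, true}};
    std::printf("nops needed: %u\n", countRealignmentNops(Block));
    return 0;
  }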
From c0db642705fec1c7fab5cc09f899636445dd238b Mon Sep 17 00:00:00 2001
From: Jon Roelofs <jonathan_roelofs at apple.com>
Date: Wed, 12 Feb 2025 09:01:38 -0800
Subject: [PATCH] [llvm][ARM] Add a cortex-m4f alignment hazard recognizer
https://developer.arm.com/documentation/ka006138/latest
"A long sequence of T32 single-cycle floating-point instructions aligned on odd
halfword boundaries will experience a performance drop. Specifically, one stall
cycle is inserted for every three instructions executed."
To avoid this pipeline hazard, we add a new hazard recognizer that tracks
instruction alignment and informs the instruction scheduler of the stalls. We
also run the same hazard recognizer very late in the pass pipeline to drive
nop placement, fixing up the alignment of single-cycle T32 floating-point
instructions to even halfword boundaries.
---
llvm/lib/Target/ARM/ARMAsmPrinter.cpp | 5 +
llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp | 33 +++
llvm/lib/Target/ARM/ARMBaseInstrInfo.h | 6 +
llvm/lib/Target/ARM/ARMHazardRecognizer.cpp | 161 ++++++++++++++
llvm/lib/Target/ARM/ARMHazardRecognizer.h | 44 ++++
llvm/lib/Target/ARM/ARMInstrInfo.h | 5 +
llvm/lib/Target/ARM/ARMProcessors.td | 3 +
llvm/lib/Target/ARM/ARMSubtarget.cpp | 1 +
llvm/lib/Target/ARM/ARMSubtarget.h | 1 +
llvm/lib/Target/ARM/ARMTargetMachine.cpp | 10 +
llvm/test/CodeGen/ARM/O3-pipeline.ll | 1 +
.../ARM/cortex-m4f-alignment-hazard.mir | 207 ++++++++++++++++++
12 files changed, 477 insertions(+)
create mode 100644 llvm/test/CodeGen/ARM/cortex-m4f-alignment-hazard.mir
diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index b8772e1665f8a..83d6b365605b6 100644
--- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -14,6 +14,7 @@
#include "ARMAsmPrinter.h"
#include "ARM.h"
#include "ARMConstantPoolValue.h"
+#include "ARMInstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
@@ -1445,6 +1446,10 @@ void ARMAsmPrinter::emitInstruction(const MachineInstr *MI) {
InConstantPool = false;
}
+ if (MI->getAsmPrinterFlag(
+ MachineInstr::CommentFlag(ARM::M4F_ALIGNMENT_HAZARD)))
+ OutStreamer->AddComment("cortex-m4f alignment hazard");
+
// Emit unwinding stuff for frame-related instructions
if (Subtarget->isTargetEHABICompatible() &&
MI->getFlag(MachineInstr::FrameSetup))
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 839b7e81f8998..78c3d90854fca 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -151,6 +151,11 @@ ScheduleHazardRecognizer *ARMBaseInstrInfo::CreateTargetMIHazardRecognizer(
MHR->AddHazardRecognizer(
std::make_unique<ARMBankConflictHazardRecognizer>(DAG, 0x4, true));
+ if (Subtarget.isCortexM4() && !DAG->hasVRegLiveness())
+ MHR->AddHazardRecognizer(
+ std::make_unique<ARMCortexM4AlignmentHazardRecognizer>(
+ DAG->MF.getSubtarget()));
+
// Not inserting ARMHazardRecognizerFPMLx because that would change
// legacy behavior
@@ -168,12 +173,25 @@ CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
if (Subtarget.isThumb2() || Subtarget.hasVFP2Base())
MHR->AddHazardRecognizer(std::make_unique<ARMHazardRecognizerFPMLx>());
+ if (Subtarget.isCortexM4())
+ MHR->AddHazardRecognizer(
+ std::make_unique<ARMCortexM4AlignmentHazardRecognizer>(
+ DAG->MF.getSubtarget()));
+
auto BHR = TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
if (BHR)
MHR->AddHazardRecognizer(std::unique_ptr<ScheduleHazardRecognizer>(BHR));
return MHR;
}
+ScheduleHazardRecognizer *ARMBaseInstrInfo::CreateTargetPostRAHazardRecognizer(
+ const MachineFunction &MF) const {
+ if (!Subtarget.isCortexM4())
+ return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(MF);
+
+ return new ARMCortexM4AlignmentHazardRecognizer(MF.getSubtarget());
+}
+
MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
LiveIntervals *LIS) const {
@@ -5455,6 +5473,21 @@ bool ARMBaseInstrInfo::hasNOP() const {
return Subtarget.hasFeature(ARM::HasV6KOps);
}
+void ARMBaseInstrInfo::insertNoop(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const {
+ DebugLoc DL;
+ if (hasNOP()) {
+ BuildMI(MBB, MI, DL, get(ARM::HINT)).addImm(0).addImm(ARMCC::AL).addImm(0);
+ } else {
+ BuildMI(MBB, MI, DL, get(ARM::MOVr))
+ .addReg(ARM::R0)
+ .addReg(ARM::R0)
+ .addImm(ARMCC::AL)
+ .addReg(0)
+ .addReg(0);
+ }
+}
+
bool ARMBaseInstrInfo::isSwiftFastImmShift(const MachineInstr *MI) const {
if (MI->getNumOperands() < 4)
return true;
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
index ae760e881e7fa..396dd8d3b6627 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -121,6 +121,9 @@ class ARMBaseInstrInfo : public ARMGenInstrInfo {
// Return whether the target has an explicit NOP encoding.
bool hasNOP() const;
+ void insertNoop(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const override;
+
// Return the non-pre/post incrementing version of 'Opc'. Return 0
// if there is not such an opcode.
virtual unsigned getUnindexedOpcode(unsigned Opc) const = 0;
@@ -143,6 +146,9 @@ class ARMBaseInstrInfo : public ARMGenInstrInfo {
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
const ScheduleDAG *DAG) const override;
+ ScheduleHazardRecognizer *
+ CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const override;
+
// Branch analysis.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
diff --git a/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp b/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
index e78f228a912c0..3df8317cedb79 100644
--- a/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
+++ b/llvm/lib/Target/ARM/ARMHazardRecognizer.cpp
@@ -8,13 +8,18 @@
#include "ARMHazardRecognizer.h"
#include "ARMBaseInstrInfo.h"
+#include "ARMBaseRegisterInfo.h"
+#include "ARMInstrInfo.h"
#include "ARMSubtarget.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
+#include <algorithm>
using namespace llvm;
@@ -266,3 +271,159 @@ void ARMBankConflictHazardRecognizer::EmitInstruction(SUnit *SU) {
void ARMBankConflictHazardRecognizer::AdvanceCycle() { Accesses.clear(); }
void ARMBankConflictHazardRecognizer::RecedeCycle() { Accesses.clear(); }
+
+#define DEBUG_TYPE "cortex-m4-alignment-hazard-rec"
+
+STATISTIC(NumNoops, "Number of noops inserted");
+
+static cl::opt<bool> LoopsOnly(DEBUG_TYPE "-loops-only", cl::Hidden,
+ cl::init(true),
+ cl::desc("Emit nops only in loops"));
+
+static cl::opt<bool>
+ InnermostLoopsOnly(DEBUG_TYPE "-innermost-loops-only", cl::Hidden,
+ cl::init(true),
+ cl::desc("Emit noops only in innermost loops"));
+
+void ARMCortexM4AlignmentHazardRecognizer::Reset() { Offset = 0; }
+
+ARMCortexM4AlignmentHazardRecognizer::ARMCortexM4AlignmentHazardRecognizer(
+ const MCSubtargetInfo &STI)
+ : STI(STI), MBB(nullptr), MF(nullptr), Offset(0), Advanced(false),
+ EmittingNoop(false) {
+ MaxLookAhead = 1;
+}
+
+void ARMCortexM4AlignmentHazardRecognizer::EmitInstruction(SUnit *SU) {
+ if (!SU->isInstr())
+ return;
+
+ MachineInstr *MI = SU->getInstr();
+ assert(MI);
+ return EmitInstruction(MI);
+}
+
+void ARMCortexM4AlignmentHazardRecognizer::EmitInstruction(MachineInstr *MI) {
+ if (MI->isDebugInstr())
+ return;
+
+ unsigned Size = MI->getDesc().getSize();
+ Offset += Size;
+
+ // If this instruction had a hazard, then a nop was just inserted before it.
+ // Mark that nop with an AsmPrinter comment.
+ if (EmittingNoop)
+ if (MachineInstr *Prev = MI->getPrevNode())
+ Prev->setAsmPrinterFlag(ARM::M4F_ALIGNMENT_HAZARD);
+
+ EmittingNoop = false;
+}
+
+ScheduleHazardRecognizer::HazardType
+ARMCortexM4AlignmentHazardRecognizer::getHazardType(SUnit *SU,
+ int /*Ignored*/) {
+ if (!SU->isInstr())
+ return HazardType::NoHazard;
+
+ MachineInstr *MI = SU->getInstr();
+ assert(MI);
+ return getHazardTypeAssumingOffset(MI, Offset);
+}
+
+ScheduleHazardRecognizer::HazardType
+ARMCortexM4AlignmentHazardRecognizer::getHazardTypeAssumingOffset(
+ MachineInstr *MI, size_t AssumedOffset) {
+ if (Advanced) {
+ Advanced = false;
+ return HazardType::NoHazard;
+ }
+
+ if (AssumedOffset % 4 == 0)
+ return HazardType::NoHazard;
+
+ const MCSchedModel &SCModel = STI.getSchedModel();
+ const MachineFunction *MF = MI->getParent()->getParent();
+ const ARMBaseInstrInfo &TII =
+ *static_cast<const ARMBaseInstrInfo *>(MF->getSubtarget().getInstrInfo());
+ int Latency = SCModel.computeInstrLatency<MCSubtargetInfo, MCInstrInfo,
+ InstrItineraryData, MachineInstr>(
+ STI, TII, *MI);
+ if (!Latency)
+ return HazardType::NoHazard;
+
+ const MCInstrDesc &MCID = MI->getDesc();
+ unsigned Domain = MCID.TSFlags & ARMII::DomainMask;
+
+ bool SingleCycleFP =
+ Latency == 1 && (Domain & (ARMII::DomainNEON | ARMII::DomainVFP));
+ if (SingleCycleFP)
+ return HazardType::NoopHazard;
+
+ if (MCID.getSize() == 4 && (MI->mayLoad() || MI->mayStore()))
+ return HazardType::NoopHazard;
+
+ return HazardType::NoHazard;
+}
+
+void ARMCortexM4AlignmentHazardRecognizer::AdvanceCycle() { Advanced = true; }
+void ARMCortexM4AlignmentHazardRecognizer::RecedeCycle() {}
+
+void ARMCortexM4AlignmentHazardRecognizer::EmitNoop() { Offset += 2; }
+
+unsigned ARMCortexM4AlignmentHazardRecognizer::PreEmitNoops(SUnit *SU) {
+ if (!SU->isInstr())
+ return 0;
+
+ MachineInstr *MI = SU->getInstr();
+ assert(MI);
+ return PreEmitNoops(MI);
+}
+
+unsigned ARMCortexM4AlignmentHazardRecognizer::PreEmitNoops(MachineInstr *MI) {
+ const MachineBasicBlock *Parent = MI->getParent();
+ if (Parent != MBB) {
+ Offset = 0;
+ MBB = Parent;
+ }
+
+ LLVM_DEBUG(MI->dump());
+
+ if (LoopsOnly) {
+ // This optimization is likely only critical in loops. Try to save code size
+ // elsewhere by avoiding it when we're not in an innermost loop.
+ if (const MachineLoop *Loop = getLoopFor(MI)) {
+ if (InnermostLoopsOnly && !Loop->isInnermost()) {
+ LLVM_DEBUG(dbgs() << "\toffset=0x" << utohexstr(Offset)
+ << "\n\tnot in an innermost loop\n");
+ return 0;
+ }
+ } else {
+ LLVM_DEBUG(dbgs() << "\toffset=0x" << utohexstr(Offset)
+ << "\n\tnot in a loop\n");
+ return 0;
+ }
+ }
+
+ if (HazardType::NoopHazard == getHazardTypeAssumingOffset(MI, Offset)) {
+ EmittingNoop = true;
+ NumNoops++;
+ LLVM_DEBUG(dbgs() << "\toffset=0x" << utohexstr(Offset)
+ << "\n\thas an alignment hazard, and requires a noop\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+const MachineLoop *
+ARMCortexM4AlignmentHazardRecognizer::getLoopFor(MachineInstr *MI) {
+ // Calculate and cache the MachineLoopInfo.
+ MachineFunction *ParentMF = MI->getParent()->getParent();
+ if (MF != ParentMF) {
+ MF = ParentMF;
+ MDT = MachineDominatorTree(*MF);
+ MLI.~MachineLoopInfo();
+ new (&MLI) MachineLoopInfo(MDT);
+ }
+ return MLI.getLoopFor(MI->getParent());
+}
\ No newline at end of file
diff --git a/llvm/lib/Target/ARM/ARMHazardRecognizer.h b/llvm/lib/Target/ARM/ARMHazardRecognizer.h
index b9ac3555c2bc3..5e2c4e7bef27a 100644
--- a/llvm/lib/Target/ARM/ARMHazardRecognizer.h
+++ b/llvm/lib/Target/ARM/ARMHazardRecognizer.h
@@ -16,6 +16,8 @@
#include "ARMBaseInstrInfo.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Support/DataTypes.h"
#include <initializer_list>
@@ -63,6 +65,48 @@ class ARMBankConflictHazardRecognizer : public ScheduleHazardRecognizer {
inline HazardType CheckOffsets(unsigned O0, unsigned O1);
};
+/**
+ Hazards related to alignment of single-cycle fp instructions on cortex-m4f.
+
+ https://developer.arm.com/documentation/ka006138/latest
+
+ "A long sequence of T32 single-cycle floating-point instructions aligned on
+ odd halfword boundaries will experience a performance drop. Specifically, one
+ stall cycle is inserted for every three instructions executed."
+*/
+class ARMCortexM4AlignmentHazardRecognizer : public ScheduleHazardRecognizer {
+ const MCSubtargetInfo &STI;
+ const MachineBasicBlock *MBB;
+ MachineDominatorTree MDT;
+ MachineLoopInfo MLI;
+ MachineFunction *MF;
+ size_t Offset;
+ bool Advanced;
+ bool EmittingNoop;
+
+public:
+ ARMCortexM4AlignmentHazardRecognizer(const MCSubtargetInfo &STI);
+
+ void Reset() override;
+
+ void AdvanceCycle() override;
+ void RecedeCycle() override;
+ void EmitNoop() override;
+
+ void EmitInstruction(SUnit *SU) override;
+ void EmitInstruction(MachineInstr *MI) override;
+
+ HazardType getHazardType(SUnit *SU, int Stalls = 0) override;
+ HazardType getHazardType(MachineInstr *MI);
+ HazardType getHazardTypeAssumingOffset(MachineInstr *MI, size_t Offset);
+
+ unsigned PreEmitNoops(SUnit *SU) override;
+ unsigned PreEmitNoops(MachineInstr *MI) override;
+
+private:
+ const MachineLoop *getLoopFor(MachineInstr *MI);
+};
+
} // end namespace llvm
#endif
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.h b/llvm/lib/Target/ARM/ARMInstrInfo.h
index 178d7a2c630e4..6e988a1113583 100644
--- a/llvm/lib/Target/ARM/ARMInstrInfo.h
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.h
@@ -41,6 +41,11 @@ class ARMInstrInfo : public ARMBaseInstrInfo {
void expandLoadStackGuard(MachineBasicBlock::iterator MI) const override;
};
+namespace ARM {
+enum AsmComments {
+ M4F_ALIGNMENT_HAZARD = MachineInstr::TAsmComments,
+};
+} // namespace ARM
}
#endif
diff --git a/llvm/lib/Target/ARM/ARMProcessors.td b/llvm/lib/Target/ARM/ARMProcessors.td
index 7453727a7cff0..046a67c0c472d 100644
--- a/llvm/lib/Target/ARM/ARMProcessors.td
+++ b/llvm/lib/Target/ARM/ARMProcessors.td
@@ -99,6 +99,8 @@ def ProcM3 : SubtargetFeature<"m3", "ARMProcFamily", "CortexM3",
"Cortex-M3 ARM processors", []>;
def ProcM55 : SubtargetFeature<"m55", "ARMProcFamily", "CortexM55",
"Cortex-M55 ARM processors", []>;
+def ProcM4 : SubtargetFeature<"m4", "ARMProcFamily", "CortexM4",
+ "Cortex-M4 ARM processors", []>;
def ProcM7 : SubtargetFeature<"m7", "ARMProcFamily", "CortexM7",
"Cortex-M7 ARM processors", []>;
def ProcM85 : SubtargetFeature<"m85", "ARMProcFamily", "CortexM85",
@@ -340,6 +342,7 @@ def : ProcessorModel<"sc300", CortexM4Model, [ARMv7m,
FeatureHasNoBranchPredictor]>;
def : ProcessorModel<"cortex-m4", CortexM4Model, [ARMv7em,
+ ProcM4,
FeatureVFP4_D16_SP,
FeaturePreferBranchAlign32,
FeatureHasSlowFPVMLx,
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.cpp b/llvm/lib/Target/ARM/ARMSubtarget.cpp
index 893084785e6f0..a1fbb387cce09 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ b/llvm/lib/Target/ARM/ARMSubtarget.cpp
@@ -289,6 +289,7 @@ void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
case CortexA78C:
case CortexA510:
case CortexA710:
+ case CortexM4:
case CortexR4:
case CortexR5:
case CortexR7:
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h
index 7329d3f2055f0..86f3ca61418bf 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -292,6 +292,7 @@ class ARMSubtarget : public ARMGenSubtargetInfo {
bool isSwift() const { return ARMProcFamily == Swift; }
bool isCortexM3() const { return ARMProcFamily == CortexM3; }
bool isCortexM55() const { return ARMProcFamily == CortexM55; }
+ bool isCortexM4() const { return ARMProcFamily == CortexM4; }
bool isCortexM7() const { return ARMProcFamily == CortexM7; }
bool isCortexM85() const { return ARMProcFamily == CortexM85; }
bool isLikeA9() const { return isCortexA9() || isCortexA15() || isKrait(); }
diff --git a/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index 98bdf310dea91..a988c2e3269fa 100644
--- a/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -78,6 +78,10 @@ static cl::opt<cl::boolOrDefault>
EnableGlobalMerge("arm-global-merge", cl::Hidden,
cl::desc("Enable the global merge pass"));
+static cl::opt<cl::boolOrDefault> EnablePostRAHazardRecognizer(
+ "arm-postra-hazard-recognizer", cl::Hidden,
+ cl::desc("Enable the post-ra hazard recognizer"));
+
namespace llvm {
void initializeARMExecutionDomainFixPass(PassRegistry&);
}
@@ -622,6 +626,12 @@ void ARMPassConfig::addPreEmitPass2() {
// Identify valid eh continuation targets for Windows EHCont Guard.
addPass(createEHContGuardCatchretPass());
}
+
+ // Enable the hazard recognizer for cortex-m4f at -O2 or higher.
+ if ((EnablePostRAHazardRecognizer == cl::BOU_UNSET &&
+ CodeGenOptLevel::Default <= getOptLevel()) ||
+ EnablePostRAHazardRecognizer == cl::BOU_TRUE)
+ addPass(&PostRAHazardRecognizerID);
}
yaml::MachineFunctionInfo *
diff --git a/llvm/test/CodeGen/ARM/O3-pipeline.ll b/llvm/test/CodeGen/ARM/O3-pipeline.ll
index 1840b5ce46c6f..a3930c349b5d4 100644
--- a/llvm/test/CodeGen/ARM/O3-pipeline.ll
+++ b/llvm/test/CodeGen/ARM/O3-pipeline.ll
@@ -213,6 +213,7 @@
; CHECK-NEXT: Machine Natural Loop Construction
; CHECK-NEXT: ReachingDefAnalysis
; CHECK-NEXT: ARM Low Overhead Loops pass
+; CHECK-NEXT: Post RA hazard recognizer
; CHECK-NEXT: Lazy Machine Block Frequency Analysis
; CHECK-NEXT: Machine Optimization Remark Emitter
; CHECK-NEXT: ARM Assembly Printer
diff --git a/llvm/test/CodeGen/ARM/cortex-m4f-alignment-hazard.mir b/llvm/test/CodeGen/ARM/cortex-m4f-alignment-hazard.mir
new file mode 100644
index 0000000000000..7d8f6d1f7dacb
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/cortex-m4f-alignment-hazard.mir
@@ -0,0 +1,207 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=thumbv7-apple-darwin -mcpu=cortex-m4 -verify-machineinstrs -run-pass post-RA-hazard-rec -o - %s | FileCheck %s --check-prefixes=CHECK,DEFAULT
+# RUN: llc -mtriple=thumbv7-apple-darwin -mcpu=cortex-m4 -verify-machineinstrs -run-pass post-RA-hazard-rec -o - %s -cortex-m4-alignment-hazard-rec-innermost-loops-only=0 | FileCheck %s --check-prefixes=CHECK,ANY-LOOPS
+# RUN: llc -mtriple=thumbv7-apple-darwin -mcpu=cortex-m4 -verify-machineinstrs -run-pass post-RA-hazard-rec -o - %s -cortex-m4-alignment-hazard-rec-loops-only=0 | FileCheck %s --check-prefixes=CHECK,ANY-BLOCK
+
+---
+name: alignment_hazards_in_double_loop
+alignment: 4
+body: |
+ ; DEFAULT-LABEL: name: alignment_hazards_in_double_loop
+ ; DEFAULT: bb.0 (align 4):
+ ; DEFAULT-NEXT: successors: %bb.1(0x80000000)
+ ; DEFAULT-NEXT: {{ $}}
+ ; DEFAULT-NEXT: bb.1 (align 4):
+ ; DEFAULT-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; DEFAULT-NEXT: {{ $}}
+ ; DEFAULT-NEXT: renamable $s2 = VLDRS renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: renamable $r2 = t2LSRri renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
+ ; DEFAULT-NEXT: renamable $r1, dead $cpsr = tLSRri renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: HINT 0, 14 /* CC::al */, 0
+ ; DEFAULT-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: HINT 0, 14 /* CC::al */, 0
+ ; DEFAULT-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: renamable $s6 = VLDRS renamable $r3, 6, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: tBcc %bb.1, 1 /* CC::ne */, killed $cpsr
+ ; DEFAULT-NEXT: {{ $}}
+ ; DEFAULT-NEXT: bb.2 (align 4):
+ ; DEFAULT-NEXT: successors: %bb.0(0x40000000), %bb.3(0x40000000)
+ ; DEFAULT-NEXT: {{ $}}
+ ; DEFAULT-NEXT: renamable $s2 = VLDRS renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: renamable $r2 = t2LSRri renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
+ ; DEFAULT-NEXT: renamable $r1, dead $cpsr = tLSRri renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: renamable $s6 = VLDRS renamable $r3, 6, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: tBcc %bb.0, 1 /* CC::ne */, killed $cpsr
+ ; DEFAULT-NEXT: {{ $}}
+ ; DEFAULT-NEXT: bb.3:
+ ; DEFAULT-NEXT: renamable $s2 = VLDRS renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: renamable $r2 = t2LSRri renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
+ ; DEFAULT-NEXT: renamable $r1, dead $cpsr = tLSRri renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: renamable $s6 = VLDRS renamable $r3, 6, 14 /* CC::al */, $noreg
+ ; DEFAULT-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r6, def $r7, def $pc
+ ;
+ ; ANY-LOOPS-LABEL: name: alignment_hazards_in_double_loop
+ ; ANY-LOOPS: bb.0 (align 4):
+ ; ANY-LOOPS-NEXT: successors: %bb.1(0x80000000)
+ ; ANY-LOOPS-NEXT: {{ $}}
+ ; ANY-LOOPS-NEXT: bb.1 (align 4):
+ ; ANY-LOOPS-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; ANY-LOOPS-NEXT: {{ $}}
+ ; ANY-LOOPS-NEXT: renamable $s2 = VLDRS renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: renamable $r2 = t2LSRri renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
+ ; ANY-LOOPS-NEXT: renamable $r1, dead $cpsr = tLSRri renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: HINT 0, 14 /* CC::al */, 0
+ ; ANY-LOOPS-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: HINT 0, 14 /* CC::al */, 0
+ ; ANY-LOOPS-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: renamable $s6 = VLDRS renamable $r3, 6, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: tBcc %bb.1, 1 /* CC::ne */, killed $cpsr
+ ; ANY-LOOPS-NEXT: {{ $}}
+ ; ANY-LOOPS-NEXT: bb.2 (align 4):
+ ; ANY-LOOPS-NEXT: successors: %bb.0(0x40000000), %bb.3(0x40000000)
+ ; ANY-LOOPS-NEXT: {{ $}}
+ ; ANY-LOOPS-NEXT: renamable $s2 = VLDRS renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: renamable $r2 = t2LSRri renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
+ ; ANY-LOOPS-NEXT: renamable $r1, dead $cpsr = tLSRri renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: HINT 0, 14 /* CC::al */, 0
+ ; ANY-LOOPS-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: HINT 0, 14 /* CC::al */, 0
+ ; ANY-LOOPS-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: renamable $s6 = VLDRS renamable $r3, 6, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: tBcc %bb.0, 1 /* CC::ne */, killed $cpsr
+ ; ANY-LOOPS-NEXT: {{ $}}
+ ; ANY-LOOPS-NEXT: bb.3:
+ ; ANY-LOOPS-NEXT: renamable $s2 = VLDRS renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: renamable $r2 = t2LSRri renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
+ ; ANY-LOOPS-NEXT: renamable $r1, dead $cpsr = tLSRri renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: renamable $s6 = VLDRS renamable $r3, 6, 14 /* CC::al */, $noreg
+ ; ANY-LOOPS-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r6, def $r7, def $pc
+ ;
+ ; ANY-BLOCK-LABEL: name: alignment_hazards_in_double_loop
+ ; ANY-BLOCK: bb.0 (align 4):
+ ; ANY-BLOCK-NEXT: successors: %bb.1(0x80000000)
+ ; ANY-BLOCK-NEXT: {{ $}}
+ ; ANY-BLOCK-NEXT: bb.1 (align 4):
+ ; ANY-BLOCK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; ANY-BLOCK-NEXT: {{ $}}
+ ; ANY-BLOCK-NEXT: renamable $s2 = VLDRS renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: renamable $r2 = t2LSRri renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
+ ; ANY-BLOCK-NEXT: renamable $r1, dead $cpsr = tLSRri renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: HINT 0, 14 /* CC::al */, 0
+ ; ANY-BLOCK-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: HINT 0, 14 /* CC::al */, 0
+ ; ANY-BLOCK-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: renamable $s6 = VLDRS renamable $r3, 6, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: tBcc %bb.1, 1 /* CC::ne */, killed $cpsr
+ ; ANY-BLOCK-NEXT: {{ $}}
+ ; ANY-BLOCK-NEXT: bb.2 (align 4):
+ ; ANY-BLOCK-NEXT: successors: %bb.0(0x40000000), %bb.3(0x40000000)
+ ; ANY-BLOCK-NEXT: {{ $}}
+ ; ANY-BLOCK-NEXT: renamable $s2 = VLDRS renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: renamable $r2 = t2LSRri renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
+ ; ANY-BLOCK-NEXT: renamable $r1, dead $cpsr = tLSRri renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: HINT 0, 14 /* CC::al */, 0
+ ; ANY-BLOCK-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: HINT 0, 14 /* CC::al */, 0
+ ; ANY-BLOCK-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: renamable $s6 = VLDRS renamable $r3, 6, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: tBcc %bb.0, 1 /* CC::ne */, killed $cpsr
+ ; ANY-BLOCK-NEXT: {{ $}}
+ ; ANY-BLOCK-NEXT: bb.3:
+ ; ANY-BLOCK-NEXT: renamable $s2 = VLDRS renamable $r3, 4, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: renamable $r2 = t2LSRri renamable $lr, 1, 14 /* CC::al */, $noreg, $noreg
+ ; ANY-BLOCK-NEXT: renamable $r1, dead $cpsr = tLSRri renamable $r2, 1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: HINT 0, 14 /* CC::al */, 0
+ ; ANY-BLOCK-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: HINT 0, 14 /* CC::al */, 0
+ ; ANY-BLOCK-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: $s2 = VMOVSR $r1, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: renamable $s6 = VLDRS renamable $r3, 6, 14 /* CC::al */, $noreg
+ ; ANY-BLOCK-NEXT: tPOP_RET 14 /* CC::al */, $noreg, def $r4, def $r6, def $r7, def $pc
+ bb.0 (align 4):
+ successors: %bb.1
+
+ bb.1 (align 4):
+ successors: %bb.1, %bb.2
+
+ renamable $s2 = VLDRS renamable $r3, 4, 14, $noreg
+ renamable $r2 = t2LSRri renamable $lr, 1, 14, $noreg, $noreg
+ renamable $r1, dead $cpsr = tLSRri renamable $r2, 1, 14, $noreg
+ $s2 = VMOVSR $r1, 14, $noreg
+ renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14, $noreg
+ $s2 = VMOVSR $r1, 14, $noreg
+ renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14, $noreg
+ renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14, $noreg
+ $s2 = VMOVSR $r1, 14, $noreg
+ renamable $s6 = VLDRS renamable $r3, 6, 14, $noreg
+ tBcc %bb.1, 1, killed $cpsr
+
+ bb.2 (align 4):
+ successors: %bb.0, %bb.3
+
+ renamable $s2 = VLDRS renamable $r3, 4, 14, $noreg
+ renamable $r2 = t2LSRri renamable $lr, 1, 14, $noreg, $noreg
+ renamable $r1, dead $cpsr = tLSRri renamable $r2, 1, 14, $noreg
+ $s2 = VMOVSR $r1, 14, $noreg
+ renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14, $noreg
+ $s2 = VMOVSR $r1, 14, $noreg
+ renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14, $noreg
+ renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14, $noreg
+ $s2 = VMOVSR $r1, 14, $noreg
+ renamable $s6 = VLDRS renamable $r3, 6, 14, $noreg
+ tBcc %bb.0, 1, killed $cpsr
+
+ bb.3:
+ renamable $s2 = VLDRS renamable $r3, 4, 14, $noreg
+ renamable $r2 = t2LSRri renamable $lr, 1, 14, $noreg, $noreg
+ renamable $r1, dead $cpsr = tLSRri renamable $r2, 1, 14, $noreg
+ $s2 = VMOVSR $r1, 14, $noreg
+ renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14, $noreg
+ $s2 = VMOVSR $r1, 14, $noreg
+ renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14, $noreg
+ renamable $r2, dead $cpsr = tLSRri renamable $r1, 1, 14, $noreg
+ $s2 = VMOVSR $r1, 14, $noreg
+ renamable $s6 = VLDRS renamable $r3, 6, 14, $noreg
+ tPOP_RET 14, $noreg, def $r4, def $r6, def $r7, def $pc
+...