[llvm] [AArch64] Add check that prologue insertion doesn't clobber live regs. (PR #71826)
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Thu Nov 9 08:42:40 PST 2023
https://github.com/fhahn created https://github.com/llvm/llvm-project/pull/71826
This patch extends AArch64FrameLowering::emitPrologue to check if the inserted prologue clobbers live registers.
At the moment, llvm/test/CodeGen/AArch64/framelayout-scavengingslot.mir is failing because it has a block with the following live-in list liveins: $x1, $x2, $x3, $x4, $x5, $x6, $x7, $x8, $x9, $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27, $x28, $lr
meaning the prologue actually clobbers a live register.
>From 4321e9f2b9a18f4b117ad8859a429cb4901a2850 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Wed, 8 Nov 2023 22:05:25 +0000
Subject: [PATCH] [AArch64] Add check that prologue insertion doesn't clobber
live regs.
This patch extends AArch64FrameLowering::emitPrologue to check if the
inserted prologue clobbers live registers.
At the moment, llvm/test/CodeGen/AArch64/framelayout-scavengingslot.mir
is failing because it has a block with the following live-in list
liveins: $x1, $x2, $x3, $x4, $x5, $x6, $x7, $x8, $x9, $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27, $x28, $lr
meaning the prologue actually clobbers a live register.
---
.../Target/AArch64/AArch64FrameLowering.cpp | 46 ++++++++++++++++++-
.../framelayout-sve-calleesaves-fix.mir | 1 +
...re-swift-async-context-clobber-live-reg.ll | 37 +++++++++++++++
3 files changed, 83 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/AArch64/store-swift-async-context-clobber-live-reg.ll
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 880de7d0306a7e1..87efb60b6ebbc33 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -1384,6 +1384,18 @@ static void emitDefineCFAWithFP(MachineFunction &MF, MachineBasicBlock &MBB,
.setMIFlags(MachineInstr::FrameSetup);
}
+/// Collect live registers from the end of \p MI's parent up to (including) \p
+/// MI in \p LiveRegs.
+static void getLivePhysRegsUpTo(MachineInstr &MI, const TargetRegisterInfo &TRI,
+ LivePhysRegs &LiveRegs) {
+
+ MachineBasicBlock &MBB = *MI.getParent();
+ LiveRegs.addLiveOuts(MBB);
+ for (const MachineInstr &MI :
+ reverse(make_range(MI.getIterator(), MBB.instr_end())))
+ LiveRegs.stepBackward(MI);
+}
+
void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
@@ -1392,6 +1404,8 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+
MachineModuleInfo &MMI = MF.getMMI();
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
bool EmitCFI = AFI->needsDwarfUnwindInfo(MF);
@@ -1401,6 +1415,24 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
bool HasWinCFI = false;
auto Cleanup = make_scope_exit([&]() { MF.setHasWinCFI(HasWinCFI); });
+ MachineBasicBlock::iterator End = MBB.end();
+#ifndef NDEBUG
+  // Collect live registers from the end of MBB up to the start of the existing
+ // frame setup instructions.
+ MachineBasicBlock::iterator NonFrameStart = MBB.begin();
+ while (NonFrameStart != End && NonFrameStart->getFlag(MachineInstr::FrameSetup))
+ ++NonFrameStart;
+ LivePhysRegs LiveRegs(*TRI);
+ if (NonFrameStart != MBB.end()) {
+ getLivePhysRegsUpTo(*NonFrameStart, *TRI, LiveRegs);
+ // Ignore registers used for stack management for now.
+ LiveRegs.removeReg(AArch64::SP);
+ LiveRegs.removeReg(AArch64::X19);
+ LiveRegs.removeReg(AArch64::FP);
+ LiveRegs.removeReg(AArch64::LR);
+ }
+#endif
+
bool IsFunclet = MBB.isEHFuncletEntry();
// At this point, we're going to decide whether or not the function uses a
@@ -1569,7 +1601,6 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
// Move past the saves of the callee-saved registers, fixing up the offsets
// and pre-inc if we decided to combine the callee-save and local stack
// pointer bump above.
- MachineBasicBlock::iterator End = MBB.end();
while (MBBI != End && MBBI->getFlag(MachineInstr::FrameSetup) &&
!IsSVECalleeSave(MBBI)) {
if (CombineSPBump)
@@ -1904,6 +1935,19 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
emitCalleeSavedGPRLocations(MBB, MBBI);
emitCalleeSavedSVELocations(MBB, MBBI);
}
+
+#ifndef NDEBUG
+ if (NonFrameStart != MBB.end()) {
+    // Check if any of the newly inserted instructions clobber any of the live registers.
+ for (MachineInstr &MI :
+ make_range(MBB.instr_begin(), NonFrameStart->getIterator())) {
+ for (auto &Op : MI.operands())
+ if (Op.isReg() && Op.isDef())
+ assert(!LiveRegs.contains(Op.getReg()) &&
+ "live register clobbered by inserted prologue instructions");
+ }
+ }
+#endif
}
static bool isFuncletReturnInstr(const MachineInstr &MI) {
diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
index 7d7b3ace8a915cd..3dba21d59b4087e 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
+++ b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
@@ -30,6 +30,7 @@
; CHECK-NEXT: ret
...
name: fix_restorepoint_p4
+tracksRegLiveness: true
stack:
- { id: 0, stack-id: scalable-vector, size: 16, alignment: 16 }
body: |
diff --git a/llvm/test/CodeGen/AArch64/store-swift-async-context-clobber-live-reg.ll b/llvm/test/CodeGen/AArch64/store-swift-async-context-clobber-live-reg.ll
new file mode 100644
index 000000000000000..33c86297432478f
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/store-swift-async-context-clobber-live-reg.ll
@@ -0,0 +1,37 @@
+; RUN: llc -o - -mtriple=arm64e-apple-macosx %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+define swifttailcc void @test_async_with_jumptable(ptr %src, ptr swiftasync %as) #0 {
+entry:
+ %l = load i64, ptr %src, align 8
+ switch i64 %l, label %dead [
+ i64 0, label %exit
+ i64 1, label %then.1
+ i64 2, label %then.2
+ i64 3, label %then.3
+ ]
+
+then.1:
+ br label %exit
+
+then.2:
+ br label %exit
+
+then.3:
+ br label %exit
+
+dead: ; preds = %entryresume.5
+ unreachable
+
+exit:
+ %p = phi ptr [ %src, %then.3 ], [ null, %then.2 ], [ %as, %entry ], [ null, %then.1 ]
+ %r = call i64 @foo()
+ %fn = inttoptr i64 %r to ptr
+ musttail call swifttailcc void %fn(ptr swiftasync %src, ptr %p, ptr %as)
+ ret void
+}
+
+declare i64 @foo()
+
+attributes #0 = { "frame-pointer"="non-leaf" }
More information about the llvm-commits
mailing list