[llvm] 613a2a1 - [AArch64][PAC] Protect the entire function if pac-ret+leaf is requested (#140895)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 9 08:40:48 PDT 2025
Author: Anatoly Trosinenko
Date: 2025-07-09T18:40:45+03:00
New Revision: 613a2a1359370bc5ab3fbf71b4e89ae4211467c7
URL: https://github.com/llvm/llvm-project/commit/613a2a1359370bc5ab3fbf71b4e89ae4211467c7
DIFF: https://github.com/llvm/llvm-project/commit/613a2a1359370bc5ab3fbf71b4e89ae4211467c7.diff
LOG: [AArch64][PAC] Protect the entire function if pac-ret+leaf is requested (#140895)
Normally, pac-ret hardening is emitted as part of function prologues and
epilogues, thus it is affected by the shrink-wrapping optimization.
As protecting LR when it is spilled to the stack is already handled by
the regular -mbranch-protection=pac-ret option, it is reasonable to assume
that the pac-ret+leaf option means the user wants to apply pac-ret hardening
to as much code as possible. For that reason, if pac-ret+leaf hardening
mode is requested, this patch moves the emission of PAUTH_PROLOGUE (or
PAUTH_EPILOGUE) pseudos from emitPrologue (emitEpilogue) methods of the
AArch64FrameLowering class to processFunctionBeforeFrameIndicesReplaced.
This change does not currently affect targets that emit WinCFI unwind
information.
This commit only affects where LR is signed and authenticated, but does
not otherwise prevent the shrink-wrapping optimization. Moreover,
without the "+leaf" modifier, PAUTH_(PROLOGUE|EPILOGUE) pseudos respect the
shrink-wrapping optimization just like any other prologue/epilogue code.
Added:
Modified:
llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
llvm/lib/Target/AArch64/AArch64FrameLowering.h
llvm/test/CodeGen/AArch64/sign-return-address-cfi-negate-ra-state.ll
llvm/test/CodeGen/AArch64/sign-return-address-pauth-lr.ll
llvm/test/CodeGen/AArch64/sign-return-address-tailcall.ll
llvm/test/CodeGen/AArch64/sign-return-address.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 2f3cb71e4767f..666ff8bbab42a 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -1182,6 +1182,16 @@ static bool needsWinCFI(const MachineFunction &MF) {
F.needsUnwindTableEntry();
}
+static bool shouldSignReturnAddressEverywhere(const MachineFunction &MF) {
+ // FIXME: With WinCFI, extra care should be taken to place SEH_PACSignLR
+ // and SEH_EpilogEnd instructions in the correct order.
+ if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI())
+ return false;
+ const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
+ bool SignReturnAddressAll = AFI->shouldSignReturnAddress(/*SpillsLR=*/false);
+ return SignReturnAddressAll;
+}
+
bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
MachineFunction &MF, uint64_t StackBumpBytes) const {
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
@@ -1780,6 +1790,39 @@ static void getLivePhysRegsUpTo(MachineInstr &MI, const TargetRegisterInfo &TRI,
}
#endif
+void AArch64FrameLowering::emitPacRetPlusLeafHardening(
+ MachineFunction &MF) const {
+ const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
+ const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+
+ auto EmitSignRA = [&](MachineBasicBlock &MBB) {
+ DebugLoc DL; // Set debug location to unknown.
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::PAUTH_PROLOGUE))
+ .setMIFlag(MachineInstr::FrameSetup);
+ };
+
+ auto EmitAuthRA = [&](MachineBasicBlock &MBB) {
+ DebugLoc DL;
+ MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
+ if (MBBI != MBB.end())
+ DL = MBBI->getDebugLoc();
+
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::PAUTH_EPILOGUE))
+ .setMIFlag(MachineInstr::FrameDestroy);
+ };
+
+ // This should be in sync with PEIImpl::calculateSaveRestoreBlocks.
+ EmitSignRA(MF.front());
+ for (MachineBasicBlock &MBB : MF) {
+ if (MBB.isEHFuncletEntry())
+ EmitSignRA(MBB);
+ if (MBB.isReturnBlock())
+ EmitAuthRA(MBB);
+ }
+}
+
void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
@@ -1849,17 +1892,21 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
DebugLoc DL;
const auto &MFnI = *MF.getInfo<AArch64FunctionInfo>();
- if (MFnI.needsShadowCallStackPrologueEpilogue(MF))
- emitShadowCallStackPrologue(*TII, MF, MBB, MBBI, DL, NeedsWinCFI,
- MFnI.needsDwarfUnwindInfo(MF));
-
if (MFnI.shouldSignReturnAddress(MF)) {
- BuildMI(MBB, MBBI, DL, TII->get(AArch64::PAUTH_PROLOGUE))
- .setMIFlag(MachineInstr::FrameSetup);
+ // If pac-ret+leaf is in effect, PAUTH_PROLOGUE pseudo instructions
+ // are inserted by emitPacRetPlusLeafHardening().
+ if (!shouldSignReturnAddressEverywhere(MF)) {
+ BuildMI(MBB, MBBI, DL, TII->get(AArch64::PAUTH_PROLOGUE))
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
if (NeedsWinCFI)
HasWinCFI = true; // AArch64PointerAuth pass will insert SEH_PACSignLR
}
+ if (MFnI.needsShadowCallStackPrologueEpilogue(MF))
+ emitShadowCallStackPrologue(*TII, MF, MBB, MBBI, DL, NeedsWinCFI,
+ MFnI.needsDwarfUnwindInfo(MF));
+
if (EmitCFI && MFnI.isMTETagged()) {
BuildMI(MBB, MBBI, DL, TII->get(AArch64::EMITMTETAGGED))
.setMIFlag(MachineInstr::FrameSetup);
@@ -2413,17 +2460,21 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock::iterator EpilogStartI = MBB.end();
auto FinishingTouches = make_scope_exit([&]() {
- if (AFI->shouldSignReturnAddress(MF)) {
- BuildMI(MBB, MBB.getFirstTerminator(), DL,
- TII->get(AArch64::PAUTH_EPILOGUE))
- .setMIFlag(MachineInstr::FrameDestroy);
- if (NeedsWinCFI)
- HasWinCFI = true; // AArch64PointerAuth pass will insert SEH_PACSignLR
- }
if (AFI->needsShadowCallStackPrologueEpilogue(MF))
emitShadowCallStackEpilogue(*TII, MF, MBB, MBB.getFirstTerminator(), DL);
if (EmitCFI)
emitCalleeSavedGPRRestores(MBB, MBB.getFirstTerminator());
+ if (AFI->shouldSignReturnAddress(MF)) {
+ // If pac-ret+leaf is in effect, PAUTH_EPILOGUE pseudo instructions
+ // are inserted by emitPacRetPlusLeafHardening().
+ if (!shouldSignReturnAddressEverywhere(MF)) {
+ BuildMI(MBB, MBB.getFirstTerminator(), DL,
+ TII->get(AArch64::PAUTH_EPILOGUE))
+ .setMIFlag(MachineInstr::FrameDestroy);
+ }
+ if (NeedsWinCFI)
+ HasWinCFI = true; // AArch64PointerAuth pass will insert SEH_PACSignLR
+ }
if (HasWinCFI) {
BuildMI(MBB, MBB.getFirstTerminator(), DL,
TII->get(AArch64::SEH_EpilogEnd))
@@ -5230,6 +5281,13 @@ void AArch64FrameLowering::processFunctionBeforeFrameIndicesReplaced(
else if (StackTaggingMergeSetTag)
II = tryMergeAdjacentSTG(II, this, RS);
}
+
+ // By the time this method is called, most of the prologue/epilogue code is
+ // already emitted, whether its location was affected by the shrink-wrapping
+ // optimization or not.
+ if (!MF.getFunction().hasFnAttribute(Attribute::Naked) &&
+ shouldSignReturnAddressEverywhere(MF))
+ emitPacRetPlusLeafHardening(MF);
}
/// For Win64 AArch64 EH, the offset to the Unwind object is from the SP
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
index ced69c9cd3699..555a93359c274 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
@@ -36,6 +36,14 @@ class AArch64FrameLowering : public TargetFrameLowering {
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+ /// Harden the entire function with pac-ret.
+ ///
+ /// If pac-ret+leaf is requested, we want to harden as much code as possible.
+ /// This function inserts pac-ret hardening at the points where prologue and
+ /// epilogue are traditionally inserted, ignoring possible shrink-wrapping
+ /// optimization.
+ void emitPacRetPlusLeafHardening(MachineFunction &MF) const;
+
bool enableCFIFixup(const MachineFunction &MF) const override;
bool enableFullCFIFixup(const MachineFunction &MF) const override;
diff --git a/llvm/test/CodeGen/AArch64/sign-return-address-cfi-negate-ra-state.ll b/llvm/test/CodeGen/AArch64/sign-return-address-cfi-negate-ra-state.ll
index 4d4b7c215b978..bf70bf30534ec 100644
--- a/llvm/test/CodeGen/AArch64/sign-return-address-cfi-negate-ra-state.ll
+++ b/llvm/test/CodeGen/AArch64/sign-return-address-cfi-negate-ra-state.ll
@@ -74,9 +74,9 @@ define hidden noundef i32 @baz_async(i32 noundef %a) #0 uwtable(async) {
; CHECK-V8A-NEXT: bl _Z3bari
; CHECK-V8A-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-V8A-NEXT: .cfi_def_cfa_offset 0
+; CHECK-V8A-NEXT: .cfi_restore w30
; CHECK-V8A-NEXT: hint #29
; CHECK-V8A-NEXT: .cfi_negate_ra_state
-; CHECK-V8A-NEXT: .cfi_restore w30
; CHECK-V8A-NEXT: b _Z3bari
; CHECK-V8A-NEXT: .LBB1_2: // %if.else
; CHECK-V8A-NEXT: .cfi_restore_state
@@ -84,9 +84,9 @@ define hidden noundef i32 @baz_async(i32 noundef %a) #0 uwtable(async) {
; CHECK-V8A-NEXT: add w0, w0, #1
; CHECK-V8A-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-V8A-NEXT: .cfi_def_cfa_offset 0
+; CHECK-V8A-NEXT: .cfi_restore w30
; CHECK-V8A-NEXT: hint #29
; CHECK-V8A-NEXT: .cfi_negate_ra_state
-; CHECK-V8A-NEXT: .cfi_restore w30
; CHECK-V8A-NEXT: ret
;
; CHECK-V83A-LABEL: baz_async:
@@ -103,9 +103,9 @@ define hidden noundef i32 @baz_async(i32 noundef %a) #0 uwtable(async) {
; CHECK-V83A-NEXT: bl _Z3bari
; CHECK-V83A-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-V83A-NEXT: .cfi_def_cfa_offset 0
+; CHECK-V83A-NEXT: .cfi_restore w30
; CHECK-V83A-NEXT: autiasp
; CHECK-V83A-NEXT: .cfi_negate_ra_state
-; CHECK-V83A-NEXT: .cfi_restore w30
; CHECK-V83A-NEXT: b _Z3bari
; CHECK-V83A-NEXT: .LBB1_2: // %if.else
; CHECK-V83A-NEXT: .cfi_restore_state
diff --git a/llvm/test/CodeGen/AArch64/sign-return-address-pauth-lr.ll b/llvm/test/CodeGen/AArch64/sign-return-address-pauth-lr.ll
index f37f12246e24a..85aa6846cd800 100644
--- a/llvm/test/CodeGen/AArch64/sign-return-address-pauth-lr.ll
+++ b/llvm/test/CodeGen/AArch64/sign-return-address-pauth-lr.ll
@@ -256,40 +256,40 @@ define i32 @non_leaf_sign_non_leaf(i32 %x) "branch-protection-pauth-lr" "sign-re
define i32 @non_leaf_scs(i32 %x) "branch-protection-pauth-lr" "sign-return-address"="non-leaf" shadowcallstack "target-features"="+v8.3a,+reserve-x18" {
; CHECK-LABEL: non_leaf_scs:
; CHECK: // %bb.0:
-; CHECK-NEXT: str x30, [x18], #8
-; CHECK-NEXT: .cfi_escape 0x16, 0x12, 0x02, 0x82, 0x78 //
; CHECK-NEXT: hint #39
; CHECK-NEXT: .cfi_negate_ra_state_with_pc
; CHECK-NEXT: .Ltmp4:
; CHECK-NEXT: paciasp
+; CHECK-NEXT: str x30, [x18], #8
+; CHECK-NEXT: .cfi_escape 0x16, 0x12, 0x02, 0x82, 0x78 //
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: bl foo
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ldr x30, [x18, #-8]!
; CHECK-NEXT: adrp x16, .Ltmp4
; CHECK-NEXT: add x16, x16, :lo12:.Ltmp4
; CHECK-NEXT: hint #39
; CHECK-NEXT: autiasp
-; CHECK-NEXT: ldr x30, [x18, #-8]!
; CHECK-NEXT: ret
;
; PAUTHLR-LABEL: non_leaf_scs:
; PAUTHLR: // %bb.0:
-; PAUTHLR-NEXT: str x30, [x18], #8
-; PAUTHLR-NEXT: .cfi_escape 0x16, 0x12, 0x02, 0x82, 0x78 //
; PAUTHLR-NEXT: .cfi_negate_ra_state_with_pc
; PAUTHLR-NEXT: .Ltmp4:
; PAUTHLR-NEXT: paciasppc
+; PAUTHLR-NEXT: str x30, [x18], #8
+; PAUTHLR-NEXT: .cfi_escape 0x16, 0x12, 0x02, 0x82, 0x78 //
; PAUTHLR-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; PAUTHLR-NEXT: .cfi_def_cfa_offset 16
; PAUTHLR-NEXT: .cfi_offset w30, -16
; PAUTHLR-NEXT: bl foo
; PAUTHLR-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; PAUTHLR-NEXT: ldr x30, [x18, #-8]!
; PAUTHLR-NEXT: adrp x16, .Ltmp4
; PAUTHLR-NEXT: add x16, x16, :lo12:.Ltmp4
; PAUTHLR-NEXT: autiasppc .Ltmp4
-; PAUTHLR-NEXT: ldr x30, [x18, #-8]!
; PAUTHLR-NEXT: ret
%call = call i32 @foo(i32 %x)
ret i32 %call
diff --git a/llvm/test/CodeGen/AArch64/sign-return-address-tailcall.ll b/llvm/test/CodeGen/AArch64/sign-return-address-tailcall.ll
index 032d3cc05961f..90e48fe0ed042 100644
--- a/llvm/test/CodeGen/AArch64/sign-return-address-tailcall.ll
+++ b/llvm/test/CodeGen/AArch64/sign-return-address-tailcall.ll
@@ -156,8 +156,8 @@ define i32 @tailcall_two_branches(i1 %0) "sign-return-address"="all" {
; COMMON: str x30, [sp, #-16]!
; COMMON: bl callee2
; COMMON: ldr x30, [sp], #16
-; COMMON-NEXT: [[AUTIASP]]
; COMMON-NEXT: .[[ELSE]]:
+; COMMON-NEXT: [[AUTIASP]]
; LDR-NEXT: ldr w16, [x30]
;
diff --git a/llvm/test/CodeGen/AArch64/sign-return-address.ll b/llvm/test/CodeGen/AArch64/sign-return-address.ll
index dafe0d71ceb5f..b0ab4775cb388 100644
--- a/llvm/test/CodeGen/AArch64/sign-return-address.ll
+++ b/llvm/test/CodeGen/AArch64/sign-return-address.ll
@@ -133,22 +133,131 @@ define i32 @non_leaf_sign_non_leaf(i32 %x) "sign-return-address"="non-leaf" {
define i32 @non_leaf_scs(i32 %x) "sign-return-address"="non-leaf" shadowcallstack "target-features"="+v8.3a,+reserve-x18" {
; CHECK-LABEL: non_leaf_scs:
; CHECK: // %bb.0:
-; CHECK-NEXT: str x30, [x18], #8
-; CHECK-NEXT: .cfi_escape 0x16, 0x12, 0x02, 0x82, 0x78 //
; CHECK-NEXT: paciasp
; CHECK-NEXT: .cfi_negate_ra_state
+; CHECK-NEXT: str x30, [x18], #8
+; CHECK-NEXT: .cfi_escape 0x16, 0x12, 0x02, 0x82, 0x78 //
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: bl foo
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT: autiasp
; CHECK-NEXT: ldr x30, [x18, #-8]!
+; CHECK-NEXT: autiasp
; CHECK-NEXT: ret
%call = call i32 @foo(i32 %x)
ret i32 %call
}
+ at var = dso_local global i64 0
+
+; By default, pac-ret hardening respects shrink-wrapping optimization.
+define void @shrink_wrap_sign_non_leaf(i32 %x, i32 %cond) "sign-return-address"="non-leaf" uwtable(async) {
+; COMPAT-LABEL: shrink_wrap_sign_non_leaf:
+; COMPAT: // %bb.0: // %entry
+; COMPAT-NEXT: cbnz w1, .LBB8_2
+; COMPAT-NEXT: // %bb.1: // %if.then
+; COMPAT-NEXT: hint #25
+; COMPAT-NEXT: .cfi_negate_ra_state
+; COMPAT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; COMPAT-NEXT: .cfi_def_cfa_offset 16
+; COMPAT-NEXT: .cfi_offset w30, -16
+; COMPAT-NEXT: bl foo
+; COMPAT-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; COMPAT-NEXT: .cfi_def_cfa_offset 0
+; COMPAT-NEXT: .cfi_restore w30
+; COMPAT-NEXT: hint #29
+; COMPAT-NEXT: .cfi_negate_ra_state
+; COMPAT-NEXT: .LBB8_2: // %exit
+; COMPAT-NEXT: adrp x8, var
+; COMPAT-NEXT: mov w9, #42 // =0x2a
+; COMPAT-NEXT: str x9, [x8, :lo12:var]
+; COMPAT-NEXT: ret
+;
+; V83A-LABEL: shrink_wrap_sign_non_leaf:
+; V83A: // %bb.0: // %entry
+; V83A-NEXT: cbnz w1, .LBB8_2
+; V83A-NEXT: // %bb.1: // %if.then
+; V83A-NEXT: paciasp
+; V83A-NEXT: .cfi_negate_ra_state
+; V83A-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; V83A-NEXT: .cfi_def_cfa_offset 16
+; V83A-NEXT: .cfi_offset w30, -16
+; V83A-NEXT: bl foo
+; V83A-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; V83A-NEXT: .cfi_def_cfa_offset 0
+; V83A-NEXT: .cfi_restore w30
+; V83A-NEXT: autiasp
+; V83A-NEXT: .cfi_negate_ra_state
+; V83A-NEXT: .LBB8_2: // %exit
+; V83A-NEXT: adrp x8, var
+; V83A-NEXT: mov w9, #42 // =0x2a
+; V83A-NEXT: str x9, [x8, :lo12:var]
+; V83A-NEXT: ret
+entry:
+ %cond.bool = icmp eq i32 %cond, 0
+ br i1 %cond.bool, label %if.then, label %exit
+if.then:
+ %call = call i32 @foo(i32 %x)
+ br label %exit
+exit:
+ store i64 42, ptr @var
+ ret void
+}
+
+; When "+leaf" is specified to harden everything, pac-ret hardens the entire
+; function, ignoring shrink-wrapping.
+define void @shrink_wrap_sign_all(i32 %x, i32 %cond) "sign-return-address"="all" uwtable(async) {
+; COMPAT-LABEL: shrink_wrap_sign_all:
+; COMPAT: // %bb.0: // %entry
+; COMPAT-NEXT: hint #25
+; COMPAT-NEXT: .cfi_negate_ra_state
+; COMPAT-NEXT: cbnz w1, .LBB9_2
+; COMPAT-NEXT: // %bb.1: // %if.then
+; COMPAT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; COMPAT-NEXT: .cfi_def_cfa_offset 16
+; COMPAT-NEXT: .cfi_offset w30, -16
+; COMPAT-NEXT: bl foo
+; COMPAT-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; COMPAT-NEXT: .cfi_def_cfa_offset 0
+; COMPAT-NEXT: .cfi_restore w30
+; COMPAT-NEXT: .LBB9_2: // %exit
+; COMPAT-NEXT: adrp x8, var
+; COMPAT-NEXT: mov w9, #42 // =0x2a
+; COMPAT-NEXT: str x9, [x8, :lo12:var]
+; COMPAT-NEXT: hint #29
+; COMPAT-NEXT: .cfi_negate_ra_state
+; COMPAT-NEXT: ret
+;
+; V83A-LABEL: shrink_wrap_sign_all:
+; V83A: // %bb.0: // %entry
+; V83A-NEXT: paciasp
+; V83A-NEXT: .cfi_negate_ra_state
+; V83A-NEXT: cbnz w1, .LBB9_2
+; V83A-NEXT: // %bb.1: // %if.then
+; V83A-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; V83A-NEXT: .cfi_def_cfa_offset 16
+; V83A-NEXT: .cfi_offset w30, -16
+; V83A-NEXT: bl foo
+; V83A-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; V83A-NEXT: .cfi_def_cfa_offset 0
+; V83A-NEXT: .cfi_restore w30
+; V83A-NEXT: .LBB9_2: // %exit
+; V83A-NEXT: adrp x8, var
+; V83A-NEXT: mov w9, #42 // =0x2a
+; V83A-NEXT: str x9, [x8, :lo12:var]
+; V83A-NEXT: retaa
+entry:
+ %cond.bool = icmp eq i32 %cond, 0
+ br i1 %cond.bool, label %if.then, label %exit
+if.then:
+ %call = call i32 @foo(i32 %x)
+ br label %exit
+exit:
+ store i64 42, ptr @var
+ ret void
+}
+
define i32 @leaf_sign_all_v83(i32 %x) "sign-return-address"="all" "target-features"="+v8.3a" {
; CHECK-LABEL: leaf_sign_all_v83:
; CHECK: // %bb.0:
More information about the llvm-commits
mailing list