[llvm] 84f888c - [ARM] don't emit constant pool for Thumb1 XO/stack guard combo
Ties Stuij via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 19 05:52:44 PDT 2023
Author: Ties Stuij
Date: 2023-07-19T13:51:43+01:00
New Revision: 84f888ca82a1b5924863bfd9769d0ef4552a1372
URL: https://github.com/llvm/llvm-project/commit/84f888ca82a1b5924863bfd9769d0ef4552a1372
DIFF: https://github.com/llvm/llvm-project/commit/84f888ca82a1b5924863bfd9769d0ef4552a1372.diff
LOG: [ARM] don't emit constant pool for Thumb1 XO/stack guard combo
Currently, for armv6-m and armv8-m.baseline we emit constant pool code when
execute-only (XO) is used in combination with stack guards.
XO is a new feature for armv6-m, and this patch is part of a series that
replaces constant pool generation with the equivalent tMOVi32imm sequence.
However, XO has been available for armv8-m.baseline for about six years, so
for that architecture this change is a bugfix.
Reviewed By: simonwallis2, olista01
Differential Revision: https://reviews.llvm.org/D155170
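For readers following the thread: under execute-only, the literal pool load that
previously materialized the address of __stack_chk_guard places data in the code
section, which XO forbids, so the address is now built in registers instead. The
opcode selection after this patch boils down to the following sketch,
reconstructed from the hunk below (surrounding code and error handling elided):

    // Dispatch in Thumb1InstrInfo::expandLoadStackGuard after this patch.
    unsigned Instr;
    if (TM.isPositionIndependent())
      Instr = ARM::tLDRLIT_ga_pcrel;   // literal-pool load, PC-relative
    else if (ST.genExecuteOnly() && ST.hasV8MBaselineOps())
      Instr = ARM::t2MOVi32imm;        // movw/movt pair, no data in .text
    else if (ST.genExecuteOnly())
      Instr = ARM::tMOVi32imm;         // movs/lsls/adds sequence for v6-M XO
    else
      Instr = ARM::tLDRLIT_ga_abs;     // literal-pool load, absolute address
    expandLoadStackGuardBase(MI, Instr, ARM::tLDRi);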
Added:
llvm/test/CodeGen/Thumb/stack-guard-xo.ll
Modified:
llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp b/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
index 48eaa80ebc6501..e2f3fad2007904 100644
--- a/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
+++ b/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
@@ -136,14 +136,21 @@ void Thumb1InstrInfo::expandLoadStackGuard(
MachineBasicBlock::iterator MI) const {
MachineFunction &MF = *MI->getParent()->getParent();
const TargetMachine &TM = MF.getTarget();
+ const ARMSubtarget &ST = MF.getSubtarget<ARMSubtarget>();
assert(MF.getFunction().getParent()->getStackProtectorGuard() != "tls" &&
"TLS stack protector not supported for Thumb1 targets");
+ unsigned Instr;
if (TM.isPositionIndependent())
- expandLoadStackGuardBase(MI, ARM::tLDRLIT_ga_pcrel, ARM::tLDRi);
+ Instr = ARM::tLDRLIT_ga_pcrel;
+ else if (ST.genExecuteOnly() && ST.hasV8MBaselineOps())
+ Instr = ARM::t2MOVi32imm;
+ else if (ST.genExecuteOnly())
+ Instr = ARM::tMOVi32imm;
else
- expandLoadStackGuardBase(MI, ARM::tLDRLIT_ga_abs, ARM::tLDRi);
+ Instr = ARM::tLDRLIT_ga_abs;
+ expandLoadStackGuardBase(MI, Instr, ARM::tLDRi);
}
bool Thumb1InstrInfo::canCopyGluedNodeDuringSchedule(SDNode *N) const {
diff --git a/llvm/test/CodeGen/Thumb/stack-guard-xo.ll b/llvm/test/CodeGen/Thumb/stack-guard-xo.ll
new file mode 100644
index 00000000000000..1c2b5bf7145e7f
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb/stack-guard-xo.ll
@@ -0,0 +1,100 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc < %s -mtriple=thumbv6m | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv6m -mattr=+execute-only | FileCheck %s -check-prefix=V6M
+; RUN: llc < %s -mtriple=thumbv8m.base -mattr=+execute-only | FileCheck %s -check-prefix=V8MBASE
+
+define dso_local i32 @main() #0 {
+; CHECK-LABEL: main:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: sub sp, #24
+; CHECK-NEXT: movs r0, #0
+; CHECK-NEXT: str r0, [sp, #4]
+; CHECK-NEXT: ldr r0, .LCPI0_0
+; CHECK-NEXT: ldr r0, [r0]
+; CHECK-NEXT: str r0, [sp, #20]
+; CHECK-NEXT: add r0, sp, #8
+; CHECK-NEXT: ldrb r0, [r0]
+; CHECK-NEXT: ldr r1, [sp, #20]
+; CHECK-NEXT: ldr r2, .LCPI0_0
+; CHECK-NEXT: ldr r2, [r2]
+; CHECK-NEXT: cmp r2, r1
+; CHECK-NEXT: bne .LBB0_2
+; CHECK-NEXT: @ %bb.1: @ %entry
+; CHECK-NEXT: add sp, #24
+; CHECK-NEXT: pop {r7, pc}
+; CHECK-NEXT: .LBB0_2: @ %entry
+; CHECK-NEXT: bl __stack_chk_fail
+; CHECK-NEXT: .p2align 2
+; CHECK-NEXT: @ %bb.3:
+; CHECK-NEXT: .LCPI0_0:
+; CHECK-NEXT: .long __stack_chk_guard
+;
+; V6M-LABEL: main:
+; V6M: @ %bb.0: @ %entry
+; V6M-NEXT: push {r7, lr}
+; V6M-NEXT: sub sp, #24
+; V6M-NEXT: movs r0, #0
+; V6M-NEXT: str r0, [sp, #4]
+; V6M-NEXT: movs r0, :upper8_15:__stack_chk_guard
+; V6M-NEXT: lsls r0, r0, #8
+; V6M-NEXT: adds r0, :upper0_7:__stack_chk_guard
+; V6M-NEXT: lsls r0, r0, #8
+; V6M-NEXT: adds r0, :lower8_15:__stack_chk_guard
+; V6M-NEXT: lsls r0, r0, #8
+; V6M-NEXT: adds r0, :lower0_7:__stack_chk_guard
+; V6M-NEXT: ldr r0, [r0]
+; V6M-NEXT: str r0, [sp, #20]
+; V6M-NEXT: add r0, sp, #8
+; V6M-NEXT: ldrb r0, [r0]
+; V6M-NEXT: ldr r1, [sp, #20]
+; V6M-NEXT: movs r2, :upper8_15:__stack_chk_guard
+; V6M-NEXT: lsls r2, r2, #8
+; V6M-NEXT: adds r2, :upper0_7:__stack_chk_guard
+; V6M-NEXT: lsls r2, r2, #8
+; V6M-NEXT: adds r2, :lower8_15:__stack_chk_guard
+; V6M-NEXT: lsls r2, r2, #8
+; V6M-NEXT: adds r2, :lower0_7:__stack_chk_guard
+; V6M-NEXT: ldr r2, [r2]
+; V6M-NEXT: cmp r2, r1
+; V6M-NEXT: bne .LBB0_2
+; V6M-NEXT: @ %bb.1: @ %entry
+; V6M-NEXT: add sp, #24
+; V6M-NEXT: pop {r7, pc}
+; V6M-NEXT: .LBB0_2: @ %entry
+; V6M-NEXT: bl __stack_chk_fail
+;
+; V8MBASE-LABEL: main:
+; V8MBASE: @ %bb.0: @ %entry
+; V8MBASE-NEXT: push {r7, lr}
+; V8MBASE-NEXT: sub sp, #24
+; V8MBASE-NEXT: movs r0, #0
+; V8MBASE-NEXT: str r0, [sp, #4]
+; V8MBASE-NEXT: movw r0, :lower16:__stack_chk_guard
+; V8MBASE-NEXT: movt r0, :upper16:__stack_chk_guard
+; V8MBASE-NEXT: ldr r0, [r0]
+; V8MBASE-NEXT: str r0, [sp, #20]
+; V8MBASE-NEXT: add r0, sp, #8
+; V8MBASE-NEXT: ldrb r0, [r0]
+; V8MBASE-NEXT: ldr r1, [sp, #20]
+; V8MBASE-NEXT: movw r2, :lower16:__stack_chk_guard
+; V8MBASE-NEXT: movt r2, :upper16:__stack_chk_guard
+; V8MBASE-NEXT: ldr r2, [r2]
+; V8MBASE-NEXT: cmp r2, r1
+; V8MBASE-NEXT: bne .LBB0_2
+; V8MBASE-NEXT: @ %bb.1: @ %entry
+; V8MBASE-NEXT: add sp, #24
+; V8MBASE-NEXT: pop {r7, pc}
+; V8MBASE-NEXT: .LBB0_2: @ %entry
+; V8MBASE-NEXT: bl __stack_chk_fail
+entry:
+ %retval = alloca i32, align 4
+ %buffer = alloca [9 x i8], align 1
+ store i32 0, ptr %retval, align 4
+ %arrayidx = getelementptr inbounds [9 x i8], ptr %buffer, i32 0, i32 0
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = zext i8 %0 to i32
+ ret i32 %conv
+}
+
+attributes #0 = { ssp "stack-protector-buffer-size"="8" }
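
For completeness, a hypothetical source-level reproducer (an assumption on my
part, not taken from the patch) is a function with a local buffer large enough
to trigger the stack protector, mirroring the test IR above:

    // Hypothetical C/C++ equivalent of the test IR: the 9-byte local buffer
    // exceeds the assumed stack-protector-buffer-size of 8, so a stack guard
    // is inserted and its address must be materialized without a literal pool
    // when building for execute-only.
    int main() {
      char buffer[9];
      return buffer[0];
    }

Building this with something like
clang --target=armv8m.base-none-eabi -mexecute-only -fstack-protector -S
(flags are an assumption) should show the movw/movt materialization from the
V8MBASE checks above rather than a .LCPI constant pool entry.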