[llvm] 3f0cc96 - [AArch64] SLSHardening: compute correct thunk name for X29.
Kristof Beyls via llvm-commits
llvm-commits at lists.llvm.org
Wed Jun 17 22:37:16 PDT 2020
Author: Kristof Beyls
Date: 2020-06-18T06:36:49+01:00
New Revision: 3f0cc96a9694e499968544ebda6903982eaeb8a4
URL: https://github.com/llvm/llvm-project/commit/3f0cc96a9694e499968544ebda6903982eaeb8a4
DIFF: https://github.com/llvm/llvm-project/commit/3f0cc96a9694e499968544ebda6903982eaeb8a4.diff
LOG: [AArch64] SLSHardening: compute correct thunk name for X29.
The enum values for AArch64 registers are not all consecutive.
Therefore, the computation
"__llvm_slsblr_thunk_x" + utostr(Reg - AArch64::X0)
is not always correct. utostr(Reg - AArch64::X0) will not generate the
expected string for the registers that do not have consecutive values in
the enum.
This happened to work for most registers, but does not for AArch64::FP
(i.e. register X29).
This can get triggered when X29 is not used as a frame pointer.
Differential Revision: https://reviews.llvm.org/D81997
Added:
Modified:
llvm/lib/Target/AArch64/AArch64SLSHardening.cpp
llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64SLSHardening.cpp b/llvm/lib/Target/AArch64/AArch64SLSHardening.cpp
index 48456397a28b..85829b6a675c 100644
--- a/llvm/lib/Target/AArch64/AArch64SLSHardening.cpp
+++ b/llvm/lib/Target/AArch64/AArch64SLSHardening.cpp
@@ -142,35 +142,45 @@ bool AArch64SLSHardening::hardenReturnsAndBRs(MachineBasicBlock &MBB) const {
static const char SLSBLRNamePrefix[] = "__llvm_slsblr_thunk_";
-static std::array<const char *, 29> SLSBLRThunkNames{{
- "__llvm_slsblr_thunk_x0", "__llvm_slsblr_thunk_x1",
- "__llvm_slsblr_thunk_x2", "__llvm_slsblr_thunk_x3",
- "__llvm_slsblr_thunk_x4", "__llvm_slsblr_thunk_x5",
- "__llvm_slsblr_thunk_x6", "__llvm_slsblr_thunk_x7",
- "__llvm_slsblr_thunk_x8", "__llvm_slsblr_thunk_x9",
- "__llvm_slsblr_thunk_x10", "__llvm_slsblr_thunk_x11",
- "__llvm_slsblr_thunk_x12", "__llvm_slsblr_thunk_x13",
- "__llvm_slsblr_thunk_x14", "__llvm_slsblr_thunk_x15",
- // X16 and X17 are deliberately missing, as the mitigation requires those
- // registers to not be used in BLR. See comment in ConvertBLRToBL for more
- // details.
- "__llvm_slsblr_thunk_x18", "__llvm_slsblr_thunk_x19",
- "__llvm_slsblr_thunk_x20", "__llvm_slsblr_thunk_x21",
- "__llvm_slsblr_thunk_x22", "__llvm_slsblr_thunk_x23",
- "__llvm_slsblr_thunk_x24", "__llvm_slsblr_thunk_x25",
- "__llvm_slsblr_thunk_x26", "__llvm_slsblr_thunk_x27",
- "__llvm_slsblr_thunk_x28", "__llvm_slsblr_thunk_x29",
- // X30 is deliberately missing, for similar reasons as X16 and X17 are
- // missing.
- "__llvm_slsblr_thunk_x31",
-}};
-static std::array<unsigned, 29> SLSBLRThunkRegs{{
- AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3, AArch64::X4,
- AArch64::X5, AArch64::X6, AArch64::X7, AArch64::X8, AArch64::X9,
- AArch64::X10, AArch64::X11, AArch64::X12, AArch64::X13, AArch64::X14,
- AArch64::X15, AArch64::X18, AArch64::X19, AArch64::X20, AArch64::X21,
- AArch64::X22, AArch64::X23, AArch64::X24, AArch64::X25, AArch64::X26,
- AArch64::X27, AArch64::X28, AArch64::FP, AArch64::XZR}};
+static const struct ThunkNameAndReg {
+ const char* Name;
+ Register Reg;
+} SLSBLRThunks[] = {
+ { "__llvm_slsblr_thunk_x0", AArch64::X0},
+ { "__llvm_slsblr_thunk_x1", AArch64::X1},
+ { "__llvm_slsblr_thunk_x2", AArch64::X2},
+ { "__llvm_slsblr_thunk_x3", AArch64::X3},
+ { "__llvm_slsblr_thunk_x4", AArch64::X4},
+ { "__llvm_slsblr_thunk_x5", AArch64::X5},
+ { "__llvm_slsblr_thunk_x6", AArch64::X6},
+ { "__llvm_slsblr_thunk_x7", AArch64::X7},
+ { "__llvm_slsblr_thunk_x8", AArch64::X8},
+ { "__llvm_slsblr_thunk_x9", AArch64::X9},
+ { "__llvm_slsblr_thunk_x10", AArch64::X10},
+ { "__llvm_slsblr_thunk_x11", AArch64::X11},
+ { "__llvm_slsblr_thunk_x12", AArch64::X12},
+ { "__llvm_slsblr_thunk_x13", AArch64::X13},
+ { "__llvm_slsblr_thunk_x14", AArch64::X14},
+ { "__llvm_slsblr_thunk_x15", AArch64::X15},
+ // X16 and X17 are deliberately missing, as the mitigation requires those
+ // registers to not be used in BLR. See comment in ConvertBLRToBL for more
+ // details.
+ { "__llvm_slsblr_thunk_x18", AArch64::X18},
+ { "__llvm_slsblr_thunk_x19", AArch64::X19},
+ { "__llvm_slsblr_thunk_x20", AArch64::X20},
+ { "__llvm_slsblr_thunk_x21", AArch64::X21},
+ { "__llvm_slsblr_thunk_x22", AArch64::X22},
+ { "__llvm_slsblr_thunk_x23", AArch64::X23},
+ { "__llvm_slsblr_thunk_x24", AArch64::X24},
+ { "__llvm_slsblr_thunk_x25", AArch64::X25},
+ { "__llvm_slsblr_thunk_x26", AArch64::X26},
+ { "__llvm_slsblr_thunk_x27", AArch64::X27},
+ { "__llvm_slsblr_thunk_x28", AArch64::X28},
+ { "__llvm_slsblr_thunk_x29", AArch64::FP},
+ // X30 is deliberately missing, for similar reasons as X16 and X17 are
+ // missing.
+ { "__llvm_slsblr_thunk_x31", AArch64::XZR},
+};
namespace {
struct SLSBLRThunkInserter : ThunkInserter<SLSBLRThunkInserter> {
@@ -189,22 +199,18 @@ void SLSBLRThunkInserter::insertThunks(MachineModuleInfo &MMI) {
// FIXME: It probably would be possible to filter which thunks to produce
// based on which registers are actually used in BLR instructions in this
// function. But would that be a worthwhile optimization?
- for (StringRef Name : SLSBLRThunkNames)
- createThunkFunction(MMI, Name);
+ for (auto T : SLSBLRThunks)
+ createThunkFunction(MMI, T.Name);
}
void SLSBLRThunkInserter::populateThunk(MachineFunction &MF) {
// FIXME: How to better communicate Register number, rather than through
// name and lookup table?
assert(MF.getName().startswith(getThunkPrefix()));
- int Index = -1;
- for (int i = 0; i < (int)SLSBLRThunkNames.size(); ++i)
- if (MF.getName() == SLSBLRThunkNames[i]) {
- Index = i;
- break;
- }
- assert(Index != -1);
- Register ThunkReg = SLSBLRThunkRegs[Index];
+ auto ThunkIt = llvm::find_if(
+ SLSBLRThunks, [&MF](auto T) { return T.Name == MF.getName(); });
+ assert(ThunkIt != std::end(SLSBLRThunks));
+ Register ThunkReg = ThunkIt->Reg;
const TargetInstrInfo *TII =
MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
@@ -317,8 +323,10 @@ AArch64SLSHardening::ConvertBLRToBL(MachineBasicBlock &MBB,
// for the future when LLVM can start producing BLRA* instructions.
MachineFunction &MF = *MBBI->getMF();
MCContext &Context = MBB.getParent()->getContext();
- MCSymbol *Sym = Context.getOrCreateSymbol("__llvm_slsblr_thunk_x" +
- utostr(Reg - AArch64::X0));
+ auto ThunkIt =
+ llvm::find_if(SLSBLRThunks, [Reg](auto T) { return T.Reg == Reg; });
+ assert (ThunkIt != std::end(SLSBLRThunks));
+ MCSymbol *Sym = Context.getOrCreateSymbol(ThunkIt->Name);
MachineInstr *BL = BuildMI(MBB, MBBI, DL, TII->get(BLOpcode)).addSym(Sym);
diff --git a/llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll b/llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll
index 15a02148ec5f..7de1611a44ef 100644
--- a/llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll
+++ b/llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mattr=harden-sls-retbr,harden-sls-blr -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,HARDEN,ISBDSB
-; RUN: llc -mattr=harden-sls-retbr,harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,HARDEN,SB
+; RUN: llc -mattr=harden-sls-retbr,harden-sls-blr -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,HARDEN,ISBDSB,ISBDSBDAGISEL
+; RUN: llc -mattr=harden-sls-retbr,harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,HARDEN,SB,SBDAGISEL
; RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,NOHARDEN
@@ -166,6 +166,41 @@ entry:
; CHECK: .Lfunc_end
}
+; Verify that the transformation works correctly for x29 when it is not
+; reserved to be used as a frame pointer.
+; Since this is sensitive to register allocation choices, only check this with
+; DAGIsel to avoid too much accidental breaking of this test that is a bit
+; brittle.
+define i64 @check_x29(i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** nocapture readonly %fp,
+ i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** nocapture readonly %fp2,
+ i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** nocapture readonly %fp3)
+"target-features"="+neon,+reserve-x10,+reserve-x11,+reserve-x12,+reserve-x13,+reserve-x14,+reserve-x15,+reserve-x18,+reserve-x20,+reserve-x21,+reserve-x22,+reserve-x23,+reserve-x24,+reserve-x25,+reserve-x26,+reserve-x27,+reserve-x28,+reserve-x9"
+"frame-pointer"="none"
+{
+entry:
+; CHECK-LABEL: check_x29:
+ %0 = load i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)*, i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp, align 8
+ %1 = bitcast i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp2 to i8**
+ %2 = load i8*, i8** %1, align 8
+ %3 = load i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)*, i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp2, align 8
+ %4 = bitcast i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp3 to i8**
+ %5 = load i8*, i8** %4, align 8
+ %6 = load i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)*, i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp3, align 8
+ %7 = bitcast i64 (i8*, i8*, i64, i64, i64, i64, i64, i64)** %fp to i8**
+ %8 = load i8*, i8** %7, align 8
+ %call = call i64 %0(i8* %2, i8* %5, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
+ %call1 = call i64 %3(i8* %2, i8* %5, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
+; NOHARDEN: blr x29
+; ISBDSBDAGISEL: bl __llvm_slsblr_thunk_x29
+; SBDAGISEL: bl __llvm_slsblr_thunk_x29
+; CHECK
+ %call2 = call i64 %6(i8* %2, i8* %8, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
+ %add = add nsw i64 %call1, %call
+ %add1 = add nsw i64 %call2, %add
+ ret i64 %add1
+; CHECK: .Lfunc_end
+}
+
; HARDEN-label: __llvm_slsblr_thunk_x0:
; HARDEN: br x0
; ISBDSB-NEXT: dsb sy
More information about the llvm-commits
mailing list