[llvm] r366361 - Basic MTE stack tagging instrumentation.
Evgeniy Stepanov via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 17 12:24:12 PDT 2019
Author: eugenis
Date: Wed Jul 17 12:24:12 2019
New Revision: 366361
URL: http://llvm.org/viewvc/llvm-project?rev=366361&view=rev
Log:
Basic MTE stack tagging instrumentation.
Summary:
Use MTE intrinsics to tag stack variables in functions with
sanitize_memtag attribute.
Reviewers: pcc, vitalybuka, hctim, ostannard
Subscribers: srhines, mgorny, javed.absar, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D64173
Added:
llvm/trunk/lib/Target/AArch64/AArch64StackTagging.cpp
llvm/trunk/test/CodeGen/AArch64/stack-tagging-dbg.ll
llvm/trunk/test/CodeGen/AArch64/stack-tagging.ll
Modified:
llvm/trunk/lib/Target/AArch64/AArch64.h
llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp
llvm/trunk/lib/Target/AArch64/CMakeLists.txt
llvm/trunk/test/CodeGen/AArch64/O0-pipeline.ll
llvm/trunk/test/CodeGen/AArch64/O3-pipeline.ll
Modified: llvm/trunk/lib/Target/AArch64/AArch64.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64.h?rev=366361&r1=366360&r2=366361&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64.h Wed Jul 17 12:24:12 2019
@@ -56,6 +56,7 @@ InstructionSelector *
createAArch64InstructionSelector(const AArch64TargetMachine &,
AArch64Subtarget &, AArch64RegisterBankInfo &);
FunctionPass *createAArch64PreLegalizeCombiner();
+FunctionPass *createAArch64StackTaggingPass();
void initializeAArch64A53Fix835769Pass(PassRegistry&);
void initializeAArch64A57FPLoadBalancingPass(PassRegistry&);
@@ -78,6 +79,7 @@ void initializeAArch64StorePairSuppressP
void initializeFalkorHWPFFixPass(PassRegistry&);
void initializeFalkorMarkStridedAccessesLegacyPass(PassRegistry&);
void initializeLDTLSCleanupPass(PassRegistry&);
+void initializeAArch64StackTaggingPass(PassRegistry&);
} // end namespace llvm
#endif
Added: llvm/trunk/lib/Target/AArch64/AArch64StackTagging.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64StackTagging.cpp?rev=366361&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64StackTagging.cpp (added)
+++ llvm/trunk/lib/Target/AArch64/AArch64StackTagging.cpp Wed Jul 17 12:24:12 2019
@@ -0,0 +1,345 @@
+//===- AArch64StackTagging.cpp - Stack tagging in IR --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64.h"
+#include "AArch64InstrInfo.h"
+#include "AArch64Subtarget.h"
+#include "AArch64TargetMachine.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/LiveRegUnits.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include <cassert>
+#include <iterator>
+#include <utility>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "stack-tagging"
+
+static constexpr unsigned kTagGranuleSize = 16;
+
+namespace {
+
+// IR-level pass implementing MTE stack tagging: in functions carrying the
+// sanitize_memtag attribute, interesting static allocas are padded to the
+// 16-byte tag granule, rebased through llvm.aarch64.tagp, and tagged/untagged
+// with llvm.aarch64.settag around their lifetime (or whole function).
+class AArch64StackTagging : public FunctionPass {
+ // Everything this pass tracks about a single alloca.
+ struct AllocaInfo {
+ AllocaInst *AI;
+ // lifetime.start/end markers traced back to this alloca; used to narrow
+ // the tagged interval when exactly one start/end pair exists.
+ SmallVector<IntrinsicInst *, 2> LifetimeStart;
+ SmallVector<IntrinsicInst *, 2> LifetimeEnd;
+ // Debug intrinsics referring to the alloca; retargeted after the alloca
+ // is replaced so debug info keeps pointing at the (untagged) storage.
+ SmallVector<DbgVariableIntrinsic *, 2> DbgVariableIntrinsics;
+ int Tag; // -1 for non-tagged allocations
+ };
+
+public:
+ static char ID; // Pass ID, replacement for typeid
+
+ AArch64StackTagging() : FunctionPass(ID) {
+ initializeAArch64StackTaggingPass(*PassRegistry::getPassRegistry());
+ }
+
+ // True if AI is a sized, static, non-zero-size alloca this pass should tag.
+ bool isInterestingAlloca(const AllocaInst &AI);
+ // Raise alignment to the tag granule and pad the allocation size up to a
+ // granule multiple, replacing the alloca when padding is needed.
+ void alignAndPadAlloca(AllocaInfo &Info);
+
+ // Emit llvm.aarch64.settag calls that tag / untag Size bytes.
+ void tagAlloca(AllocaInst *AI, Instruction *InsertBefore, Value *Ptr,
+                 uint64_t Size);
+ void untagAlloca(AllocaInst *AI, Instruction *InsertBefore, uint64_t Size);
+
+ // Materialize the llvm.aarch64.irg.sp base pointer in the nearest common
+ // dominator of all tagged allocas. DT may be null when the caller knows a
+ // single basic block is involved (see runOnFunction).
+ Instruction *
+ insertBaseTaggedPointer(const MapVector<AllocaInst *, AllocaInfo> &Allocas,
+                          const DominatorTree *DT);
+ bool runOnFunction(Function &F) override;
+
+ StringRef getPassName() const override { return "AArch64 Stack Tagging"; }
+
+private:
+ // Per-function state, set up at the start of runOnFunction.
+ Function *F;
+ // Cached declaration of llvm.aarch64.settag for the current module.
+ Function *SetTagFunc;
+ const DataLayout *DL;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ }
+};
+
+} // end anonymous namespace
+
+char AArch64StackTagging::ID = 0;
+
+// Register the pass so -stack-tagging is usable from opt and the pass shows
+// up as "AArch64 Stack Tagging" in pass-pipeline listings.
+INITIALIZE_PASS_BEGIN(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
+ false, false)
+INITIALIZE_PASS_END(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
+ false, false)
+
+// Factory used by the AArch64 pass pipeline (see AArch64TargetMachine).
+FunctionPass *llvm::createAArch64StackTaggingPass() {
+ return new AArch64StackTagging();
+}
+
+// Decide whether an alloca should be instrumented. Only sized, static,
+// non-empty allocas qualify; dynamic, inalloca and swifterror allocas are
+// excluded. Note the short-circuit order: isSized()/isStaticAlloca() are
+// checked before dereferencing getAllocationSizeInBits, which is only known
+// for such allocas.
+bool AArch64StackTagging::isInterestingAlloca(const AllocaInst &AI) {
+ // FIXME: support dynamic allocas
+ bool IsInteresting =
+ AI.getAllocatedType()->isSized() && AI.isStaticAlloca() &&
+ // alloca() may be called with 0 size, ignore it.
+ AI.getAllocationSizeInBits(*DL).getValue() > 0 &&
+ // inalloca allocas are not treated as static, and we don't want
+ // dynamic alloca instrumentation for them as well.
+ !AI.isUsedWithInAlloca() &&
+ // swifterror allocas are register promoted by ISel
+ !AI.isSwiftError();
+ return IsInteresting;
+}
+
+// Tag Size bytes starting at Ptr by emitting llvm.aarch64.settag before
+// InsertBefore. Ptr is expected to already carry the allocation's tag (it
+// comes from tagp). NOTE(review): the AI parameter is not used in this body;
+// presumably kept for symmetry with untagAlloca — confirm before removing.
+void AArch64StackTagging::tagAlloca(AllocaInst *AI, Instruction *InsertBefore,
+ Value *Ptr, uint64_t Size) {
+ IRBuilder<> IRB(InsertBefore);
+ IRB.CreateCall(SetTagFunc, {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
+}
+
+// Restore the background tag on Size bytes of AI by calling settag on the
+// original (untagged) alloca pointer, inserted before InsertBefore.
+void AArch64StackTagging::untagAlloca(AllocaInst *AI, Instruction *InsertBefore,
+ uint64_t Size) {
+ IRBuilder<> IRB(InsertBefore);
+ IRB.CreateCall(SetTagFunc, {IRB.CreatePointerCast(AI, IRB.getInt8PtrTy()),
+ ConstantInt::get(IRB.getInt64Ty(), Size)});
+}
+
+// Insert the llvm.aarch64.irg.sp call that produces the random base tag for
+// this function. It is placed at the front of the nearest common dominator of
+// all tagged allocas rather than unconditionally in the entry block.
+// DT may be null: callers pass null only when there is exactly one tagged
+// alloca, in which case the findNearestCommonDominator call below is never
+// reached.
+Instruction *AArch64StackTagging::insertBaseTaggedPointer(
+ const MapVector<AllocaInst *, AllocaInfo> &Allocas,
+ const DominatorTree *DT) {
+ BasicBlock *PrologueBB = nullptr;
+ // Try sinking IRG as deep as possible to avoid hurting shrink wrap.
+ for (auto &I : Allocas) {
+ const AllocaInfo &Info = I.second;
+ AllocaInst *AI = Info.AI;
+ if (Info.Tag < 0)
+ continue;
+ if (!PrologueBB) {
+ PrologueBB = AI->getParent();
+ continue;
+ }
+ PrologueBB = DT->findNearestCommonDominator(PrologueBB, AI->getParent());
+ }
+ // Callers only invoke this when at least one alloca is tagged.
+ assert(PrologueBB);
+
+ IRBuilder<> IRB(&PrologueBB->front());
+ Function *IRG_SP =
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_irg_sp);
+ // irg.sp takes an exclusion mask; 0 excludes no tags.
+ Instruction *Base =
+ IRB.CreateCall(IRG_SP, {Constant::getNullValue(IRB.getInt64Ty())});
+ Base->setName("basetag");
+ return Base;
+}
+
+// MTE tags memory in 16-byte granules, so every tagged alloca must be
+// 16-aligned and a multiple of 16 bytes in size. Raise the alignment in
+// place; if the size needs padding, replace the alloca with a new one of type
+// { OriginalType, [N x i8] } and bitcast it back to the original pointer type
+// for existing users.
+void AArch64StackTagging::alignAndPadAlloca(AllocaInfo &Info) {
+ unsigned NewAlignment = std::max(Info.AI->getAlignment(), kTagGranuleSize);
+ Info.AI->setAlignment(NewAlignment);
+
+ uint64_t Size = Info.AI->getAllocationSizeInBits(*DL).getValue() / 8;
+ uint64_t AlignedSize = alignTo(Size, kTagGranuleSize);
+ if (Size == AlignedSize)
+ return;
+
+ // Add padding to the alloca.
+ // For array allocations, fold the constant element count into an array
+ // type first so the padded struct has the full allocation as one field.
+ // NOTE(review): dyn_cast here is not null-checked; it appears to rely on
+ // static allocas having a constant array size (cast<> would state that
+ // invariant explicitly) — confirm.
+ Type *AllocatedType =
+ Info.AI->isArrayAllocation()
+ ? ArrayType::get(
+ Info.AI->getAllocatedType(),
+ dyn_cast<ConstantInt>(Info.AI->getArraySize())->getZExtValue())
+ : Info.AI->getAllocatedType();
+ Type *PaddingType =
+ ArrayType::get(Type::getInt8Ty(F->getContext()), AlignedSize - Size);
+ Type *TypeWithPadding = StructType::get(AllocatedType, PaddingType);
+ auto *NewAI = new AllocaInst(
+ TypeWithPadding, Info.AI->getType()->getAddressSpace(), nullptr, "", Info.AI);
+ // Preserve the old alloca's name, flags and metadata on the replacement.
+ NewAI->takeName(Info.AI);
+ NewAI->setAlignment(Info.AI->getAlignment());
+ NewAI->setUsedWithInAlloca(Info.AI->isUsedWithInAlloca());
+ NewAI->setSwiftError(Info.AI->isSwiftError());
+ NewAI->copyMetadata(*Info.AI);
+
+ // Users still expect the original pointer type; route them through a cast.
+ auto *NewPtr = new BitCastInst(NewAI, Info.AI->getType(), "", Info.AI);
+ Info.AI->replaceAllUsesWith(NewPtr);
+ Info.AI->eraseFromParent();
+ Info.AI = NewAI;
+}
+
+// FIXME: check for MTE extension
+// Entry point. Instruments all interesting allocas of a sanitize_memtag
+// function; returns true iff the IR was changed.
+bool AArch64StackTagging::runOnFunction(Function &Fn) {
+ if (!Fn.hasFnAttribute(Attribute::SanitizeMemTag))
+ return false;
+
+ F = &Fn;
+ DL = &Fn.getParent()->getDataLayout();
+
+ MapVector<AllocaInst *, AllocaInfo> Allocas; // need stable iteration order
+ SmallVector<Instruction *, 8> RetVec;
+ DenseMap<Value *, AllocaInst *> AllocaForValue;
+ SmallVector<Instruction *, 4> UnrecognizedLifetimes;
+
+ // Phase 1: scan the function, collecting allocas, the lifetime markers and
+ // debug intrinsics attached to them, and every function-exit instruction.
+ for (auto &BB : *F) {
+ for (BasicBlock::iterator IT = BB.begin(); IT != BB.end(); ++IT) {
+ Instruction *I = &*IT;
+ if (auto *AI = dyn_cast<AllocaInst>(I)) {
+ Allocas[AI].AI = AI;
+ continue;
+ }
+
+ if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(I)) {
+ if (auto *AI =
+ dyn_cast_or_null<AllocaInst>(DVI->getVariableLocation())) {
+ Allocas[AI].DbgVariableIntrinsics.push_back(DVI);
+ }
+ continue;
+ }
+
+ auto *II = dyn_cast<IntrinsicInst>(I);
+ if (II && (II->getIntrinsicID() == Intrinsic::lifetime_start ||
+ II->getIntrinsicID() == Intrinsic::lifetime_end)) {
+ AllocaInst *AI =
+ llvm::findAllocaForValue(II->getArgOperand(1), AllocaForValue);
+ // A lifetime marker whose pointer cannot be traced to one alloca
+ // forces the conservative whole-function tagging path below.
+ if (!AI) {
+ UnrecognizedLifetimes.push_back(I);
+ continue;
+ }
+ if (II->getIntrinsicID() == Intrinsic::lifetime_start)
+ Allocas[AI].LifetimeStart.push_back(II);
+ else
+ Allocas[AI].LifetimeEnd.push_back(II);
+ }
+
+ if (isa<ReturnInst>(I) || isa<ResumeInst>(I) || isa<CleanupReturnInst>(I))
+ RetVec.push_back(I);
+ }
+ }
+
+ if (Allocas.empty())
+ return false;
+
+ // Phase 2: assign round-robin tags 0..15 to interesting allocas and pad
+ // them to the 16-byte granule; everything else gets Tag = -1.
+ int NextTag = 0;
+ int NumInterestingAllocas = 0;
+ for (auto &I : Allocas) {
+ AllocaInfo &Info = I.second;
+ assert(Info.AI);
+
+ if (!isInterestingAlloca(*Info.AI)) {
+ Info.Tag = -1;
+ continue;
+ }
+
+ alignAndPadAlloca(Info);
+ NumInterestingAllocas++;
+ Info.Tag = NextTag;
+ NextTag = (NextTag + 1) % 16;
+ }
+
+ if (NumInterestingAllocas == 0)
+ return true;
+
+ SetTagFunc =
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag);
+
+ // Compute DT only if the function has the attribute, there is more than
+ // one interesting alloca, and it is not available for free.
+ Instruction *Base;
+ if (NumInterestingAllocas > 1) {
+ auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+ if (DTWP) {
+ Base = insertBaseTaggedPointer(Allocas, &DTWP->getDomTree());
+ } else {
+ DominatorTree DT(*F);
+ Base = insertBaseTaggedPointer(Allocas, &DT);
+ }
+ } else {
+ // Exactly one tagged alloca: insertBaseTaggedPointer never queries DT.
+ Base = insertBaseTaggedPointer(Allocas, nullptr);
+ }
+
+ // Phase 3: rewrite each tagged alloca.
+ for (auto &I : Allocas) {
+ const AllocaInfo &Info = I.second;
+ AllocaInst *AI = Info.AI;
+ if (Info.Tag < 0)
+ continue;
+
+ // Replace alloca with tagp(alloca).
+ IRBuilder<> IRB(Info.AI->getNextNode());
+ Function *TagP = Intrinsic::getDeclaration(
+ F->getParent(), Intrinsic::aarch64_tagp, {Info.AI->getType()});
+ Instruction *TagPCall =
+ IRB.CreateCall(TagP, {Constant::getNullValue(Info.AI->getType()), Base,
+ ConstantInt::get(IRB.getInt64Ty(), Info.Tag)});
+ if (Info.AI->hasName())
+ TagPCall->setName(Info.AI->getName() + ".tag");
+ // replaceAllUsesWith also rewrote tagp's own (placeholder) first
+ // operand; point it back at the original alloca.
+ Info.AI->replaceAllUsesWith(TagPCall);
+ TagPCall->setOperand(0, Info.AI);
+
+ // With one recognized lifetime pair and no unrecognized markers anywhere
+ // in the function, tag at lifetime.start and untag at lifetime.end.
+ if (UnrecognizedLifetimes.empty() && Info.LifetimeStart.size() == 1 &&
+ Info.LifetimeEnd.size() == 1) {
+ IntrinsicInst *Start = Info.LifetimeStart[0];
+ uint64_t Size =
+ dyn_cast<ConstantInt>(Start->getArgOperand(0))->getZExtValue();
+ Size = alignTo(Size, kTagGranuleSize);
+ tagAlloca(AI, Start->getNextNode(), Start->getArgOperand(1), Size);
+ untagAlloca(AI, Info.LifetimeEnd[0], Size);
+ } else {
+ // Conservative path: tag right after tagp, untag before every exit.
+ uint64_t Size = Info.AI->getAllocationSizeInBits(*DL).getValue() / 8;
+ tagAlloca(AI, TagPCall->getNextNode(),
+ IRB.CreatePointerCast(TagPCall, IRB.getInt8PtrTy()), Size);
+ for (auto &RI : RetVec) {
+ untagAlloca(AI, RI, Size);
+ }
+ // We may have inserted tag/untag outside of any lifetime interval.
+ // Remove all lifetime intrinsics for this alloca.
+ for (auto &II : Info.LifetimeStart)
+ II->eraseFromParent();
+ for (auto &II : Info.LifetimeEnd)
+ II->eraseFromParent();
+ }
+
+ // Fixup debug intrinsics to point to the new alloca.
+ for (auto DVI : Info.DbgVariableIntrinsics)
+ DVI->setArgOperand(
+ 0,
+ MetadataAsValue::get(F->getContext(), LocalAsMetadata::get(Info.AI)));
+ }
+
+ // If we have instrumented at least one alloca, all unrecognized lifetime
+ // intrinsics have to go.
+ for (auto &I : UnrecognizedLifetimes)
+ I->eraseFromParent();
+
+ return true;
+}
Modified: llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp?rev=366361&r1=366360&r2=366361&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp Wed Jul 17 12:24:12 2019
@@ -179,6 +179,7 @@ extern "C" void LLVMInitializeAArch64Tar
initializeFalkorMarkStridedAccessesLegacyPass(*PR);
initializeLDTLSCleanupPass(*PR);
initializeAArch64SpeculationHardeningPass(*PR);
+ initializeAArch64StackTaggingPass(*PR);
}
//===----------------------------------------------------------------------===//
@@ -446,6 +447,8 @@ void AArch64PassConfig::addIRPasses() {
// invariant.
addPass(createLICMPass());
}
+
+ addPass(createAArch64StackTaggingPass());
}
// Pass Pipeline Configuration
Modified: llvm/trunk/lib/Target/AArch64/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/CMakeLists.txt?rev=366361&r1=366360&r2=366361&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/AArch64/CMakeLists.txt Wed Jul 17 12:24:12 2019
@@ -55,6 +55,7 @@ add_llvm_target(AArch64CodeGen
AArch64RegisterInfo.cpp
AArch64SelectionDAGInfo.cpp
AArch64SpeculationHardening.cpp
+ AArch64StackTagging.cpp
AArch64StorePairSuppress.cpp
AArch64Subtarget.cpp
AArch64TargetMachine.cpp
Modified: llvm/trunk/test/CodeGen/AArch64/O0-pipeline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/O0-pipeline.ll?rev=366361&r1=366360&r2=366361&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/O0-pipeline.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/O0-pipeline.ll Wed Jul 17 12:24:12 2019
@@ -25,6 +25,7 @@
; CHECK-NEXT: Instrument function entry/exit with calls to e.g. mcount() (post inlining)
; CHECK-NEXT: Scalarize Masked Memory Intrinsics
; CHECK-NEXT: Expand reduction intrinsics
+; CHECK-NEXT: AArch64 Stack Tagging
; CHECK-NEXT: Rewrite Symbols
; CHECK-NEXT: FunctionPass Manager
; CHECK-NEXT: Dominator Tree Construction
Modified: llvm/trunk/test/CodeGen/AArch64/O3-pipeline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/O3-pipeline.ll?rev=366361&r1=366360&r2=366361&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/O3-pipeline.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/O3-pipeline.ll Wed Jul 17 12:24:12 2019
@@ -55,6 +55,7 @@
; CHECK-NEXT: Interleaved Load Combine Pass
; CHECK-NEXT: Dominator Tree Construction
; CHECK-NEXT: Interleaved Access Pass
+; CHECK-NEXT: AArch64 Stack Tagging
; CHECK-NEXT: Natural Loop Information
; CHECK-NEXT: CodeGen Prepare
; CHECK-NEXT: Rewrite Symbols
Added: llvm/trunk/test/CodeGen/AArch64/stack-tagging-dbg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/stack-tagging-dbg.ll?rev=366361&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/stack-tagging-dbg.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/stack-tagging-dbg.ll Wed Jul 17 12:24:12 2019
@@ -0,0 +1,37 @@
+; RUN: opt < %s -stack-tagging -S -o - | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-android"
+
+declare void @use32(i32*)
+declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone speculatable
+
+; Debug intrinsics use the new alloca directly, not through a GEP or a tagp.
+define void @DbgIntrinsics() sanitize_memtag {
+entry:
+ %x = alloca i32, align 4
+ call void @llvm.dbg.declare(metadata i32* %x, metadata !6, metadata !DIExpression()), !dbg !10
+ store i32 42, i32* %x, align 4
+ call void @use32(i32* %x)
+ ret void
+}
+
+; CHECK-LABEL: define void @DbgIntrinsics(
+; CHECK: [[X:%.*]] = alloca { i32, [12 x i8] }, align 16
+; CHECK: call void @llvm.dbg.declare(metadata { i32, [12 x i8] }* [[X]],
+
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!8, !9}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, producer: "clang version 9.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, nameTableKind: None)
+!1 = !DIFile(filename: "stack-tagging.cc", directory: "/tmp")
+!2 = !{}
+!3 = distinct !DISubprogram(name: "DbgIntrinsics", linkageName: "DbgIntrinsics", scope: !1, file: !1, line: 3, type: !4, scopeLine: 3, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !2)
+!4 = !DISubroutineType(types: !5)
+!5 = !{null}
+!6 = !DILocalVariable(name: "x", scope: !3, file: !1, line: 4, type: !7)
+!7 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!8 = !{i32 2, !"Dwarf Version", i32 4}
+!9 = !{i32 2, !"Debug Info Version", i32 3}
+!10 = !DILocation(line: 1, column: 2, scope: !3)
Added: llvm/trunk/test/CodeGen/AArch64/stack-tagging.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/stack-tagging.ll?rev=366361&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/stack-tagging.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/stack-tagging.ll Wed Jul 17 12:24:12 2019
@@ -0,0 +1,187 @@
+; RUN: opt < %s -stack-tagging -S -o - | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-android"
+
+declare void @use8(i8*)
+declare void @use32(i32*)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+
+define void @OneVar() sanitize_memtag {
+entry:
+ %x = alloca i32, align 4
+ call void @use32(i32* %x)
+ ret void
+}
+
+; CHECK-LABEL: define void @OneVar(
+; CHECK: [[BASE:%.*]] = call i8* @llvm.aarch64.irg.sp(i64 0)
+; CHECK: [[X:%.*]] = alloca { i32, [12 x i8] }, align 16
+; CHECK: [[TX:%.*]] = call { i32, [12 x i8] }* @llvm.aarch64.tagp.{{.*}}({ i32, [12 x i8] }* [[X]], i8* [[BASE]], i64 0)
+; CHECK: [[TX8:%.*]] = bitcast { i32, [12 x i8] }* [[TX]] to i8*
+; CHECK: call void @llvm.aarch64.settag(i8* [[TX8]], i64 16)
+; CHECK: [[GEP32:%.*]] = bitcast { i32, [12 x i8] }* [[TX]] to i32*
+; CHECK: call void @use32(i32* [[GEP32]])
+; CHECK: [[GEP8:%.*]] = bitcast { i32, [12 x i8] }* [[X]] to i8*
+; CHECK: call void @llvm.aarch64.settag(i8* [[GEP8]], i64 16)
+; CHECK: ret void
+
+
+define void @ManyVars() sanitize_memtag {
+entry:
+ %x1 = alloca i32, align 4
+ %x2 = alloca i8, align 4
+ %x3 = alloca i32, i32 11, align 4
+ call void @use32(i32* %x1)
+ call void @use8(i8* %x2)
+ call void @use32(i32* %x3)
+ ret void
+}
+
+; CHECK-LABEL: define void @ManyVars(
+; CHECK: alloca { i32, [12 x i8] }, align 16
+; CHECK: call { i32, [12 x i8] }* @llvm.aarch64.tagp.{{.*}}({ i32, [12 x i8] }* {{.*}}, i64 0)
+; CHECK: call void @llvm.aarch64.settag(i8* {{.*}}, i64 16)
+; CHECK: alloca { i8, [15 x i8] }, align 16
+; CHECK: call { i8, [15 x i8] }* @llvm.aarch64.tagp.{{.*}}({ i8, [15 x i8] }* {{.*}}, i64 1)
+; CHECK: call void @llvm.aarch64.settag(i8* {{.*}}, i64 16)
+; CHECK: alloca { [11 x i32], [4 x i8] }, align 16
+; CHECK: call { [11 x i32], [4 x i8] }* @llvm.aarch64.tagp.{{.*}}({ [11 x i32], [4 x i8] }* {{.*}}, i64 2)
+; CHECK: call void @llvm.aarch64.settag(i8* {{.*}}, i64 48)
+
+; CHECK: call void @use32(
+; CHECK: call void @use8(
+; CHECK: call void @use32(
+
+; CHECK: call void @llvm.aarch64.settag(i8* {{.*}}, i64 16)
+; CHECK: call void @llvm.aarch64.settag(i8* {{.*}}, i64 16)
+; CHECK: call void @llvm.aarch64.settag(i8* {{.*}}, i64 48)
+; CHECK-NEXT: ret void
+
+
+define void @Scope(i32 %b) sanitize_memtag {
+entry:
+ %x = alloca i32, align 4
+ %tobool = icmp eq i32 %b, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ %0 = bitcast i32* %x to i8*
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
+ call void @use8(i8* %0) #3
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: define void @Scope(
+; CHECK: br i1
+; CHECK: call void @llvm.lifetime.start.p0i8(
+; CHECK: call void @llvm.aarch64.settag(
+; CHECK: call void @use8(
+; CHECK: call void @llvm.aarch64.settag(
+; CHECK: call void @llvm.lifetime.end.p0i8(
+; CHECK: br label
+; CHECK: ret void
+
+
+; Spooked by the multiple lifetime ranges, StackTagging removes all of them and sets tags on entry and exit.
+define void @BadScope(i32 %b) sanitize_memtag {
+entry:
+ %x = alloca i32, align 4
+ %tobool = icmp eq i32 %b, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ %0 = bitcast i32* %x to i8*
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
+ call void @use8(i8* %0) #3
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
+ call void @use8(i8* %0) #3
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: define void @BadScope(
+; CHECK: call void @llvm.aarch64.settag(i8* {{.*}}, i64 16)
+; CHECK: br i1
+; CHECK: call void @use8(i8*
+; CHECK-NEXT: call void @use8(i8*
+; CHECK: br label
+; CHECK: call void @llvm.aarch64.settag(i8* {{.*}}, i64 16)
+; CHECK-NEXT: ret void
+
+define void @DynamicAllocas(i32 %cnt) sanitize_memtag {
+entry:
+ %x = alloca i32, i32 %cnt, align 4
+ br label %l
+l:
+ %y = alloca i32, align 4
+ call void @use32(i32* %x)
+ call void @use32(i32* %y)
+ ret void
+}
+
+; CHECK-LABEL: define void @DynamicAllocas(
+; CHECK-NOT: @llvm.aarch64.irg.sp
+; CHECK: %x = alloca i32, i32 %cnt, align 4
+; CHECK-NOT: @llvm.aarch64.irg.sp
+; CHECK: alloca i32, align 4
+; CHECK-NOT: @llvm.aarch64.irg.sp
+; CHECK: ret void
+
+; If we can't trace one of the lifetime markers to a single alloca, fall back
+; to poisoning all allocas at the beginning of the function.
+; Each alloca must be poisoned only once.
+define void @UnrecognizedLifetime(i8 %v) sanitize_memtag {
+entry:
+ %x = alloca i32, align 4
+ %y = alloca i32, align 4
+ %z = alloca i32, align 4
+ %cx = bitcast i32* %x to i8*
+ %cy = bitcast i32* %y to i8*
+ %cz = bitcast i32* %z to i8*
+ %tobool = icmp eq i8 %v, 0
+ %xy = select i1 %tobool, i32* %x, i32* %y
+ %cxcy = select i1 %tobool, i8* %cx, i8* %cy
+ br label %another_bb
+
+another_bb:
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %cz)
+ store i32 7, i32* %z
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %cz)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %cz)
+ store i32 7, i32* %z
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %cz)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %cxcy)
+ store i32 8, i32* %xy
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %cxcy)
+ ret void
+}
+
+; CHECK-LABEL: define void @UnrecognizedLifetime(
+; CHECK: call i8* @llvm.aarch64.irg.sp(i64 0)
+; CHECK: alloca { i32, [12 x i8] }, align 16
+; CHECK: call { i32, [12 x i8] }* @llvm.aarch64.tagp
+; CHECK: call void @llvm.aarch64.settag(
+; CHECK: alloca { i32, [12 x i8] }, align 16
+; CHECK: call { i32, [12 x i8] }* @llvm.aarch64.tagp
+; CHECK: call void @llvm.aarch64.settag(
+; CHECK: alloca { i32, [12 x i8] }, align 16
+; CHECK: call { i32, [12 x i8] }* @llvm.aarch64.tagp
+; CHECK: call void @llvm.aarch64.settag(
+; CHECK: store i32
+; CHECK: store i32
+; CHECK: store i32
+; CHECK: call void @llvm.aarch64.settag(
+; CHECK: call void @llvm.aarch64.settag(
+; CHECK: call void @llvm.aarch64.settag(
+; CHECK: ret void
More information about the llvm-commits
mailing list