[llvm] d6b3be3 - [NFC][Asan] Prepare AddressSanitizer to detect inserted runtime calls (#84223)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Mar 7 07:54:45 PST 2024
Author: sylvain-audi
Date: 2024-03-07T10:54:41-05:00
New Revision: d6b3be375ffed14fefc93c2031cd56e680afd0c1
URL: https://github.com/llvm/llvm-project/commit/d6b3be375ffed14fefc93c2031cd56e680afd0c1
DIFF: https://github.com/llvm/llvm-project/commit/d6b3be375ffed14fefc93c2031cd56e680afd0c1.diff
LOG: [NFC][Asan] Prepare AddressSanitizer to detect inserted runtime calls (#84223)
This is in preparation for an upcoming commit that will add a "funclet"
OpBundle to the inserted runtime calls where the function's EH
personality requires it.
See PR https://github.com/llvm/llvm-project/pull/82533
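For readers following along, here is a minimal, hypothetical sketch of the
direction (simplified from what PR #82533 proposes; the funclet detection
below is an assumption for illustration, not part of this NFC commit). The
idea is that once every runtime call is funneled through
RuntimeCallInserter::createRuntimeCall, that one function becomes the single
place where a "funclet" operand bundle can be attached:

  // Hypothetical follow-up sketch, not this commit. Real detection would
  // consult the function's EH personality and the funclet coloring; for
  // brevity this only looks at a block-leading funclet pad.
  CallInst *createRuntimeCall(IRBuilder<> &IRB, FunctionCallee Callee,
                              ArrayRef<Value *> Args = {},
                              const Twine &Name = "") {
    assert(IRB.GetInsertBlock()->getParent() == OwnerFn);
    SmallVector<OperandBundleDef, 1> Bundles;
    if (auto *Pad = dyn_cast_or_null<FuncletPadInst>(
            IRB.GetInsertBlock()->getFirstNonPHI())) {
      // Tag the call so WinEH preparation keeps it inside the funclet.
      Value *PadVal = Pad;
      Bundles.emplace_back("funclet", PadVal);
    }
    return IRB.CreateCall(Callee, Args, Bundles, Name);
  }

Landing the no-op version first means the mechanical threading of RTCI
through every instrumentation helper can be reviewed separately from the
funclet logic itself.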
Added:
Modified:
llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index f22f53b8cd8fc6..c95a50a033b1b2 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -642,6 +642,23 @@ static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
}
namespace {
+/// Helper RAII class to post-process inserted asan runtime calls during a
+/// pass on a single Function. This is a no-op implementation, for a first NFC
+/// commit. Coming up: detect and add "funclet" opBundle to function calls that
+/// need them.
+class RuntimeCallInserter {
+ Function *OwnerFn = nullptr;
+
+public:
+ RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {}
+
+ CallInst *createRuntimeCall(IRBuilder<> &IRB, FunctionCallee Callee,
+ ArrayRef<Value *> Args = {},
+ const Twine &Name = "") {
+ assert(IRB.GetInsertBlock()->getParent() == OwnerFn);
+ return IRB.CreateCall(Callee, Args, Name, nullptr);
+ }
+};
/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer {
@@ -691,12 +708,14 @@ struct AddressSanitizer {
void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
InterestingMemoryOperand &O, bool UseCalls,
- const DataLayout &DL);
- void instrumentPointerComparisonOrSubtraction(Instruction *I);
+ const DataLayout &DL, RuntimeCallInserter &RTCI);
+ void instrumentPointerComparisonOrSubtraction(Instruction *I,
+ RuntimeCallInserter &RTCI);
void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
Value *Addr, MaybeAlign Alignment,
uint32_t TypeStoreSize, bool IsWrite,
- Value *SizeArgument, bool UseCalls, uint32_t Exp);
+ Value *SizeArgument, bool UseCalls, uint32_t Exp,
+ RuntimeCallInserter &RTCI);
Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
Instruction *InsertBefore, Value *Addr,
uint32_t TypeStoreSize, bool IsWrite,
@@ -707,20 +726,22 @@ struct AddressSanitizer {
Instruction *InsertBefore, Value *Addr,
TypeSize TypeStoreSize, bool IsWrite,
Value *SizeArgument, bool UseCalls,
- uint32_t Exp);
+ uint32_t Exp,
+ RuntimeCallInserter &RTCI);
void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, const DataLayout &DL,
Type *IntptrTy, Value *Mask, Value *EVL,
Value *Stride, Instruction *I, Value *Addr,
MaybeAlign Alignment, unsigned Granularity,
Type *OpType, bool IsWrite,
Value *SizeArgument, bool UseCalls,
- uint32_t Exp);
+ uint32_t Exp, RuntimeCallInserter &RTCI);
Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
Value *ShadowValue, uint32_t TypeStoreSize);
Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
bool IsWrite, size_t AccessSizeIndex,
- Value *SizeArgument, uint32_t Exp);
- void instrumentMemIntrinsic(MemIntrinsic *MI);
+ Value *SizeArgument, uint32_t Exp,
+ RuntimeCallInserter &RTCI);
+ void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
bool suppressInstrumentationSiteForDebug(int &Instrumented);
bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
@@ -912,6 +933,7 @@ class ModuleAddressSanitizer {
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
Function &F;
AddressSanitizer &ASan;
+ RuntimeCallInserter &RTCI;
DIBuilder DIB;
LLVMContext *C;
Type *IntptrTy;
@@ -948,10 +970,12 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
bool HasReturnsTwiceCall = false;
bool PoisonStack;
- FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
- : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false),
- C(ASan.C), IntptrTy(ASan.IntptrTy),
- IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping),
+ FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
+ RuntimeCallInserter &RTCI)
+ : F(F), ASan(ASan), RTCI(RTCI),
+ DIB(*F.getParent(), /*AllowUnresolved*/ false), C(ASan.C),
+ IntptrTy(ASan.IntptrTy), IntptrPtrTy(PointerType::get(IntptrTy, 0)),
+ Mapping(ASan.Mapping),
PoisonStack(ClStack &&
!Triple(F.getParent()->getTargetTriple()).isAMDGPU()) {}
@@ -1034,8 +1058,8 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
DynamicAreaOffset);
}
- IRB.CreateCall(
- AsanAllocasUnpoisonFunc,
+ RTCI.createRuntimeCall(
+ IRB, AsanAllocasUnpoisonFunc,
{IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
}
@@ -1251,16 +1275,18 @@ Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
}
// Instrument memset/memmove/memcpy
-void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
+void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI,
+ RuntimeCallInserter &RTCI) {
InstrumentationIRBuilder IRB(MI);
if (isa<MemTransferInst>(MI)) {
- IRB.CreateCall(isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
- {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
- IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
- IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
+ RTCI.createRuntimeCall(
+ IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
+ {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
+ IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
+ IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
} else if (isa<MemSetInst>(MI)) {
- IRB.CreateCall(
- AsanMemset,
+ RTCI.createRuntimeCall(
+ IRB, AsanMemset,
{IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
@@ -1498,7 +1524,7 @@ bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
}
void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
- Instruction *I) {
+ Instruction *I, RuntimeCallInserter &RTCI) {
IRBuilder<> IRB(I);
FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
@@ -1506,7 +1532,7 @@ void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
if (i->getType()->isPointerTy())
i = IRB.CreatePointerCast(i, IntptrTy);
}
- IRB.CreateCall(F, Param);
+ RTCI.createRuntimeCall(IRB, F, Param);
}
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
@@ -1514,7 +1540,7 @@ static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
MaybeAlign Alignment, unsigned Granularity,
TypeSize TypeStoreSize, bool IsWrite,
Value *SizeArgument, bool UseCalls,
- uint32_t Exp) {
+ uint32_t Exp, RuntimeCallInserter &RTCI) {
// Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
// if the data is properly aligned.
if (!TypeStoreSize.isScalable()) {
@@ -1529,18 +1555,19 @@ static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
*Alignment >= FixedSize / 8)
return Pass->instrumentAddress(I, InsertBefore, Addr, Alignment,
FixedSize, IsWrite, nullptr, UseCalls,
- Exp);
+ Exp, RTCI);
}
}
Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
- IsWrite, nullptr, UseCalls, Exp);
+ IsWrite, nullptr, UseCalls, Exp, RTCI);
}
void AddressSanitizer::instrumentMaskedLoadOrStore(
AddressSanitizer *Pass, const DataLayout &DL, Type *IntptrTy, Value *Mask,
Value *EVL, Value *Stride, Instruction *I, Value *Addr,
MaybeAlign Alignment, unsigned Granularity, Type *OpType, bool IsWrite,
- Value *SizeArgument, bool UseCalls, uint32_t Exp) {
+ Value *SizeArgument, bool UseCalls, uint32_t Exp,
+ RuntimeCallInserter &RTCI) {
auto *VTy = cast<VectorType>(OpType);
TypeSize ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
auto Zero = ConstantInt::get(IntptrTy, 0);
@@ -1595,15 +1622,16 @@ void AddressSanitizer::instrumentMaskedLoadOrStore(
} else {
InstrumentedAddress = IRB.CreateGEP(VTy, Addr, {Zero, Index});
}
- doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(),
- InstrumentedAddress, Alignment, Granularity,
- ElemTypeSize, IsWrite, SizeArgument, UseCalls, Exp);
+ doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(), InstrumentedAddress,
+ Alignment, Granularity, ElemTypeSize, IsWrite,
+ SizeArgument, UseCalls, Exp, RTCI);
});
}
void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
InterestingMemoryOperand &O, bool UseCalls,
- const DataLayout &DL) {
+ const DataLayout &DL,
+ RuntimeCallInserter &RTCI) {
Value *Addr = O.getPtr();
// Optimization experiments.
@@ -1649,11 +1677,11 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
O.MaybeStride, O.getInsn(), Addr, O.Alignment,
Granularity, O.OpType, O.IsWrite, nullptr,
- UseCalls, Exp);
+ UseCalls, Exp, RTCI);
} else {
doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
- Granularity, O.TypeStoreSize, O.IsWrite, nullptr, UseCalls,
- Exp);
+ Granularity, O.TypeStoreSize, O.IsWrite, nullptr,
+ UseCalls, Exp, RTCI);
}
}
@@ -1661,24 +1689,25 @@ Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
Value *Addr, bool IsWrite,
size_t AccessSizeIndex,
Value *SizeArgument,
- uint32_t Exp) {
+ uint32_t Exp,
+ RuntimeCallInserter &RTCI) {
InstrumentationIRBuilder IRB(InsertBefore);
Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
CallInst *Call = nullptr;
if (SizeArgument) {
if (Exp == 0)
- Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][0],
- {Addr, SizeArgument});
+ Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
+ {Addr, SizeArgument});
else
- Call = IRB.CreateCall(AsanErrorCallbackSized[IsWrite][1],
- {Addr, SizeArgument, ExpVal});
+ Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
+ {Addr, SizeArgument, ExpVal});
} else {
if (Exp == 0)
- Call =
- IRB.CreateCall(AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
+ Call = RTCI.createRuntimeCall(
+ IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
else
- Call = IRB.CreateCall(AsanErrorCallback[IsWrite][1][AccessSizeIndex],
- {Addr, ExpVal});
+ Call = RTCI.createRuntimeCall(
+ IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
}
Call->setCannotMerge();
@@ -1754,7 +1783,8 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
MaybeAlign Alignment,
uint32_t TypeStoreSize, bool IsWrite,
Value *SizeArgument, bool UseCalls,
- uint32_t Exp) {
+ uint32_t Exp,
+ RuntimeCallInserter &RTCI) {
if (TargetTriple.isAMDGPU()) {
InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
TypeStoreSize, IsWrite, SizeArgument);
@@ -1779,11 +1809,12 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
if (UseCalls) {
if (Exp == 0)
- IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
- AddrLong);
+ RTCI.createRuntimeCall(
+ IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
else
- IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
- {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
+ RTCI.createRuntimeCall(
+ IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
+ {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
return;
}
@@ -1830,8 +1861,8 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
}
- Instruction *Crash = generateCrashCode(CrashTerm, AddrLong, IsWrite,
- AccessSizeIndex, SizeArgument, Exp);
+ Instruction *Crash = generateCrashCode(
+ CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
if (OrigIns->getDebugLoc())
Crash->setDebugLoc(OrigIns->getDebugLoc());
}
@@ -1841,8 +1872,9 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
// to report the actual access size.
void AddressSanitizer::instrumentUnusualSizeOrAlignment(
- Instruction *I, Instruction *InsertBefore, Value *Addr, TypeSize TypeStoreSize,
- bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp) {
+ Instruction *I, Instruction *InsertBefore, Value *Addr,
+ TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls,
+ uint32_t Exp, RuntimeCallInserter &RTCI) {
InstrumentationIRBuilder IRB(InsertBefore);
Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
@@ -1850,19 +1882,21 @@ void AddressSanitizer::instrumentUnusualSizeOrAlignment(
Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
if (UseCalls) {
if (Exp == 0)
- IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][0],
- {AddrLong, Size});
+ RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
+ {AddrLong, Size});
else
- IRB.CreateCall(AsanMemoryAccessCallbackSized[IsWrite][1],
- {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
+ RTCI.createRuntimeCall(
+ IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
+ {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
} else {
Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
Value *LastByte = IRB.CreateIntToPtr(
IRB.CreateAdd(AddrLong, SizeMinusOne),
Addr->getType());
- instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp);
+ instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp,
+ RTCI);
instrumentAddress(I, InsertBefore, LastByte, {}, 8, IsWrite, Size, false,
- Exp);
+ Exp, RTCI);
}
}
@@ -2881,6 +2915,8 @@ bool AddressSanitizer::instrumentFunction(Function &F,
FunctionStateRAII CleanupObj(this);
+ RuntimeCallInserter RTCI(F);
+
FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);
// We can't instrument allocas used with llvm.localescape. Only static allocas
@@ -2963,27 +2999,27 @@ bool AddressSanitizer::instrumentFunction(Function &F,
for (auto &Operand : OperandsToInstrument) {
if (!suppressInstrumentationSiteForDebug(NumInstrumented))
instrumentMop(ObjSizeVis, Operand, UseCalls,
- F.getParent()->getDataLayout());
+ F.getParent()->getDataLayout(), RTCI);
FunctionModified = true;
}
for (auto *Inst : IntrinToInstrument) {
if (!suppressInstrumentationSiteForDebug(NumInstrumented))
- instrumentMemIntrinsic(Inst);
+ instrumentMemIntrinsic(Inst, RTCI);
FunctionModified = true;
}
- FunctionStackPoisoner FSP(F, *this);
+ FunctionStackPoisoner FSP(F, *this, RTCI);
bool ChangedStack = FSP.runOnFunction();
// We must unpoison the stack before NoReturn calls (throw, _exit, etc).
// See e.g. https://github.com/google/sanitizers/issues/37
for (auto *CI : NoReturnCalls) {
IRBuilder<> IRB(CI);
- IRB.CreateCall(AsanHandleNoReturnFunc, {});
+ RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
}
for (auto *Inst : PointerComparisonsOrSubtracts) {
- instrumentPointerComparisonOrSubtraction(Inst);
+ instrumentPointerComparisonOrSubtraction(Inst, RTCI);
FunctionModified = true;
}
@@ -3128,9 +3164,10 @@ void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
if (j - i >= ASan.MaxInlinePoisoningSize) {
copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
- IRB.CreateCall(AsanSetShadowFunc[Val],
- {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
- ConstantInt::get(IntptrTy, j - i)});
+ RTCI.createRuntimeCall(
+ IRB, AsanSetShadowFunc[Val],
+ {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
+ ConstantInt::get(IntptrTy, j - i)});
Done = j;
}
}
@@ -3417,8 +3454,8 @@ void FunctionStackPoisoner::processStaticAllocas() {
StackMallocIdx = StackMallocSizeClass(LocalStackSize);
assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
Value *FakeStackValue =
- IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
- ConstantInt::get(IntptrTy, LocalStackSize));
+ RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
+ ConstantInt::get(IntptrTy, LocalStackSize));
IRB.SetInsertPoint(InsBefore);
FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
ConstantInt::get(IntptrTy, 0));
@@ -3428,7 +3465,8 @@ void FunctionStackPoisoner::processStaticAllocas() {
// void *LocalStackBase = (FakeStack) ? FakeStack :
// alloca(LocalStackSize);
StackMallocIdx = StackMallocSizeClass(LocalStackSize);
- FakeStack = IRB.CreateCall(AsanStackMallocFunc[StackMallocIdx],
+ FakeStack =
+ RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
ConstantInt::get(IntptrTy, LocalStackSize));
}
Value *NoFakeStack =
@@ -3563,8 +3601,8 @@ void FunctionStackPoisoner::processStaticAllocas() {
IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
} else {
// For larger frames call __asan_stack_free_*.
- IRBPoison.CreateCall(
- AsanStackFreeFunc[StackMallocIdx],
+ RTCI.createRuntimeCall(
+ IRBPoison, AsanStackFreeFunc[StackMallocIdx],
{FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
}
@@ -3585,8 +3623,8 @@ void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
// For now just insert the call to ASan runtime.
Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
Value *SizeArg = ConstantInt::get(IntptrTy, Size);
- IRB.CreateCall(
- DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
+ RTCI.createRuntimeCall(
+ IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
{AddrArg, SizeArg});
}
@@ -3647,7 +3685,7 @@ void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
ConstantInt::get(IntptrTy, Alignment.value()));
// Insert __asan_alloca_poison call for new created alloca.
- IRB.CreateCall(AsanAllocaPoisonFunc, {NewAddress, OldSize});
+ RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
// Store the last alloca's address to DynamicAllocaLayout. We'll need this
// for unpoisoning stuff.