[llvm] 7e7754d - Revert an accidental commit of four AddressSanitizer refactor CLs
Alexander Potapenko via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 30 07:15:55 PDT 2020
Author: Alexander Potapenko
Date: 2020-04-30T16:15:43+02:00
New Revision: 7e7754df3249917b6057396c9a24fdca4a5f9c0d
URL: https://github.com/llvm/llvm-project/commit/7e7754df3249917b6057396c9a24fdca4a5f9c0d
DIFF: https://github.com/llvm/llvm-project/commit/7e7754df3249917b6057396c9a24fdca4a5f9c0d.diff
LOG: Revert an accidental commit of four AddressSanitizer refactor CLs
I couldn't make arc land the changes properly; for some reason they all got
squashed. Reverting them now so they can be re-landed cleanly.
Summary: This reverts commit cfb5f89b62f885a7a50b1dd49a0cb5532fc75176.
Reviewers: kcc, thejh
Subscribers:
Added:
Modified:
llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
Removed:
llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
llvm/test/Instrumentation/AddressSanitizer/byval-args.ll
################################################################################
diff --git a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
deleted file mode 100644
index ef33fa2147d1..000000000000
--- a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//===--------- Definition of the AddressSanitizer class ---------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares common infrastructure for AddressSanitizer and
-// HWAddressSanitizer.
-//
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZERCOMMON_H
-#define LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZERCOMMON_H
-
-#include "llvm/IR/Instruction.h"
-#include "llvm/IR/Module.h"
-
-namespace llvm {
-
-class InterestingMemoryOperand {
-public:
- Use *PtrUse;
- bool IsWrite;
- Type *OpType;
- uint64_t TypeSize;
- unsigned Alignment;
- // The mask Value, if we're looking at a masked load/store.
- Value *MaybeMask;
-
- InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite,
- class Type *OpType, unsigned Alignment,
- Value *MaybeMask = nullptr)
- : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
- MaybeMask(MaybeMask) {
- const DataLayout &DL = I->getModule()->getDataLayout();
- TypeSize = DL.getTypeStoreSizeInBits(OpType);
- PtrUse = &I->getOperandUse(OperandNo);
- }
-
- Instruction *getInsn() { return cast<Instruction>(PtrUse->getUser()); }
-
- Value *getPtr() { return PtrUse->get(); }
-};
-
-} // namespace llvm
-
-#endif
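(For context on the refactor being reverted: the InterestingMemoryOperand helper removed above bundles the pointer Use, access direction, type size, alignment, and optional mask so ASan and HWASan can share one "collect operands, then instrument" flow. A minimal sketch of how a pass populates it from a load — mirroring the Interesting.emplace_back calls in the reverted hunks below; the collectLoad name is illustrative only and not part of this patch:)

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
    using namespace llvm;

    // Illustrative only -- not part of this commit. A load reads through its
    // pointer operand, so it is recorded as a non-write access with the loaded
    // type and the load's alignment.
    static void collectLoad(LoadInst *LI,
                            SmallVectorImpl<InterestingMemoryOperand> &Out) {
      Out.emplace_back(LI, LI->getPointerOperandIndex(), /*IsWrite=*/false,
                       LI->getType(), LI->getAlignment());
    }
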
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 93326c8fd13a..4556e3275a10 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -69,7 +69,6 @@
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
-#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
@@ -213,11 +212,6 @@ static cl::opt<bool> ClInstrumentAtomics(
cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
cl::init(true));
-static cl::opt<bool>
- ClInstrumentByval("asan-instrument-byval",
- cl::desc("instrument byval call arguments"), cl::Hidden,
- cl::init(true));
-
static cl::opt<bool> ClAlwaysSlowPath(
"asan-always-slow-path",
cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
@@ -618,13 +612,16 @@ struct AddressSanitizer {
/// Check if we want (and can) handle this alloca.
bool isInterestingAlloca(const AllocaInst &AI);
- bool ignoreAccess(Value *Ptr);
- void getInterestingMemoryOperands(
- Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
+ /// If it is an interesting memory access, return the PointerOperand
+ /// and set IsWrite/Alignment. Otherwise return nullptr.
+ /// MaybeMask is an output parameter for the mask Value, if we're looking at a
+ /// masked load/store.
+ Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
+ uint64_t *TypeSize, unsigned *Alignment,
+ Value **MaybeMask = nullptr);
- void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
- InterestingMemoryOperand &O, bool UseCalls,
- const DataLayout &DL);
+ void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I,
+ bool UseCalls, const DataLayout &DL);
void instrumentPointerComparisonOrSubtraction(Instruction *I);
void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
Value *Addr, uint32_t TypeSize, bool IsWrite,
@@ -641,7 +638,6 @@ struct AddressSanitizer {
Value *SizeArgument, uint32_t Exp);
void instrumentMemIntrinsic(MemIntrinsic *MI);
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
- bool suppressInstrumentationSiteForDebug(int &Instrumented);
bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
bool maybeInsertAsanInitAtFunctionEntry(Function &F);
void maybeInsertDynamicShadowAtFunctionEntry(Function &F);
@@ -1343,92 +1339,98 @@ bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
return IsInteresting;
}
-bool AddressSanitizer::ignoreAccess(Value *Ptr) {
- // Do not instrument acesses from different address spaces; we cannot deal
- // with them.
- Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
- if (PtrTy->getPointerAddressSpace() != 0)
- return true;
-
- // Ignore swifterror addresses.
- // swifterror memory addresses are mem2reg promoted by instruction
- // selection. As such they cannot have regular uses like an instrumentation
- // function and it makes no sense to track them as memory.
- if (Ptr->isSwiftError())
- return true;
-
- // Treat memory accesses to promotable allocas as non-interesting since they
- // will not cause memory violations. This greatly speeds up the instrumented
- // executable at -O0.
- if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
- if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
- return true;
-
- return false;
-}
-
-void AddressSanitizer::getInterestingMemoryOperands(
- Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
+Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
+ bool *IsWrite,
+ uint64_t *TypeSize,
+ unsigned *Alignment,
+ Value **MaybeMask) {
// Skip memory accesses inserted by another instrumentation.
- if (I->hasMetadata("nosanitize"))
- return;
+ if (I->hasMetadata("nosanitize")) return nullptr;
// Do not instrument the load fetching the dynamic shadow address.
if (LocalDynamicShadow == I)
- return;
+ return nullptr;
+ Value *PtrOperand = nullptr;
+ const DataLayout &DL = I->getModule()->getDataLayout();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
- if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand()))
- return;
- Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
- LI->getType(), LI->getAlignment());
+ if (!ClInstrumentReads) return nullptr;
+ *IsWrite = false;
+ *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
+ *Alignment = LI->getAlignment();
+ PtrOperand = LI->getPointerOperand();
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
- if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand()))
- return;
- Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
- SI->getValueOperand()->getType(),
- SI->getAlignment());
+ if (!ClInstrumentWrites) return nullptr;
+ *IsWrite = true;
+ *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
+ *Alignment = SI->getAlignment();
+ PtrOperand = SI->getPointerOperand();
} else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
- if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand()))
- return;
- Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
- RMW->getValOperand()->getType(), 0);
+ if (!ClInstrumentAtomics) return nullptr;
+ *IsWrite = true;
+ *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
+ *Alignment = 0;
+ PtrOperand = RMW->getPointerOperand();
} else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
- if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand()))
- return;
- Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
- XCHG->getCompareOperand()->getType(), 0);
+ if (!ClInstrumentAtomics) return nullptr;
+ *IsWrite = true;
+ *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
+ *Alignment = 0;
+ PtrOperand = XCHG->getPointerOperand();
} else if (auto CI = dyn_cast<CallInst>(I)) {
auto *F = CI->getCalledFunction();
if (F && (F->getName().startswith("llvm.masked.load.") ||
F->getName().startswith("llvm.masked.store."))) {
- bool IsWrite = F->getName().startswith("llvm.masked.store.");
- // Masked store has an initial operand for the value.
- unsigned OpOffset = IsWrite ? 1 : 0;
- if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
- return;
-
- auto BasePtr = CI->getOperand(OpOffset);
- if (ignoreAccess(BasePtr))
- return;
+ unsigned OpOffset = 0;
+ if (F->getName().startswith("llvm.masked.store.")) {
+ if (!ClInstrumentWrites)
+ return nullptr;
+ // Masked store has an initial operand for the value.
+ OpOffset = 1;
+ *IsWrite = true;
+ } else {
+ if (!ClInstrumentReads)
+ return nullptr;
+ *IsWrite = false;
+ }
+
+ auto BasePtr = CI->getOperand(0 + OpOffset);
auto Ty = cast<PointerType>(BasePtr->getType())->getElementType();
- unsigned Alignment = 1;
- // Otherwise no alignment guarantees. We probably got Undef.
+ *TypeSize = DL.getTypeStoreSizeInBits(Ty);
if (auto AlignmentConstant =
dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
- Alignment = (unsigned)AlignmentConstant->getZExtValue();
- Value *Mask = CI->getOperand(2 + OpOffset);
- Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
- } else {
- for (unsigned ArgNo = 0; ArgNo < CI->getNumArgOperands(); ArgNo++) {
- if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
- ignoreAccess(CI->getArgOperand(ArgNo)))
- continue;
- Type *Ty = CI->getParamByValType(ArgNo);
- Interesting.emplace_back(I, ArgNo, false, Ty, 1);
- }
+ *Alignment = (unsigned)AlignmentConstant->getZExtValue();
+ else
+ *Alignment = 1; // No alignment guarantees. We probably got Undef
+ if (MaybeMask)
+ *MaybeMask = CI->getOperand(2 + OpOffset);
+ PtrOperand = BasePtr;
}
}
+
+ if (PtrOperand) {
+ // Do not instrument acesses from different address spaces; we cannot deal
+ // with them.
+ Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
+ if (PtrTy->getPointerAddressSpace() != 0)
+ return nullptr;
+
+ // Ignore swifterror addresses.
+ // swifterror memory addresses are mem2reg promoted by instruction
+ // selection. As such they cannot have regular uses like an instrumentation
+ // function and it makes no sense to track them as memory.
+ if (PtrOperand->isSwiftError())
+ return nullptr;
+ }
+
+ // Treat memory accesses to promotable allocas as non-interesting since they
+ // will not cause memory violations. This greatly speeds up the instrumented
+ // executable at -O0.
+ if (ClSkipPromotableAllocas)
+ if (auto AI = dyn_cast_or_null<AllocaInst>(PtrOperand))
+ return isInterestingAlloca(*AI) ? AI : nullptr;
+
+ return PtrOperand;
}
static bool isPointerOperand(Value *V) {
@@ -1543,9 +1545,15 @@ static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass,
}
void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
- InterestingMemoryOperand &O, bool UseCalls,
+ Instruction *I, bool UseCalls,
const DataLayout &DL) {
- Value *Addr = O.getPtr();
+ bool IsWrite = false;
+ unsigned Alignment = 0;
+ uint64_t TypeSize = 0;
+ Value *MaybeMask = nullptr;
+ Value *Addr =
+ isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment, &MaybeMask);
+ assert(Addr);
// Optimization experiments.
// The experiments can be used to evaluate potential optimizations that remove
@@ -1565,7 +1573,7 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
// dynamically initialized global is always valid.
GlobalVariable *G = dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, DL));
if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
- isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
+ isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
NumOptimizedAccessesToGlobalVar++;
return;
}
@@ -1574,26 +1582,25 @@ void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
if (ClOpt && ClOptStack) {
// A direct inbounds access to a stack variable is always valid.
if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
- isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
+ isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
NumOptimizedAccessesToStackVar++;
return;
}
}
- if (O.IsWrite)
+ if (IsWrite)
NumInstrumentedWrites++;
else
NumInstrumentedReads++;
unsigned Granularity = 1 << Mapping.Scale;
- if (O.MaybeMask) {
- instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.getInsn(),
- Addr, O.Alignment, Granularity, O.TypeSize,
- O.IsWrite, nullptr, UseCalls, Exp);
+ if (MaybeMask) {
+ instrumentMaskedLoadOrStore(this, DL, IntptrTy, MaybeMask, I, Addr,
+ Alignment, Granularity, TypeSize, IsWrite,
+ nullptr, UseCalls, Exp);
} else {
- doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
- Granularity, O.TypeSize, O.IsWrite, nullptr, UseCalls,
- Exp);
+ doInstrumentAddress(this, I, I, Addr, Alignment, Granularity, TypeSize,
+ IsWrite, nullptr, UseCalls, Exp);
}
}
@@ -2603,14 +2610,6 @@ void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
}
}
-bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
- bool ShouldInstrument =
- ClDebugMin < 0 || ClDebugMax < 0 ||
- (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
- Instrumented++;
- return !ShouldInstrument;
-}
-
bool AddressSanitizer::instrumentFunction(Function &F,
const TargetLibraryInfo *TLI) {
if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
@@ -2643,12 +2642,14 @@ bool AddressSanitizer::instrumentFunction(Function &F,
// We want to instrument every address only once per basic block (unless there
// are calls between uses).
SmallPtrSet<Value *, 16> TempsToInstrument;
- SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
- SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
+ SmallVector<Instruction *, 16> ToInstrument;
SmallVector<Instruction *, 8> NoReturnCalls;
SmallVector<BasicBlock *, 16> AllBlocks;
SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
int NumAllocas = 0;
+ bool IsWrite;
+ unsigned Alignment;
+ uint64_t TypeSize;
// Fill the set of memory operations to instrument.
for (auto &BB : F) {
@@ -2657,36 +2658,29 @@ bool AddressSanitizer::instrumentFunction(Function &F,
int NumInsnsPerBB = 0;
for (auto &Inst : BB) {
if (LooksLikeCodeInBug11395(&Inst)) return false;
- SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
- getInterestingMemoryOperands(&Inst, InterestingOperands);
-
- if (!InterestingOperands.empty()) {
- for (auto &Operand : InterestingOperands) {
- if (ClOpt && ClOptSameTemp) {
- Value *Ptr = Operand.getPtr();
- // If we have a mask, skip instrumentation if we've already
- // instrumented the full object. But don't add to TempsToInstrument
- // because we might get another load/store with a different mask.
- if (Operand.MaybeMask) {
- if (TempsToInstrument.count(Ptr))
- continue; // We've seen this (whole) temp in the current BB.
- } else {
- if (!TempsToInstrument.insert(Ptr).second)
- continue; // We've seen this temp in the current BB.
- }
+ Value *MaybeMask = nullptr;
+ if (Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
+ &Alignment, &MaybeMask)) {
+ if (ClOpt && ClOptSameTemp) {
+ // If we have a mask, skip instrumentation if we've already
+ // instrumented the full object. But don't add to TempsToInstrument
+ // because we might get another load/store with a different mask.
+ if (MaybeMask) {
+ if (TempsToInstrument.count(Addr))
+ continue; // We've seen this (whole) temp in the current BB.
+ } else {
+ if (!TempsToInstrument.insert(Addr).second)
+ continue; // We've seen this temp in the current BB.
}
- OperandsToInstrument.push_back(Operand);
- NumInsnsPerBB++;
}
} else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
isInterestingPointerComparison(&Inst)) ||
((ClInvalidPointerPairs || ClInvalidPointerSub) &&
isInterestingPointerSubtraction(&Inst))) {
PointerComparisonsOrSubtracts.push_back(&Inst);
- } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
+ continue;
+ } else if (isa<MemIntrinsic>(Inst)) {
// ok, take it.
- IntrinToInstrument.push_back(MI);
- NumInsnsPerBB++;
} else {
if (isa<AllocaInst>(Inst)) NumAllocas++;
if (auto *CB = dyn_cast<CallBase>(&Inst)) {
@@ -2697,14 +2691,17 @@ bool AddressSanitizer::instrumentFunction(Function &F,
}
if (CallInst *CI = dyn_cast<CallInst>(&Inst))
maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
+ continue;
}
+ ToInstrument.push_back(&Inst);
+ NumInsnsPerBB++;
if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
}
}
- bool UseCalls = (ClInstrumentationWithCallsThreshold >= 0 &&
- OperandsToInstrument.size() + IntrinToInstrument.size() >
- (unsigned)ClInstrumentationWithCallsThreshold);
+ bool UseCalls =
+ (ClInstrumentationWithCallsThreshold >= 0 &&
+ ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold);
const DataLayout &DL = F.getParent()->getDataLayout();
ObjectSizeOpts ObjSizeOpts;
ObjSizeOpts.RoundToAlign = true;
@@ -2712,16 +2709,16 @@ bool AddressSanitizer::instrumentFunction(Function &F,
// Instrument.
int NumInstrumented = 0;
- for (auto &Operand : OperandsToInstrument) {
- if (!suppressInstrumentationSiteForDebug(NumInstrumented))
- instrumentMop(ObjSizeVis, Operand, UseCalls,
- F.getParent()->getDataLayout());
- FunctionModified = true;
- }
- for (auto Inst : IntrinToInstrument) {
- if (!suppressInstrumentationSiteForDebug(NumInstrumented))
- instrumentMemIntrinsic(Inst);
- FunctionModified = true;
+ for (auto Inst : ToInstrument) {
+ if (ClDebugMin < 0 || ClDebugMax < 0 ||
+ (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
+ if (isInterestingMemoryAccess(Inst, &IsWrite, &TypeSize, &Alignment))
+ instrumentMop(ObjSizeVis, Inst, UseCalls,
+ F.getParent()->getDataLayout());
+ else
+ instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
+ }
+ NumInstrumented++;
}
FunctionStackPoisoner FSP(F, *this);
@@ -2736,10 +2733,10 @@ bool AddressSanitizer::instrumentFunction(Function &F,
for (auto Inst : PointerComparisonsOrSubtracts) {
instrumentPointerComparisonOrSubtraction(Inst);
- FunctionModified = true;
+ NumInstrumented++;
}
- if (ChangedStack || !NoReturnCalls.empty())
+ if (NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty())
FunctionModified = true;
LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index 0b9856b5126a..9470cb2cfb28 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -45,7 +45,6 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
-#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
@@ -97,10 +96,6 @@ static cl::opt<bool> ClInstrumentAtomics(
cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
cl::init(true));
-static cl::opt<bool> ClInstrumentByval("hwasan-instrument-byval",
- cl::desc("instrument byval arguments"),
- cl::Hidden, cl::init(true));
-
static cl::opt<bool> ClRecover(
"hwasan-recover",
cl::desc("Enable recovery mode (continue-after-error)."),
@@ -216,10 +211,10 @@ class HWAddressSanitizer {
unsigned AccessSizeIndex,
Instruction *InsertBefore);
void instrumentMemIntrinsic(MemIntrinsic *MI);
- bool instrumentMemAccess(InterestingMemoryOperand &O);
- bool ignoreAccess(Value *Ptr);
- void getInterestingMemoryOperands(
- Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
+ bool instrumentMemAccess(Instruction *I);
+ Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
+ uint64_t *TypeSize, unsigned *Alignment,
+ Value **MaybeMask);
bool isInterestingAlloca(const AllocaInst &AI);
bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
@@ -505,63 +500,62 @@ Value *HWAddressSanitizer::getDynamicShadowNonTls(IRBuilder<> &IRB) {
}
}
-bool HWAddressSanitizer::ignoreAccess(Value *Ptr) {
- // Do not instrument acesses from different address spaces; we cannot deal
- // with them.
- Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
- if (PtrTy->getPointerAddressSpace() != 0)
- return true;
-
- // Ignore swifterror addresses.
- // swifterror memory addresses are mem2reg promoted by instruction
- // selection. As such they cannot have regular uses like an instrumentation
- // function and it makes no sense to track them as memory.
- if (Ptr->isSwiftError())
- return true;
-
- return false;
-}
-
-void HWAddressSanitizer::getInterestingMemoryOperands(
- Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
+Value *HWAddressSanitizer::isInterestingMemoryAccess(Instruction *I,
+ bool *IsWrite,
+ uint64_t *TypeSize,
+ unsigned *Alignment,
+ Value **MaybeMask) {
// Skip memory accesses inserted by another instrumentation.
- if (I->hasMetadata("nosanitize"))
- return;
+ if (I->hasMetadata("nosanitize")) return nullptr;
// Do not instrument the load fetching the dynamic shadow address.
if (LocalDynamicShadow == I)
- return;
+ return nullptr;
+ Value *PtrOperand = nullptr;
+ const DataLayout &DL = I->getModule()->getDataLayout();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
- if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand()))
- return;
- Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
- LI->getType(), LI->getAlignment());
+ if (!ClInstrumentReads) return nullptr;
+ *IsWrite = false;
+ *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
+ *Alignment = LI->getAlignment();
+ PtrOperand = LI->getPointerOperand();
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
- if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand()))
- return;
- Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
- SI->getValueOperand()->getType(),
- SI->getAlignment());
+ if (!ClInstrumentWrites) return nullptr;
+ *IsWrite = true;
+ *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
+ *Alignment = SI->getAlignment();
+ PtrOperand = SI->getPointerOperand();
} else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
- if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand()))
- return;
- Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
- RMW->getValOperand()->getType(), 0);
+ if (!ClInstrumentAtomics) return nullptr;
+ *IsWrite = true;
+ *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
+ *Alignment = 0;
+ PtrOperand = RMW->getPointerOperand();
} else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
- if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand()))
- return;
- Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
- XCHG->getCompareOperand()->getType(), 0);
- } else if (auto CI = dyn_cast<CallInst>(I)) {
- for (unsigned ArgNo = 0; ArgNo < CI->getNumArgOperands(); ArgNo++) {
- if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
- ignoreAccess(CI->getArgOperand(ArgNo)))
- continue;
- Type *Ty = CI->getParamByValType(ArgNo);
- Interesting.emplace_back(I, ArgNo, false, Ty, 1);
- }
+ if (!ClInstrumentAtomics) return nullptr;
+ *IsWrite = true;
+ *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
+ *Alignment = 0;
+ PtrOperand = XCHG->getPointerOperand();
}
+
+ if (PtrOperand) {
+ // Do not instrument accesses from different address spaces; we cannot deal
+ // with them.
+ Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
+ if (PtrTy->getPointerAddressSpace() != 0)
+ return nullptr;
+
+ // Ignore swifterror addresses.
+ // swifterror memory addresses are mem2reg promoted by instruction
+ // selection. As such they cannot have regular uses like an instrumentation
+ // function and it makes no sense to track them as memory.
+ if (PtrOperand->isSwiftError())
+ return nullptr;
+ }
+
+ return PtrOperand;
}
static unsigned getPointerOperandIndex(Instruction *I) {
@@ -719,32 +713,45 @@ void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
MI->eraseFromParent();
}
-bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O) {
- Value *Addr = O.getPtr();
+bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) {
+ LLVM_DEBUG(dbgs() << "Instrumenting: " << *I << "\n");
+ bool IsWrite = false;
+ unsigned Alignment = 0;
+ uint64_t TypeSize = 0;
+ Value *MaybeMask = nullptr;
+
+ if (ClInstrumentMemIntrinsics && isa<MemIntrinsic>(I)) {
+ instrumentMemIntrinsic(cast<MemIntrinsic>(I));
+ return true;
+ }
+
+ Value *Addr =
+ isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment, &MaybeMask);
- LLVM_DEBUG(dbgs() << "Instrumenting: " << O.getInsn() << "\n");
+ if (!Addr)
+ return false;
- if (O.MaybeMask)
+ if (MaybeMask)
return false; //FIXME
- IRBuilder<> IRB(O.getInsn());
- if (isPowerOf2_64(O.TypeSize) &&
- (O.TypeSize / 8 <= (1UL << (kNumberOfAccessSizes - 1))) &&
- (O.Alignment >= (1UL << Mapping.Scale) || O.Alignment == 0 ||
- O.Alignment >= O.TypeSize / 8)) {
- size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeSize);
+ IRBuilder<> IRB(I);
+ if (isPowerOf2_64(TypeSize) &&
+ (TypeSize / 8 <= (1UL << (kNumberOfAccessSizes - 1))) &&
+ (Alignment >= (1UL << Mapping.Scale) || Alignment == 0 ||
+ Alignment >= TypeSize / 8)) {
+ size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
if (ClInstrumentWithCalls) {
- IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
+ IRB.CreateCall(HwasanMemoryAccessCallback[IsWrite][AccessSizeIndex],
IRB.CreatePointerCast(Addr, IntptrTy));
} else {
- instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn());
+ instrumentMemAccessInline(Addr, IsWrite, AccessSizeIndex, I);
}
} else {
- IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite],
+ IRB.CreateCall(HwasanMemoryAccessCallbackSized[IsWrite],
{IRB.CreatePointerCast(Addr, IntptrTy),
- ConstantInt::get(IntptrTy, O.TypeSize / 8)});
+ ConstantInt::get(IntptrTy, TypeSize / 8)});
}
- untagPointerOperand(O.getInsn(), Addr);
+ untagPointerOperand(I, Addr);
return true;
}
@@ -1082,8 +1089,7 @@ bool HWAddressSanitizer::sanitizeFunction(Function &F) {
LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
- SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
- SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
+ SmallVector<Instruction*, 16> ToInstrument;
SmallVector<AllocaInst*, 8> AllocasToInstrument;
SmallVector<Instruction*, 8> RetVec;
SmallVector<Instruction*, 8> LandingPadVec;
@@ -1109,10 +1115,14 @@ bool HWAddressSanitizer::sanitizeFunction(Function &F) {
if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
LandingPadVec.push_back(&Inst);
- getInterestingMemoryOperands(&Inst, OperandsToInstrument);
-
- if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
- IntrinToInstrument.push_back(MI);
+ Value *MaybeMask = nullptr;
+ bool IsWrite;
+ unsigned Alignment;
+ uint64_t TypeSize;
+ Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
+ &Alignment, &MaybeMask);
+ if (Addr || isa<MemIntrinsic>(Inst))
+ ToInstrument.push_back(&Inst);
}
}
@@ -1128,8 +1138,7 @@ bool HWAddressSanitizer::sanitizeFunction(Function &F) {
F.setPersonalityFn(nullptr);
}
- if (AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
- IntrinToInstrument.empty())
+ if (AllocasToInstrument.empty() && ToInstrument.empty())
return false;
assert(!LocalDynamicShadow);
@@ -1207,14 +1216,8 @@ bool HWAddressSanitizer::sanitizeFunction(Function &F) {
}
}
- for (auto &Operand : OperandsToInstrument)
- Changed |= instrumentMemAccess(Operand);
-
- if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
- for (auto Inst : IntrinToInstrument)
- instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
- Changed = true;
- }
+ for (auto Inst : ToInstrument)
+ Changed |= instrumentMemAccess(Inst);
LocalDynamicShadow = nullptr;
StackBaseTag = nullptr;
diff --git a/llvm/test/Instrumentation/AddressSanitizer/byval-args.ll b/llvm/test/Instrumentation/AddressSanitizer/byval-args.ll
deleted file mode 100644
index a070cedca37d..000000000000
--- a/llvm/test/Instrumentation/AddressSanitizer/byval-args.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: opt < %s -asan -S | FileCheck %s
-; Test that for call instructions, the by-value arguments are instrumented.
-
-target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-%struct.bar = type { %struct.foo }
-%struct.foo = type { i8*, i8*, i8* }
-define dso_local void @func2(%struct.foo* %foo) sanitize_address {
-; CHECK-LABEL: @func2
- tail call void @func1(%struct.foo* byval(%struct.foo) align 8 %foo) #2
-; CHECK: call void @__asan_report_load
- ret void
-; CHECK: ret void
-}
-declare dso_local void @func1(%struct.foo* byval(%struct.foo) align 8)
-
-!0 = !{i32 1, !"wchar_size", i32 4}