[llvm] [AMDGPU] Update instrumentAddress method to support aligned size and unusual size accesses. (PR #104804)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Aug 19 08:42:46 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-amdgpu
Author: Chaitanya (skc7)
This PR updates the instrumentAddress API to properly support aligned sizes and unusual-size accesses. The changes are ported from the ASan pass.
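Roughly, the new wrapper keeps the old single-check fast path for properly aligned 1-, 2-, 4-, 8-, or 16-byte accesses and otherwise falls back to checking the first and last byte of the access. Below is a standalone sketch of just that dispatch decision, using plain C++ types for illustration rather than the actual llvm::TypeSize/MaybeAlign/IRBuilder API:

```cpp
// Illustrative sketch of the dispatch logic this patch adds; the real code
// operates on llvm::TypeSize and MaybeAlign and emits IR via IRBuilder.
#include <cstdint>
#include <cstdio>
#include <optional>

// Returns true when a single shadow check covers the whole access: the access
// is a fixed 1/2/4/8/16-byte load or store and its alignment is unknown, at
// least the shadow granularity, or at least the access size in bytes.
static bool singleCheckSuffices(uint64_t SizeInBits, bool IsScalable,
                                std::optional<uint64_t> AlignInBytes,
                                int AsanScale) {
  if (IsScalable)
    return false; // scalable sizes always take the first/last-byte path
  const uint64_t Granularity = 1u << AsanScale; // shadow granularity in bytes
  switch (SizeInBits) {
  case 8: case 16: case 32: case 64: case 128:
    return !AlignInBytes || *AlignInBytes >= Granularity ||
           *AlignInBytes >= SizeInBits / 8;
  default:
    return false; // unusual size: check the first and last byte instead
  }
}

int main() {
  // An aligned 4-byte access needs one check; a 12-byte access needs two
  // 1-byte checks (first byte and last byte of the accessed range).
  std::printf("i32, align 4 -> %s\n",
              singleCheckSuffices(32, false, 4, /*AsanScale=*/3)
                  ? "one check" : "two checks");
  std::printf("12 bytes, align 1 -> %s\n",
              singleCheckSuffices(96, false, 1, /*AsanScale=*/3)
                  ? "one check" : "two checks");
}
```

On the slow path, the patch computes the access size in bytes from the bit width and emits two 8-bit checks, one at Addr and one at Addr + Size - 1.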
---
Full diff: https://github.com/llvm/llvm-project/pull/104804.diff
2 Files Affected:
- (modified) llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp (+47-5)
- (modified) llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h (+1-1)
``````````diff
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
index 593fca5bc3ed68..739813310f67db 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
@@ -147,11 +147,13 @@ static Value *memToShadow(Module &M, IRBuilder<> &IRB, Type *IntptrTy,
return IRB.CreateAdd(Shadow, ShadowBase);
}
-void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
- Instruction *InsertBefore, Value *Addr,
- MaybeAlign Alignment, uint32_t TypeStoreSize,
- bool IsWrite, Value *SizeArgument, bool UseCalls,
- bool Recover, int AsanScale, int AsanOffset) {
+static void instrumentAddressImpl(Module &M, IRBuilder<> &IRB,
+ Instruction *OrigIns,
+ Instruction *InsertBefore, Value *Addr,
+ MaybeAlign Alignment, uint32_t TypeStoreSize,
+ bool IsWrite, Value *SizeArgument,
+ bool UseCalls, bool Recover, int AsanScale,
+ int AsanOffset) {
Type *AddrTy = Addr->getType();
Type *IntptrTy = M.getDataLayout().getIntPtrType(
M.getContext(), AddrTy->getPointerAddressSpace());
@@ -179,6 +181,46 @@ void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
return;
}
+void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
+ Instruction *InsertBefore, Value *Addr,
+ MaybeAlign Alignment, TypeSize TypeStoreSize,
+ bool IsWrite, Value *SizeArgument, bool UseCalls,
+ bool Recover, int AsanScale, int AsanOffset) {
+ // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
+ // if the data is properly aligned.
+ if (!TypeStoreSize.isScalable()) {
+ unsigned Granularity = 1 << AsanScale;
+ const auto FixedSize = TypeStoreSize.getFixedValue();
+ switch (FixedSize) {
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ if (!Alignment || *Alignment >= Granularity ||
+ *Alignment >= FixedSize / 8)
+ return instrumentAddressImpl(
+ M, IRB, OrigIns, InsertBefore, Addr, Alignment, FixedSize, IsWrite,
+ SizeArgument, UseCalls, Recover, AsanScale, AsanOffset);
+ }
+ }
+ // Instrument unusual size or unusual alignment.
+ IRB.SetInsertPoint(InsertBefore);
+ Type *AddrTy = Addr->getType();
+ Type *IntptrTy = M.getDataLayout().getIntPtrType(
+ M.getContext(), AddrTy->getPointerAddressSpace());
+ Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
+ Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
+ Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
+ Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
+ Value *LastByte =
+ IRB.CreateIntToPtr(IRB.CreateAdd(AddrLong, SizeMinusOne), AddrTy);
+ instrumentAddressImpl(M, IRB, OrigIns, InsertBefore, Addr, {}, 8, IsWrite,
+ SizeArgument, UseCalls, Recover, AsanScale, AsanOffset);
+ instrumentAddressImpl(M, IRB, OrigIns, InsertBefore, LastByte, {}, 8, IsWrite,
+ SizeArgument, UseCalls, Recover, AsanScale, AsanOffset);
+}
+
void getInterestingMemoryOperands(
Module &M, Instruction *I,
SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h
index 64d78c4aeb6925..4dd2bb44d970a5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h
@@ -44,7 +44,7 @@ uint64_t getRedzoneSizeForGlobal(int Scale, uint64_t SizeInBytes);
/// Generates report blocks that catch the addressing errors.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
Instruction *InsertBefore, Value *Addr,
- MaybeAlign Alignment, uint32_t TypeStoreSize,
+ MaybeAlign Alignment, TypeSize TypeStoreSize,
bool IsWrite, Value *SizeArgument, bool UseCalls,
bool Recover, int Scale, int Offset);
``````````
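For callers, the header change widens TypeStoreSize from uint32_t to TypeSize, so the value returned by DataLayout::getTypeStoreSizeInBits can be passed through unchanged. A minimal, hypothetical call site under that assumption (the helper name, flag values, and the AsanScale/AsanOffset constants below are placeholders, not taken from this patch):

```cpp
// Hypothetical caller sketch; only the instrumentAddress signature comes from
// this patch, everything else here is an assumption for illustration.
#include "AMDGPUAsanInstrumentation.h"
using namespace llvm;

static void instrumentLoad(Module &M, IRBuilder<> &IRB, LoadInst *LI) {
  const DataLayout &DL = M.getDataLayout();
  // getTypeStoreSizeInBits returns a TypeSize, which now flows through
  // directly instead of being narrowed to uint32_t.
  TypeSize StoreSize = DL.getTypeStoreSizeInBits(LI->getType());
  AMDGPU::instrumentAddress(M, IRB, LI, LI, LI->getPointerOperand(),
                            LI->getAlign(), StoreSize,
                            /*IsWrite=*/false, /*SizeArgument=*/nullptr,
                            /*UseCalls=*/false, /*Recover=*/false,
                            /*AsanScale=*/3, /*AsanOffset=*/0);
}
```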
https://github.com/llvm/llvm-project/pull/104804