[llvm] [AMDGPU] Update instrumentAddress method to support aligned size and unusual size accesses. (PR #104804)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 20 00:17:00 PDT 2024
https://github.com/skc7 updated https://github.com/llvm/llvm-project/pull/104804
From d90bec27e6ab083458cb0824664ebef78655e580 Mon Sep 17 00:00:00 2001
From: skc7 <Krishna.Sankisa at amd.com>
Date: Mon, 19 Aug 2024 21:07:57 +0530
Subject: [PATCH 1/3] [AMDGPU] Update instrumentAddress method to support
aligned size and unusual size accesses.
---
.../AMDGPU/AMDGPUAsanInstrumentation.cpp | 52 +++++++++++++++++--
.../Target/AMDGPU/AMDGPUAsanInstrumentation.h | 2 +-
2 files changed, 48 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
index 593fca5bc3ed68..739813310f67db 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
@@ -147,11 +147,13 @@ static Value *memToShadow(Module &M, IRBuilder<> &IRB, Type *IntptrTy,
return IRB.CreateAdd(Shadow, ShadowBase);
}
-void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
- Instruction *InsertBefore, Value *Addr,
- MaybeAlign Alignment, uint32_t TypeStoreSize,
- bool IsWrite, Value *SizeArgument, bool UseCalls,
- bool Recover, int AsanScale, int AsanOffset) {
+static void instrumentAddressImpl(Module &M, IRBuilder<> &IRB,
+ Instruction *OrigIns,
+ Instruction *InsertBefore, Value *Addr,
+ MaybeAlign Alignment, uint32_t TypeStoreSize,
+ bool IsWrite, Value *SizeArgument,
+ bool UseCalls, bool Recover, int AsanScale,
+ int AsanOffset) {
Type *AddrTy = Addr->getType();
Type *IntptrTy = M.getDataLayout().getIntPtrType(
M.getContext(), AddrTy->getPointerAddressSpace());
@@ -179,6 +181,46 @@ void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
return;
}
+void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
+ Instruction *InsertBefore, Value *Addr,
+ MaybeAlign Alignment, TypeSize TypeStoreSize,
+ bool IsWrite, Value *SizeArgument, bool UseCalls,
+ bool Recover, int AsanScale, int AsanOffset) {
+ // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
+ // if the data is properly aligned.
+ if (!TypeStoreSize.isScalable()) {
+ unsigned Granularity = 1 << AsanScale;
+ const auto FixedSize = TypeStoreSize.getFixedValue();
+ switch (FixedSize) {
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ if (!Alignment || *Alignment >= Granularity ||
+ *Alignment >= FixedSize / 8)
+ return instrumentAddressImpl(
+ M, IRB, OrigIns, InsertBefore, Addr, Alignment, FixedSize, IsWrite,
+ SizeArgument, UseCalls, Recover, AsanScale, AsanOffset);
+ }
+ }
+ // Instrument unusual size or unusual alignment.
+ IRB.SetInsertPoint(InsertBefore);
+ Type *AddrTy = Addr->getType();
+ Type *IntptrTy = M.getDataLayout().getIntPtrType(
+ M.getContext(), AddrTy->getPointerAddressSpace());
+ Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
+ Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
+ Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
+ Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
+ Value *LastByte =
+ IRB.CreateIntToPtr(IRB.CreateAdd(AddrLong, SizeMinusOne), AddrTy);
+ instrumentAddressImpl(M, IRB, OrigIns, InsertBefore, Addr, {}, 8, IsWrite,
+ SizeArgument, UseCalls, Recover, AsanScale, AsanOffset);
+ instrumentAddressImpl(M, IRB, OrigIns, InsertBefore, LastByte, {}, 8, IsWrite,
+ SizeArgument, UseCalls, Recover, AsanScale, AsanOffset);
+}
+
void getInterestingMemoryOperands(
Module &M, Instruction *I,
SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h
index 64d78c4aeb6925..4dd2bb44d970a5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h
@@ -44,7 +44,7 @@ uint64_t getRedzoneSizeForGlobal(int Scale, uint64_t SizeInBytes);
/// Generates report blocks that catch the addressing errors.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
Instruction *InsertBefore, Value *Addr,
- MaybeAlign Alignment, uint32_t TypeStoreSize,
+ MaybeAlign Alignment, TypeSize TypeStoreSize,
bool IsWrite, Value *SizeArgument, bool UseCalls,
bool Recover, int Scale, int Offset);
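
The net effect of this first patch: the old body becomes a static instrumentAddressImpl, and the new public instrumentAddress dispatches on the (now scalable-aware) TypeSize. Power-of-two sizes from 1 to 16 bytes with sufficient alignment get a single shadow check; anything else falls through to two 1-byte checks, one at the first and one at the last byte of the access. A minimal caller-side sketch, assuming a StoreInst *SI and AsanScale/AsanOffset already in scope (these names are illustrative, not from the patch):

    // Instrumenting a store of <3 x i32>: the store size is 96 bits
    // (12 bytes), which matches none of the switch cases, so the wrapper
    // computes LastByte = Addr + (96 >> 3) - 1 = Addr + 11 and emits one
    // 1-byte shadow check at Addr and another at LastByte.
    const DataLayout &DL = M.getDataLayout();
    Type *AccessTy = SI->getValueOperand()->getType();
    TypeSize StoreSize = DL.getTypeStoreSizeInBits(AccessTy);
    instrumentAddress(M, IRB, /*OrigIns=*/SI, /*InsertBefore=*/SI,
                      SI->getPointerOperand(), SI->getAlign(), StoreSize,
                      /*IsWrite=*/true, /*SizeArgument=*/nullptr,
                      /*UseCalls=*/false, /*Recover=*/true,
                      AsanScale, AsanOffset);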
From f51cb1907944853d5d60d4f3c396f2db673cfb31 Mon Sep 17 00:00:00 2001
From: skc7 <Krishna.Sankisa at amd.com>
Date: Mon, 19 Aug 2024 23:38:48 +0530
Subject: [PATCH 2/3] [AMDGPU] Use Align instead of MaybeAlign
---
.../AMDGPU/AMDGPUAsanInstrumentation.cpp | 23 ++++++++-----------
.../Target/AMDGPU/AMDGPUAsanInstrumentation.h | 8 +++----
2 files changed, 14 insertions(+), 17 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
index 739813310f67db..c6e3da89bc1e72 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
@@ -150,7 +150,7 @@ static Value *memToShadow(Module &M, IRBuilder<> &IRB, Type *IntptrTy,
static void instrumentAddressImpl(Module &M, IRBuilder<> &IRB,
Instruction *OrigIns,
Instruction *InsertBefore, Value *Addr,
- MaybeAlign Alignment, uint32_t TypeStoreSize,
+ Align Alignment, uint32_t TypeStoreSize,
bool IsWrite, Value *SizeArgument,
bool UseCalls, bool Recover, int AsanScale,
int AsanOffset) {
@@ -166,7 +166,7 @@ static void instrumentAddressImpl(Module &M, IRBuilder<> &IRB,
Value *ShadowPtr =
memToShadow(M, IRB, IntptrTy, AddrLong, AsanScale, AsanOffset);
const uint64_t ShadowAlign =
- std::max<uint64_t>(Alignment.valueOrOne().value() >> AsanScale, 1);
+ std::max<uint64_t>(Alignment.value() >> AsanScale, 1);
Value *ShadowValue = IRB.CreateAlignedLoad(
ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));
Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
@@ -182,12 +182,10 @@ static void instrumentAddressImpl(Module &M, IRBuilder<> &IRB,
}
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
- Instruction *InsertBefore, Value *Addr,
- MaybeAlign Alignment, TypeSize TypeStoreSize,
- bool IsWrite, Value *SizeArgument, bool UseCalls,
- bool Recover, int AsanScale, int AsanOffset) {
- // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
- // if the data is properly aligned.
+ Instruction *InsertBefore, Value *Addr, Align Alignment,
+ TypeSize TypeStoreSize, bool IsWrite,
+ Value *SizeArgument, bool UseCalls, bool Recover,
+ int AsanScale, int AsanOffset) {
if (!TypeStoreSize.isScalable()) {
unsigned Granularity = 1 << AsanScale;
const auto FixedSize = TypeStoreSize.getFixedValue();
@@ -197,8 +195,8 @@ void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
case 32:
case 64:
case 128:
- if (!Alignment || *Alignment >= Granularity ||
- *Alignment >= FixedSize / 8)
+ if (Alignment.value() >= Granularity ||
+ Alignment.value() >= FixedSize / 8)
return instrumentAddressImpl(
M, IRB, OrigIns, InsertBefore, Addr, Alignment, FixedSize, IsWrite,
SizeArgument, UseCalls, Recover, AsanScale, AsanOffset);
@@ -207,11 +205,10 @@ void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
// Instrument unusual size or unusual alignment.
IRB.SetInsertPoint(InsertBefore);
Type *AddrTy = Addr->getType();
- Type *IntptrTy = M.getDataLayout().getIntPtrType(
- M.getContext(), AddrTy->getPointerAddressSpace());
+ Type *IntptrTy = M.getDataLayout().getIntPtrType(AddrTy);
Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
- Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
+ Value *AddrLong = IRB.CreatePtrToInt(Addr, IntptrTy);
Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
Value *LastByte =
IRB.CreateIntToPtr(IRB.CreateAdd(AddrLong, SizeMinusOne), AddrTy);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h
index 4dd2bb44d970a5..f28952b431410e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h
@@ -43,10 +43,10 @@ uint64_t getRedzoneSizeForGlobal(int Scale, uint64_t SizeInBytes);
/// Instrument the memory operand Addr.
/// Generates report blocks that catch the addressing errors.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
- Instruction *InsertBefore, Value *Addr,
- MaybeAlign Alignment, TypeSize TypeStoreSize,
- bool IsWrite, Value *SizeArgument, bool UseCalls,
- bool Recover, int Scale, int Offset);
+ Instruction *InsertBefore, Value *Addr, Align Alignment,
+ TypeSize TypeStoreSize, bool IsWrite,
+ Value *SizeArgument, bool UseCalls, bool Recover,
+ int Scale, int Offset);
/// Get all the memory operands from the instruction
/// that needs to be instrumented
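
This second patch moves the optional-alignment handling out of the implementation: the parameter is now a concrete Align, so instrumentAddressImpl drops valueOrOne() and the dispatch compares Alignment.value() directly. One behavioral nuance worth noting: an unknown alignment, which previously satisfied the old "!Alignment || ..." check outright, now resolves to Align(1) at the call boundary and so takes the two-check path for anything wider than one byte. A hedged sketch of a caller that only has a MaybeAlign in hand (CB and ArgNo are hypothetical):

    // Alignment recovered from a call argument is naturally a MaybeAlign,
    // since CallBase::getParamAlign can be empty.
    MaybeAlign MA = CB->getParamAlign(ArgNo);
    // Resolve it before calling the new Align-based signature; an unknown
    // alignment conservatively becomes Align(1).
    Align A = MA.valueOrOne();
    instrumentAddress(M, IRB, CB, CB, Addr, A, StoreSize, IsWrite,
                      /*SizeArgument=*/nullptr, /*UseCalls=*/false,
                      /*Recover=*/true, AsanScale, AsanOffset);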
From 3aa64d07724cd6cef4abd7c11f989c8241d1be84 Mon Sep 17 00:00:00 2001
From: skc7 <Krishna.Sankisa at amd.com>
Date: Tue, 20 Aug 2024 12:46:07 +0530
Subject: [PATCH 3/3] [AMDGPU] Use CreateAdd for SizeMinusOne
---
llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
index c6e3da89bc1e72..4c8ddbd9aabd5a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
@@ -209,7 +209,7 @@ void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
Value *AddrLong = IRB.CreatePtrToInt(Addr, IntptrTy);
- Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
+ Value *SizeMinusOne = IRB.CreateAdd(Size, ConstantInt::get(IntptrTy, -1));
Value *LastByte =
IRB.CreateIntToPtr(IRB.CreateAdd(AddrLong, SizeMinusOne), AddrTy);
instrumentAddressImpl(M, IRB, OrigIns, InsertBefore, Addr, {}, 8, IsWrite,