[llvm] 33f1aed - [AMDGPU] Update instrumentAddress method to support aligned size and unusual size accesses. (#104804)

via llvm-commits <llvm-commits@lists.llvm.org>
Tue Aug 20 22:01:46 PDT 2024


Author: Chaitanya
Date: 2024-08-21T10:31:42+05:30
New Revision: 33f1aedef878931f61208b39c0220aa0d4bc9381

URL: https://github.com/llvm/llvm-project/commit/33f1aedef878931f61208b39c0220aa0d4bc9381
DIFF: https://github.com/llvm/llvm-project/commit/33f1aedef878931f61208b39c0220aa0d4bc9381.diff

LOG: [AMDGPU] Update instrumentAddress method to support aligned size and unusual size accesses. (#104804)

This PR updates the instrumentAddress API to properly support aligned sizes
and unusual size accesses. The changes are ported from the ASan pass.
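
For context, a minimal sketch of how a caller might invoke the updated
signature (Align plus TypeSize) for a load. The wrapper function, the
AsanScale/AsanOffset values, and the llvm::AMDGPU:: qualification are
illustrative assumptions, not part of this commit:

    #include "AMDGPUAsanInstrumentation.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"

    // Hypothetical caller; only the instrumentAddress signature comes from
    // this patch.
    void instrumentLoadSketch(llvm::Module &M, llvm::LoadInst *LI,
                              int AsanScale, int AsanOffset) {
      llvm::IRBuilder<> IRB(LI);
      llvm::Value *Addr = LI->getPointerOperand();
      // Store size in bits; if it is scalable or an unusual size, the helper
      // falls back to byte-granular checks of the first and last byte.
      llvm::TypeSize StoreSize =
          M.getDataLayout().getTypeStoreSizeInBits(LI->getType());
      llvm::AMDGPU::instrumentAddress(M, IRB, /*OrigIns=*/LI,
                                      /*InsertBefore=*/LI, Addr,
                                      LI->getAlign(), StoreSize,
                                      /*IsWrite=*/false,
                                      /*SizeArgument=*/nullptr,
                                      /*UseCalls=*/false, /*Recover=*/false,
                                      AsanScale, AsanOffset);
    }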

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
    llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
index 593fca5bc3ed68..4c8ddbd9aabd5a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.cpp
@@ -147,11 +147,13 @@ static Value *memToShadow(Module &M, IRBuilder<> &IRB, Type *IntptrTy,
   return IRB.CreateAdd(Shadow, ShadowBase);
 }
 
-void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
-                       Instruction *InsertBefore, Value *Addr,
-                       MaybeAlign Alignment, uint32_t TypeStoreSize,
-                       bool IsWrite, Value *SizeArgument, bool UseCalls,
-                       bool Recover, int AsanScale, int AsanOffset) {
+static void instrumentAddressImpl(Module &M, IRBuilder<> &IRB,
+                                  Instruction *OrigIns,
+                                  Instruction *InsertBefore, Value *Addr,
+                                  Align Alignment, uint32_t TypeStoreSize,
+                                  bool IsWrite, Value *SizeArgument,
+                                  bool UseCalls, bool Recover, int AsanScale,
+                                  int AsanOffset) {
   Type *AddrTy = Addr->getType();
   Type *IntptrTy = M.getDataLayout().getIntPtrType(
       M.getContext(), AddrTy->getPointerAddressSpace());
@@ -164,7 +166,7 @@ void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
   Value *ShadowPtr =
       memToShadow(M, IRB, IntptrTy, AddrLong, AsanScale, AsanOffset);
   const uint64_t ShadowAlign =
-      std::max<uint64_t>(Alignment.valueOrOne().value() >> AsanScale, 1);
+      std::max<uint64_t>(Alignment.value() >> AsanScale, 1);
   Value *ShadowValue = IRB.CreateAlignedLoad(
       ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));
   Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
@@ -179,6 +181,43 @@ void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
   return;
 }
 
+void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
+                       Instruction *InsertBefore, Value *Addr, Align Alignment,
+                       TypeSize TypeStoreSize, bool IsWrite,
+                       Value *SizeArgument, bool UseCalls, bool Recover,
+                       int AsanScale, int AsanOffset) {
+  if (!TypeStoreSize.isScalable()) {
+    unsigned Granularity = 1 << AsanScale;
+    const auto FixedSize = TypeStoreSize.getFixedValue();
+    switch (FixedSize) {
+    case 8:
+    case 16:
+    case 32:
+    case 64:
+    case 128:
+      if (Alignment.value() >= Granularity ||
+          Alignment.value() >= FixedSize / 8)
+        return instrumentAddressImpl(
+            M, IRB, OrigIns, InsertBefore, Addr, Alignment, FixedSize, IsWrite,
+            SizeArgument, UseCalls, Recover, AsanScale, AsanOffset);
+    }
+  }
+  // Instrument unusual size or unusual alignment.
+  IRB.SetInsertPoint(InsertBefore);
+  Type *AddrTy = Addr->getType();
+  Type *IntptrTy = M.getDataLayout().getIntPtrType(AddrTy);
+  Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
+  Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
+  Value *AddrLong = IRB.CreatePtrToInt(Addr, IntptrTy);
+  Value *SizeMinusOne = IRB.CreateAdd(Size, ConstantInt::get(IntptrTy, -1));
+  Value *LastByte =
+      IRB.CreateIntToPtr(IRB.CreateAdd(AddrLong, SizeMinusOne), AddrTy);
+  instrumentAddressImpl(M, IRB, OrigIns, InsertBefore, Addr, {}, 8, IsWrite,
+                        SizeArgument, UseCalls, Recover, AsanScale, AsanOffset);
+  instrumentAddressImpl(M, IRB, OrigIns, InsertBefore, LastByte, {}, 8, IsWrite,
+                        SizeArgument, UseCalls, Recover, AsanScale, AsanOffset);
+}
+
 void getInterestingMemoryOperands(
     Module &M, Instruction *I,
     SmallVectorImpl<InterestingMemoryOperand> &Interesting) {

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h
index b10fded57b1a7e..b2b8ec19b49ece 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsanInstrumentation.h
@@ -43,10 +43,10 @@ uint64_t getRedzoneSizeForGlobal(int Scale, uint64_t SizeInBytes);
 /// Instrument the memory operand Addr.
 /// Generates report blocks that catch the addressing errors.
 void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns,
-                       Instruction *InsertBefore, Value *Addr,
-                       MaybeAlign Alignment, uint32_t TypeStoreSize,
-                       bool IsWrite, Value *SizeArgument, bool UseCalls,
-                       bool Recover, int Scale, int Offset);
+                       Instruction *InsertBefore, Value *Addr, Align Alignment,
+                       TypeSize TypeStoreSize, bool IsWrite,
+                       Value *SizeArgument, bool UseCalls, bool Recover,
+                       int Scale, int Offset);
 
 /// Get all the memory operands from the instruction
 /// that needs to be instrumented


        

