[llvm] a3f4139 - [asan] Implemented flag to emit intrinsics to optimize ASan callbacks.

Kirill Stoimenov via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 26 13:34:11 PDT 2021


Author: Kirill Stoimenov
Date: 2021-08-26T20:33:57Z
New Revision: a3f413962627a1e70099731620cbf8308390f734

URL: https://github.com/llvm/llvm-project/commit/a3f413962627a1e70099731620cbf8308390f734
DIFF: https://github.com/llvm/llvm-project/commit/a3f413962627a1e70099731620cbf8308390f734.diff

LOG: [asan] Implemented flag to emit intrinsics to optimize ASan callbacks.

Reviewed By: vitalybuka

Differential Revision: https://reviews.llvm.org/D108377

Added: 
    llvm/test/Instrumentation/AddressSanitizer/asan-optimize-callbacks.ll

Modified: 
    llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 480bb084f0986..a981ab790455e 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -356,6 +356,10 @@ static cl::opt<uint64_t>
 static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                            cl::Hidden, cl::init(true));
 
+static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
+                                         cl::desc("Optimize callbacks"),
+                                         cl::Hidden, cl::init(false));
+
 static cl::opt<bool> ClOptSameTemp(
     "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
     cl::Hidden, cl::init(true));
@@ -657,6 +661,8 @@ struct AddressSanitizer {
     C = &(M.getContext());
     LongSize = M.getDataLayout().getPointerSizeInBits();
     IntptrTy = Type::getIntNTy(*C, LongSize);
+    Int8PtrTy = Type::getInt8PtrTy(*C);
+    Int32Ty = Type::getInt32Ty(*C);
     TargetTriple = Triple(M.getTargetTriple());
 
     Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
@@ -747,6 +753,8 @@ struct AddressSanitizer {
   bool UseAfterScope;
   AsanDetectStackUseAfterReturnMode UseAfterReturn;
   Type *IntptrTy;
+  Type *Int8PtrTy;
+  Type *Int32Ty;
   ShadowMapping Mapping;
   FunctionCallee AsanHandleNoReturnFunc;
   FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
@@ -1766,9 +1774,20 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
   }
 
   IRBuilder<> IRB(InsertBefore);
-  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
   size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
+  const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
 
+  if (UseCalls && ClOptimizeCallbacks) {
+    const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
+    Module *M = IRB.GetInsertBlock()->getParent()->getParent();
+    IRB.CreateCall(
+        Intrinsic::getDeclaration(M, Intrinsic::asan_check_memaccess),
+        {IRB.CreatePointerCast(Addr, Int8PtrTy),
+         ConstantInt::get(Int32Ty, AccessInfo.Packed)});
+    return;
+  }
+
+  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
   if (UseCalls) {
     if (Exp == 0)
       IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],

diff --git a/llvm/test/Instrumentation/AddressSanitizer/asan-optimize-callbacks.ll b/llvm/test/Instrumentation/AddressSanitizer/asan-optimize-callbacks.ll
new file mode 100644
index 0000000000000..b937d38187755
--- /dev/null
+++ b/llvm/test/Instrumentation/AddressSanitizer/asan-optimize-callbacks.ll
@@ -0,0 +1,86 @@
+; RUN: opt < %s -asan -enable-new-pm=0 -asan-instrumentation-with-call-threshold=0 \
+; RUN:   -asan-optimize-callbacks -S | FileCheck %s --check-prefixes=LOAD,STORE
+; RUN: opt < %s -asan -enable-new-pm=0 -asan-instrumentation-with-call-threshold=0 \
+; RUN:   -asan-optimize-callbacks --asan-kernel -S | \
+; RUN:   FileCheck %s --check-prefixes=LOAD-KERNEL,STORE-KERNEL
+
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @load(i8* %p1, i16* %p2, i32* %p4, i64* %p8, i128* %p16)
+sanitize_address {
+  %n1 = load i8, i8* %p1, align 1
+  %n2 = load i16, i16* %p2, align 2
+  %n4 = load i32, i32* %p4, align 4
+  %n8 = load i64, i64* %p8, align 8
+  %n16 = load i128, i128* %p16, align 16
+; LOAD:      call void @llvm.asan.check.memaccess(i8* %p1, i32 0)
+; LOAD-NEXT: %n1 = load i8, i8* %p1, align 1
+; LOAD-NEXT: %1 = bitcast i16* %p2 to i8*
+; LOAD-NEXT: call void @llvm.asan.check.memaccess(i8* %1, i32 2)
+; LOAD-NEXT: %n2 = load i16, i16* %p2, align 2
+; LOAD-NEXT: %2 = bitcast i32* %p4 to i8*
+; LOAD-NEXT: call void @llvm.asan.check.memaccess(i8* %2, i32 4)
+; LOAD-NEXT: %n4 = load i32, i32* %p4, align 4
+; LOAD-NEXT: %3 = bitcast i64* %p8 to i8*
+; LOAD-NEXT: call void @llvm.asan.check.memaccess(i8* %3, i32 6)
+; LOAD-NEXT: %n8 = load i64, i64* %p8, align 8
+; LOAD-NEXT: %4 = bitcast i128* %p16 to i8*
+; LOAD-NEXT: call void @llvm.asan.check.memaccess(i8* %4, i32 8)
+; LOAD-NEXT: %n16 = load i128, i128* %p16, align 16
+
+; LOAD-KERNEL:      call void @llvm.asan.check.memaccess(i8* %p1, i32 1)
+; LOAD-KERNEL-NEXT: %n1 = load i8, i8* %p1, align 1
+; LOAD-KERNEL-NEXT: %1 = bitcast i16* %p2 to i8*
+; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %1, i32 3)
+; LOAD-KERNEL-NEXT: %n2 = load i16, i16* %p2, align 2
+; LOAD-KERNEL-NEXT: %2 = bitcast i32* %p4 to i8*
+; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %2, i32 5)
+; LOAD-KERNEL-NEXT: %n4 = load i32, i32* %p4, align 4
+; LOAD-KERNEL-NEXT: %3 = bitcast i64* %p8 to i8*
+; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %3, i32 7)
+; LOAD-KERNEL-NEXT: %n8 = load i64, i64* %p8, align 8
+; LOAD-KERNEL-NEXT: %4 = bitcast i128* %p16 to i8*
+; LOAD-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %4, i32 9)
+; LOAD-KERNEL-NEXT: %n16 = load i128, i128* %p16, align 16
+  ret void
+}
+
+define void @store(i8* %p1, i16* %p2, i32* %p4, i64* %p8, i128* %p16)
+sanitize_address {
+  store i8 0, i8* %p1, align 1
+  store i16 0, i16* %p2, align 2
+  store i32 0, i32* %p4, align 4
+  store i64 0, i64* %p8, align 8
+  store i128 0, i128* %p16, align 16
+; STORE:      call void @llvm.asan.check.memaccess(i8* %p1, i32 32)
+; STORE-NEXT: store i8 0, i8* %p1, align 1
+; STORE-NEXT: %1 = bitcast i16* %p2 to i8*
+; STORE-NEXT: call void @llvm.asan.check.memaccess(i8* %1, i32 34)
+; STORE-NEXT: store i16 0, i16* %p2, align 2
+; STORE-NEXT: %2 = bitcast i32* %p4 to i8*
+; STORE-NEXT: call void @llvm.asan.check.memaccess(i8* %2, i32 36)
+; STORE-NEXT: store i32 0, i32* %p4, align 4
+; STORE-NEXT: %3 = bitcast i64* %p8 to i8*
+; STORE-NEXT: call void @llvm.asan.check.memaccess(i8* %3, i32 38)
+; STORE-NEXT: store i64 0, i64* %p8, align 8
+; STORE-NEXT: %4 = bitcast i128* %p16 to i8*
+; STORE-NEXT: call void @llvm.asan.check.memaccess(i8* %4, i32 40)
+; STORE-NEXT: store i128 0, i128* %p16, align 16
+
+; STORE-KERNEL:      call void @llvm.asan.check.memaccess(i8* %p1, i32 33)
+; STORE-KERNEL-NEXT: store i8 0, i8* %p1, align 1
+; STORE-KERNEL-NEXT: %1 = bitcast i16* %p2 to i8*
+; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %1, i32 35)
+; STORE-KERNEL-NEXT: store i16 0, i16* %p2, align 2
+; STORE-KERNEL-NEXT: %2 = bitcast i32* %p4 to i8*
+; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %2, i32 37)
+; STORE-KERNEL-NEXT: store i32 0, i32* %p4, align 4
+; STORE-KERNEL-NEXT: %3 = bitcast i64* %p8 to i8*
+; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %3, i32 39)
+; STORE-KERNEL-NEXT: store i64 0, i64* %p8, align 8
+; STORE-KERNEL-NEXT: %4 = bitcast i128* %p16 to i8*
+; STORE-KERNEL-NEXT: call void @llvm.asan.check.memaccess(i8* %4, i32 41)
+; STORE-KERNEL-NEXT: store i128 0, i128* %p16, align 16
+; STORE-KERNEL-NEXT: ret void
+  ret void
+}


        


More information about the llvm-commits mailing list