[llvm] de71056 - [ASAN] Initial support memory checks on scalable vector typed allocas

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 15 07:59:50 PDT 2023


Author: Philip Reames
Date: 2023-03-15T07:59:42-07:00
New Revision: de71056a7dbce33186687153af267e0503bbacdc

URL: https://github.com/llvm/llvm-project/commit/de71056a7dbce33186687153af267e0503bbacdc
DIFF: https://github.com/llvm/llvm-project/commit/de71056a7dbce33186687153af267e0503bbacdc.diff

LOG: [ASAN] Initial support for memory checks on scalable vector typed allocas

This patch adjusts the memory instrumentation to account for scalable vector types in allocas. Note that we don't allow scalable vector globals, so we don't need to update that codepath.

A couple of points.

First, this simply disables the optimization for scalable allocas. We can revisit this in the future, but it requires a bit of plumbing to get scalable object sizes through the visitor to be useful.

Second, I am simply disabling stack poisoning for scalable vector allocas. This is mostly for staging the change, as I can't write a working test for memory instrumentation without doing so. I don't think it's unreasonable to do on its own merits either, since without the bailout we crash the compiler.

Differential Revision: https://reviews.llvm.org/D145259

Added: 
    

Modified: 
    llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
    llvm/test/Instrumentation/AddressSanitizer/vector-load-store.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 65841f253ca8c..a6ae6c79523a2 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -667,7 +667,7 @@ struct AddressSanitizer {
     assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
   }
 
-  uint64_t getAllocaSizeInBytes(const AllocaInst &AI) const {
+  TypeSize getAllocaSizeInBytes(const AllocaInst &AI) const {
     uint64_t ArraySize = 1;
     if (AI.isArrayAllocation()) {
       const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
@@ -675,7 +675,7 @@ struct AddressSanitizer {
       ArraySize = CI->getZExtValue();
     }
     Type *Ty = AI.getAllocatedType();
-    uint64_t SizeInBytes =
+    TypeSize SizeInBytes =
         AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
     return SizeInBytes * ArraySize;
   }
@@ -1040,7 +1040,9 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
 
   /// Collect Alloca instructions we want (and can) handle.
   void visitAllocaInst(AllocaInst &AI) {
-    if (!ASan.isInterestingAlloca(AI)) {
+    // FIXME: Handle scalable vectors instead of ignoring them.
+    if (!ASan.isInterestingAlloca(AI) ||
+        isa<ScalableVectorType>(AI.getAllocatedType())) {
       if (AI.isStaticAlloca()) {
         // Skip over allocas that are present *before* the first instrumented
         // alloca, we don't want to move those around.
@@ -1254,7 +1256,7 @@ bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
   bool IsInteresting =
       (AI.getAllocatedType()->isSized() &&
        // alloca() may be called with 0 size, ignore it.
-       ((!AI.isStaticAlloca()) || getAllocaSizeInBytes(AI) > 0) &&
+       ((!AI.isStaticAlloca()) || !getAllocaSizeInBytes(AI).isZero()) &&
        // We are only interested in allocas not promotable to registers.
        // Promotable allocas are common under -O0.
        (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
@@ -3492,6 +3494,10 @@ void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
 // constant inbounds index.
 bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
                                     Value *Addr, TypeSize TypeStoreSize) const {
+  if (TypeStoreSize.isScalable())
+    // TODO: We can use vscale_range to convert a scalable value to an
+    // upper bound on the access size.
+    return false;
   SizeOffsetType SizeOffset = ObjSizeVis.compute(Addr);
   if (!ObjSizeVis.bothKnown(SizeOffset)) return false;
   uint64_t Size = SizeOffset.first.getZExtValue();

diff  --git a/llvm/test/Instrumentation/AddressSanitizer/vector-load-store.ll b/llvm/test/Instrumentation/AddressSanitizer/vector-load-store.ll
index c0b781e2aa90b..750c65d8d91f1 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/vector-load-store.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/vector-load-store.ll
@@ -947,3 +947,67 @@ define void @store.nxv16i32(ptr %p) sanitize_address {
   ret void
 }
 
+declare void @clobber(ptr)
+
+define <vscale x 2 x i32> @local_alloca() sanitize_address {
+; CHECK-LABEL: @local_alloca(
+; CHECK-NEXT:    [[A:%.*]] = alloca <vscale x 2 x i32>, align 8
+; CHECK-NEXT:    call void @clobber(ptr [[A]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 64
+; CHECK-NEXT:    [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
+; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[A]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = lshr i64 [[TMP8]], 3
+; CHECK-NEXT:    [[TMP10:%.*]] = or i64 [[TMP9]], 17592186044416
+; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; CHECK-NEXT:    [[TMP12:%.*]] = load i8, ptr [[TMP11]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
+; CHECK-NEXT:    br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP19:%.*]], !prof [[PROF0]]
+; CHECK:       14:
+; CHECK-NEXT:    [[TMP15:%.*]] = and i64 [[TMP8]], 7
+; CHECK-NEXT:    [[TMP16:%.*]] = trunc i64 [[TMP15]] to i8
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp sge i8 [[TMP16]], [[TMP12]]
+; CHECK-NEXT:    br i1 [[TMP17]], label [[TMP18:%.*]], label [[TMP19]]
+; CHECK:       18:
+; CHECK-NEXT:    call void @__asan_report_load_n(i64 [[TMP8]], i64 [[TMP3]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       19:
+; CHECK-NEXT:    [[TMP20:%.*]] = ptrtoint ptr [[TMP7]] to i64
+; CHECK-NEXT:    [[TMP21:%.*]] = lshr i64 [[TMP20]], 3
+; CHECK-NEXT:    [[TMP22:%.*]] = or i64 [[TMP21]], 17592186044416
+; CHECK-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
+; CHECK-NEXT:    [[TMP24:%.*]] = load i8, ptr [[TMP23]], align 1
+; CHECK-NEXT:    [[TMP25:%.*]] = icmp ne i8 [[TMP24]], 0
+; CHECK-NEXT:    br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP31:%.*]], !prof [[PROF0]]
+; CHECK:       26:
+; CHECK-NEXT:    [[TMP27:%.*]] = and i64 [[TMP20]], 7
+; CHECK-NEXT:    [[TMP28:%.*]] = trunc i64 [[TMP27]] to i8
+; CHECK-NEXT:    [[TMP29:%.*]] = icmp sge i8 [[TMP28]], [[TMP24]]
+; CHECK-NEXT:    br i1 [[TMP29]], label [[TMP30:%.*]], label [[TMP31]]
+; CHECK:       30:
+; CHECK-NEXT:    call void @__asan_report_load_n(i64 [[TMP20]], i64 [[TMP3]]) #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       31:
+; CHECK-NEXT:    [[RES:%.*]] = load <vscale x 2 x i32>, ptr [[A]], align 8
+; CHECK-NEXT:    ret <vscale x 2 x i32> [[RES]]
+;
+; CALLS-LABEL: @local_alloca(
+; CALLS-NEXT:    [[A:%.*]] = alloca <vscale x 2 x i32>, align 8
+; CALLS-NEXT:    call void @clobber(ptr [[A]])
+; CALLS-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; CALLS-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 64
+; CALLS-NEXT:    [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
+; CALLS-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
+; CALLS-NEXT:    call void @__asan_loadN(i64 [[TMP4]], i64 [[TMP3]])
+; CALLS-NEXT:    [[RES:%.*]] = load <vscale x 2 x i32>, ptr [[A]], align 8
+; CALLS-NEXT:    ret <vscale x 2 x i32> [[RES]]
+;
+  %a = alloca <vscale x 2 x i32>
+  call void @clobber(ptr %a)
+  %res = load <vscale x 2 x i32>, ptr %a
+  ret <vscale x 2 x i32> %res
+}


        


More information about the llvm-commits mailing list