[llvm] 5f185a8 - [AddressSanitizer] Fix for wrong argument values appearing in backtraces

Vedant Kumar via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 6 15:59:35 PDT 2020


Author: Vedant Kumar
Date: 2020-04-06T15:59:25-07:00
New Revision: 5f185a89991e85eed10c630028fc9d2569514491

URL: https://github.com/llvm/llvm-project/commit/5f185a89991e85eed10c630028fc9d2569514491
DIFF: https://github.com/llvm/llvm-project/commit/5f185a89991e85eed10c630028fc9d2569514491.diff

LOG: [AddressSanitizer] Fix for wrong argument values appearing in backtraces

Summary:
In some cases, ASan may insert instrumentation before function arguments
have been stored into their allocas. This causes two issues:

1) The argument value must be spilled until it can be stored into the
   reserved alloca, wasting a stack slot.

2) Until the store occurs in a later basic block, the debug location
   will point to the wrong frame offset, and backtraces will show an
   uninitialized value.

The proposed solution is to hoist the instructions which initialize the
argument allocas to earlier in the entry block, before the position where
ASan starts inserting its instrumentation.
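
Concretely, the hoisted instructions match the argument-initialization
patterns handled by the new findStoresToUninstrumentedArgAllocas helper
below. A minimal sketch of the two shapes it recognizes (IR adapted from
the test added in this commit, not exhaustive):

```
%struct.S = type { i32, i32 }

define void @f(%struct.S* %a, i1 zeroext %doit) sanitize_address {
entry:
  %a.addr = alloca %struct.S*, align 8
  %doit.addr = alloca i8, align 1
  ; 1) A direct store of an argument into its (uninstrumented) alloca.
  store %struct.S* %a, %struct.S** %a.addr, align 8
  ; 2) A cast of an argument, immediately followed by the store.
  %frombool = zext i1 %doit to i8
  store i8 %frombool, i8* %doit.addr, align 1
  ret void
}
```

These stores (and the cast feeding the second one) are moved to just before
the point where ASan begins inserting its entry-block instrumentation, so
the argument's frame slot is initialized for the entire function.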

For the motivating test case, before the patch we see:

```
 | 0033: movq %rdi, 0x68(%rbx)  |   | DW_TAG_formal_parameter     |
 | ...                          |   |   DW_AT_name ("a")          |
 | 00d1: movq 0x68(%rbx), %rsi  |   |   DW_AT_location (RBX+0x90) |
 | 00d5: movq %rsi, 0x90(%rbx)  |   |       ^ not correct ...     |
```

and after the patch we see:

```
 | 002f: movq %rdi, 0x70(%rbx)  |   | DW_TAG_formal_parameter     |
 |                              |   |   DW_AT_name ("a")          |
 |                              |   |   DW_AT_location (RBX+0x70) |
```
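
In IR terms, the new test (added below) checks that the instrumented entry
block now begins with the argument allocas and their initializing stores,
ahead of ASan's own stack setup. Roughly, as a sketch based on the test's
CHECK lines rather than exact pass output:

```
entry:
  %a.addr = alloca %struct.S*, align 8      ; argument allocas first
  %b.addr = alloca %struct.S*, align 8
  %doit.addr = alloca i8, align 1
  store %struct.S* %a, %struct.S** %a.addr  ; hoisted argument stores
  store %struct.S* %b, %struct.S** %b.addr
  %frombool = zext i1 %doit to i8
  store i8 %frombool, i8* %doit.addr
  %stack_base = alloca i64                  ; ASan's own setup follows
  ; ... (the %stack_base name is illustrative)
```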

rdar://61122691

Reviewers: aprantl, eugenis

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D77182

Added: 
    llvm/test/Instrumentation/AddressSanitizer/hoist-argument-init-insts.ll

Modified: 
    llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index af6434cd6033..b21a100de12b 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -2983,6 +2983,59 @@ void FunctionStackPoisoner::processDynamicAllocas() {
   unpoisonDynamicAllocas();
 }
 
+/// Collect instructions in the entry block after \p InsBefore which initialize
+/// permanent storage for a function argument. These instructions must remain in
+/// the entry block so that uninitialized values do not appear in backtraces. An
+/// added benefit is that this conserves spill slots. This does not move stores
+/// before instrumented / "interesting" allocas.
+static void findStoresToUninstrumentedArgAllocas(
+    AddressSanitizer &ASan, Instruction &InsBefore,
+    SmallVectorImpl<Instruction *> &InitInsts) {
+  Instruction *Start = InsBefore.getNextNonDebugInstruction();
+  for (Instruction *It = Start; It; It = It->getNextNonDebugInstruction()) {
+    // Argument initialization looks like:
+    // 1) store <Argument>, <Alloca> OR
+    // 2) <CastArgument> = cast <Argument> to ...
+    //    store <CastArgument> to <Alloca>
+    // Do not consider any other kind of instruction.
+    //
+    // Note: This covers all known cases, but may not be exhaustive. An
+    // alternative to pattern-matching stores is to DFS over all Argument uses:
+    // this might be more general, but is probably much more complicated.
+    if (isa<AllocaInst>(It) || isa<CastInst>(It))
+      continue;
+    if (auto *Store = dyn_cast<StoreInst>(It)) {
+      // The store destination must be an alloca that isn't interesting for
+      // ASan to instrument. These are moved up before InsBefore, and they're
+      // not interesting because allocas for arguments can be mem2reg'd.
+      auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
+      if (!Alloca || ASan.isInterestingAlloca(*Alloca))
+        continue;
+
+      Value *Val = Store->getValueOperand();
+      bool IsDirectArgInit = isa<Argument>(Val);
+      bool IsArgInitViaCast =
+          isa<CastInst>(Val) &&
+          isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
+          // Check that the cast appears directly before the store. Otherwise
+          // moving the cast before InsBefore may break the IR.
+          Val == It->getPrevNonDebugInstruction();
+      bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
+      if (!IsArgInit)
+        continue;
+
+      if (IsArgInitViaCast)
+        InitInsts.push_back(cast<Instruction>(Val));
+      InitInsts.push_back(Store);
+      continue;
+    }
+
+    // Do not reorder past unknown instructions: argument initialization should
+    // only involve casts and stores.
+    return;
+  }
+}
+
 void FunctionStackPoisoner::processStaticAllocas() {
   if (AllocaVec.empty()) {
     assert(StaticAllocaPoisonCallVec.empty());
@@ -3006,6 +3059,15 @@ void FunctionStackPoisoner::processStaticAllocas() {
     if (AI->getParent() == InsBeforeB)
       AI->moveBefore(InsBefore);
 
+  // Move stores of arguments into entry-block allocas as well. This prevents
+  // extra stack slots from being generated (to house the argument values until
+  // they can be stored into the allocas). This also prevents uninitialized
+  // values from being shown in backtraces.
+  SmallVector<Instruction *, 8> ArgInitInsts;
+  findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
+  for (Instruction *ArgInitInst : ArgInitInsts)
+    ArgInitInst->moveBefore(InsBefore);
+
   // If we have a call to llvm.localescape, keep it in the entry block.
   if (LocalEscapeCall) LocalEscapeCall->moveBefore(InsBefore);
 

diff --git a/llvm/test/Instrumentation/AddressSanitizer/hoist-argument-init-insts.ll b/llvm/test/Instrumentation/AddressSanitizer/hoist-argument-init-insts.ll
new file mode 100644
index 000000000000..1414b2122d98
--- /dev/null
+++ b/llvm/test/Instrumentation/AddressSanitizer/hoist-argument-init-insts.ll
@@ -0,0 +1,173 @@
+; RUN: opt < %s -asan -asan-module -asan-use-after-return -S | FileCheck %s
+
+; Source (-O0 -fsanitize=address -fsanitize-address-use-after-scope):
+;; struct S { int x, y; };
+;; void swap(S *a, S *b, bool doit) {
+;;   if (!doit)
+;;     return;
+;;   auto tmp = *a;
+;;   *a = *b;
+;;   *b = tmp;
+;; }
+
+target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.14.0"
+
+%struct.S = type { i32, i32 }
+
+; CHECK-LABEL: define {{.*}} @_Z4swapP1SS0_b(
+
+; First come the argument allocas.
+; CHECK:      [[argA:%.*]] = alloca %struct.S*,
+; CHECK-NEXT: [[argB:%.*]] = alloca %struct.S*,
+; CHECK-NEXT: [[argDoit:%.*]] = alloca i8,
+
+; Next, the stores into the argument allocas.
+; CHECK-NEXT: store %struct.S* {{.*}}, %struct.S** [[argA]]
+; CHECK-NEXT: store %struct.S* {{.*}}, %struct.S** [[argB]]
+; CHECK-NEXT: [[frombool:%.*]] = zext i1 {{.*}} to i8
+; CHECK-NEXT: store i8 [[frombool]], i8* [[argDoit]]
+
+define void @_Z4swapP1SS0_b(%struct.S* %a, %struct.S* %b, i1 zeroext %doit) sanitize_address {
+entry:
+  %a.addr = alloca %struct.S*, align 8
+  %b.addr = alloca %struct.S*, align 8
+  %doit.addr = alloca i8, align 1
+  %tmp = alloca %struct.S, align 4
+  store %struct.S* %a, %struct.S** %a.addr, align 8
+  store %struct.S* %b, %struct.S** %b.addr, align 8
+  %frombool = zext i1 %doit to i8
+  store i8 %frombool, i8* %doit.addr, align 1
+  %0 = load i8, i8* %doit.addr, align 1
+  %tobool = trunc i8 %0 to i1
+  br i1 %tobool, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  br label %return
+
+if.end:                                           ; preds = %entry
+  %1 = load %struct.S*, %struct.S** %a.addr, align 8
+  %2 = bitcast %struct.S* %tmp to i8*
+  %3 = bitcast %struct.S* %1 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %2, i8* align 4 %3, i64 8, i1 false)
+  %4 = load %struct.S*, %struct.S** %b.addr, align 8
+  %5 = load %struct.S*, %struct.S** %a.addr, align 8
+  %6 = bitcast %struct.S* %5 to i8*
+  %7 = bitcast %struct.S* %4 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %6, i8* align 4 %7, i64 8, i1 false)
+  %8 = load %struct.S*, %struct.S** %b.addr, align 8
+  %9 = bitcast %struct.S* %8 to i8*
+  %10 = bitcast %struct.S* %tmp to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %9, i8* align 4 %10, i64 8, i1 false)
+  br label %return
+
+return:                                           ; preds = %if.end, %if.then
+  ret void
+}
+
+; Synthetic test case, meant to check that we do not reorder instructions past
+; a load when attempting to hoist argument init insts.
+; CHECK-LABEL: define {{.*}} @func_with_load_in_arginit_sequence
+; CHECK:      [[argA:%.*]] = alloca %struct.S*,
+; CHECK-NEXT: [[argB:%.*]] = alloca %struct.S*,
+; CHECK-NEXT: [[argDoit:%.*]] = alloca i8,
+; CHECK-NEXT: store %struct.S* {{.*}}, %struct.S** [[argA]]
+; CHECK-NEXT: store %struct.S* {{.*}}, %struct.S** [[argB]]
+; CHECK-NEXT: [[stack_base:%.*]] = alloca i64
+define void @func_with_load_in_arginit_sequence(%struct.S* %a, %struct.S* %b, i1 zeroext %doit) sanitize_address {
+entry:
+  %a.addr = alloca %struct.S*, align 8
+  %b.addr = alloca %struct.S*, align 8
+  %doit.addr = alloca i8, align 1
+  %tmp = alloca %struct.S, align 4
+  store %struct.S* %a, %struct.S** %a.addr, align 8
+  store %struct.S* %b, %struct.S** %b.addr, align 8
+
+  ; This load prevents the next argument init sequence from being moved.
+  %0 = load i8, i8* %doit.addr, align 1 
+
+  %frombool = zext i1 %doit to i8
+  store i8 %frombool, i8* %doit.addr, align 1
+  %tobool = trunc i8 %0 to i1
+  br i1 %tobool, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  br label %return
+
+if.end:                                           ; preds = %entry
+  %1 = load %struct.S*, %struct.S** %a.addr, align 8
+  %2 = bitcast %struct.S* %tmp to i8*
+  %3 = bitcast %struct.S* %1 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %2, i8* align 4 %3, i64 8, i1 false)
+  %4 = load %struct.S*, %struct.S** %b.addr, align 8
+  %5 = load %struct.S*, %struct.S** %a.addr, align 8
+  %6 = bitcast %struct.S* %5 to i8*
+  %7 = bitcast %struct.S* %4 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %6, i8* align 4 %7, i64 8, i1 false)
+  %8 = load %struct.S*, %struct.S** %b.addr, align 8
+  %9 = bitcast %struct.S* %8 to i8*
+  %10 = bitcast %struct.S* %tmp to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %9, i8* align 4 %10, i64 8, i1 false)
+  br label %return
+
+return:                                           ; preds = %if.end, %if.then
+  ret void
+}
+
+; Synthetic test case, meant to check that we can handle functions with more
+; than one interesting alloca.
+; CHECK-LABEL: define {{.*}} @func_with_multiple_interesting_allocas
+; CHECK:      [[argA:%.*]] = alloca %struct.S*,
+; CHECK-NEXT: [[argB:%.*]] = alloca %struct.S*,
+; CHECK-NEXT: [[argDoit:%.*]] = alloca i8,
+; CHECK-NEXT: store %struct.S* {{.*}}, %struct.S** [[argA]]
+; CHECK-NEXT: store %struct.S* {{.*}}, %struct.S** [[argB]]
+; CHECK-NEXT: [[frombool:%.*]] = zext i1 {{.*}} to i8
+; CHECK-NEXT: store i8 [[frombool]], i8* [[argDoit]]
+define void @func_with_multiple_interesting_allocas(%struct.S* %a, %struct.S* %b, i1 zeroext %doit) sanitize_address {
+entry:
+  %a.addr = alloca %struct.S*, align 8
+  %b.addr = alloca %struct.S*, align 8
+  %doit.addr = alloca i8, align 1
+  %tmp = alloca %struct.S, align 4
+  %tmp2 = alloca %struct.S, align 4
+  store %struct.S* %a, %struct.S** %a.addr, align 8
+  store %struct.S* %b, %struct.S** %b.addr, align 8
+  %frombool = zext i1 %doit to i8
+  store i8 %frombool, i8* %doit.addr, align 1
+  %0 = load i8, i8* %doit.addr, align 1
+  %tobool = trunc i8 %0 to i1
+  br i1 %tobool, label %if.end, label %if.then
+
+if.then:                                          ; preds = %entry
+  br label %return
+
+if.end:                                           ; preds = %entry
+  %1 = load %struct.S*, %struct.S** %a.addr, align 8
+  %2 = bitcast %struct.S* %tmp to i8*
+  %3 = bitcast %struct.S* %1 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %2, i8* align 4 %3, i64 8, i1 false)
+  %4 = load %struct.S*, %struct.S** %b.addr, align 8
+  %5 = bitcast %struct.S* %tmp2 to i8*
+  %6 = bitcast %struct.S* %4 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %5, i8* align 4 %6, i64 8, i1 false)
+  %7 = load %struct.S*, %struct.S** %b.addr, align 8
+  %8 = load %struct.S*, %struct.S** %a.addr, align 8
+  %9 = bitcast %struct.S* %8 to i8*
+  %10 = bitcast %struct.S* %7 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %9, i8* align 4 %10, i64 8, i1 false)
+  %11 = load %struct.S*, %struct.S** %b.addr, align 8
+  %12 = bitcast %struct.S* %11 to i8*
+  %13 = bitcast %struct.S* %tmp to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %12, i8* align 4 %13, i64 8, i1 false)
+  %14 = load %struct.S*, %struct.S** %a.addr, align 8
+  %15 = bitcast %struct.S* %14 to i8*
+  %16 = bitcast %struct.S* %tmp2 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %15, i8* align 4 %16, i64 8, i1 false)
+  br label %return
+
+return:                                           ; preds = %if.end, %if.then
+  ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
