[llvm] r329054 - MSan: introduce the conservative assembly handling mode.

Alexander Potapenko via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 3 02:50:07 PDT 2018


Author: glider
Date: Tue Apr  3 02:50:06 2018
New Revision: 329054

URL: http://llvm.org/viewvc/llvm-project?rev=329054&view=rev
Log:
MSan: introduce the conservative assembly handling mode.

The default assembly handling mode may introduce false positives in
cases where MSan doesn't understand that an assembly call initializes
the memory pointed to by one of its arguments.

We introduce the conservative mode, which unpoisons (i.e. marks as
initialized) the first |sizeof(type)| bytes for every |type*| pointer
passed into the assembly statement.
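
The intended effect can be illustrated with a small sketch (illustrative
only, not part of this change; the helper below is hypothetical). Because
the instrumentation only sees a |type*| operand, it cannot know how many
elements the asm statement actually writes, so only the first element is
unpoisoned:

    // Sketch: fills n 32-bit words at |buf| with zeroes via "rep stosl".
    // Under -msan-handle-asm-conservative only the first sizeof(unsigned)
    // bytes at |buf| are unpoisoned; buf[1..n-1] keep their previous shadow.
    void zero_fill(unsigned *buf, unsigned long n) {
      asm volatile("rep stosl" : "+D"(buf), "+c"(n) : "a"(0U) : "memory");
    }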


Added:
    llvm/trunk/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll
Modified:
    llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp

Modified: llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp?rev=329054&r1=329053&r2=329054&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp (original)
+++ llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp Tue Apr  3 02:50:06 2018
@@ -199,6 +199,18 @@ static cl::opt<bool> ClHandleICmpExact("
        cl::desc("exact handling of relational integer ICmp"),
        cl::Hidden, cl::init(false));
 
+// When compiling the Linux kernel, we sometimes see false positives related to
+// MSan being unable to understand that inline assembly calls may initialize
+// local variables.
+// This flag makes the compiler conservatively unpoison every memory location
+// passed into an assembly call. Note that this may cause false negatives.
+// Because it's impossible to figure out the array sizes, we can only unpoison
+// the first sizeof(type) bytes for each type* pointer.
+static cl::opt<bool> ClHandleAsmConservative(
+    "msan-handle-asm-conservative",
+    cl::desc("conservative handling of inline assembly"), cl::Hidden,
+    cl::init(false));
+
 // This flag controls whether we check the shadow of the address
 // operand of load or store. Such bugs are very rare, since load from
 // a garbage address typically results in SEGV, but still happen
@@ -2758,7 +2770,10 @@ struct MemorySanitizerVisitor : public I
       // outputs as clean. Note that any side effects of the inline asm that are
       // not immediately visible in its constraints are not handled.
       if (Call->isInlineAsm()) {
-        visitInstruction(I);
+        if (ClHandleAsmConservative)
+          visitAsmInstruction(I);
+        else
+          visitInstruction(I);
         return;
       }
 
@@ -3083,6 +3098,39 @@ struct MemorySanitizerVisitor : public I
     // Nothing to do here.
   }
 
+  void visitAsmInstruction(Instruction &I) {
+    // Conservative inline assembly handling: check for poisoned shadow of
+    // asm() arguments, then unpoison the result and all the memory locations
+    // pointed to by those arguments.
+    CallInst *CI = dyn_cast<CallInst>(&I);
+
+    for (size_t i = 0, n = CI->getNumOperands(); i < n; i++) {
+      Value *Operand = CI->getOperand(i);
+      if (Operand->getType()->isSized())
+        insertShadowCheck(Operand, &I);
+    }
+    setShadow(&I, getCleanShadow(&I));
+    setOrigin(&I, getCleanOrigin());
+    IRBuilder<> IRB(&I);
+    IRB.SetInsertPoint(I.getNextNode());
+    for (size_t i = 0, n = CI->getNumOperands(); i < n; i++) {
+      Value *Operand = CI->getOperand(i);
+      Type *OpType = Operand->getType();
+      if (!OpType->isPointerTy())
+        continue;
+      Type *ElType = OpType->getPointerElementType();
+      if (!ElType->isSized())
+        continue;
+      Value *ShadowPtr, *OriginPtr;
+      std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
+          Operand, IRB, ElType, /*Alignment*/ 1, /*isStore*/ true);
+      Value *CShadow = getCleanShadow(ElType);
+      IRB.CreateStore(
+          CShadow,
+          IRB.CreatePointerCast(ShadowPtr, CShadow->getType()->getPointerTo()));
+    }
+  }
+
   void visitInstruction(Instruction &I) {
     // Everything else: stop propagating and check for poisoned shadow.
     if (ClDumpStrictInstructions)
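
For reference, the run-time effect of the instrumentation added in
visitAsmInstruction() can be summarized with a conceptual sketch (this is not
the code the pass emits; shadow_for() is a hypothetical stand-in for MSan's
application-to-shadow mapping, sketched after the test below):

    #include <cstring>

    unsigned char *shadow_for(const void *app_addr);  // hypothetical helper

    // For every pointer operand of the asm call, mark the first sizeof(T)
    // bytes of the pointee as initialized by zeroing their shadow bytes.
    template <typename T> void unpoison_pointee(T *p) {
      std::memset(shadow_for(p), 0, sizeof(T));
    }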

Added: llvm/trunk/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll?rev=329054&view=auto
==============================================================================
--- llvm/trunk/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll (added)
+++ llvm/trunk/test/Instrumentation/MemorySanitizer/msan_x86_bts_asm.ll Tue Apr  3 02:50:06 2018
@@ -0,0 +1,83 @@
+; Test for the conservative assembly handling mode used by KMSAN.
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-handle-asm-conservative=0 -S | FileCheck -check-prefixes=CHECK,CHECK-NONCONS %s
+; RUN: opt < %s -msan -msan-check-access-address=0 -msan-handle-asm-conservative=1 -S | FileCheck -check-prefixes=CHECK,CHECK-CONS %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; The IR below was generated from the following source:
+;  int main() {
+;    bool bit;
+;    unsigned long value = 2;
+;    long nr = 0;
+;    unsigned long *addr = &value;
+;    asm("btsq %2, %1; setc %0" : "=qm" (bit), "=m" (addr): "Ir" (nr));
+;    if (bit)
+;      return 0;
+;    else
+;      return 1;
+;  }
+;
+; In the regular instrumentation mode MSan is unable to understand that |bit|
+; is initialized by the asm() call, and therefore reports a false positive on
+; the if-statement.
+; The conservative assembly handling mode initializes every memory location
+; passed by pointer into an asm() call. This prevents false positive reports,
+; but may introduce false negatives.
+;
+; This test makes sure that the conservative mode unpoisons the shadow of |bit|
+; by writing 0 to it.
+
+define dso_local i32 @main() sanitize_memory {
+entry:
+  %retval = alloca i32, align 4
+  %bit = alloca i8, align 1
+  %value = alloca i64, align 8
+  %nr = alloca i64, align 8
+  %addr = alloca i64*, align 8
+  store i32 0, i32* %retval, align 4
+  store i64 2, i64* %value, align 8
+  store i64 0, i64* %nr, align 8
+  store i64* %value, i64** %addr, align 8
+  %0 = load i64, i64* %nr, align 8
+  call void asm "btsq $2, $1; setc $0", "=*qm,=*m,Ir,~{dirflag},~{fpsr},~{flags}"(i8* %bit, i64** %addr, i64 %0)
+  %1 = load i8, i8* %bit, align 1
+  %tobool = trunc i8 %1 to i1
+  br i1 %tobool, label %if.then, label %if.else
+
+if.then:                                          ; preds = %entry
+  ret i32 0
+
+if.else:                                          ; preds = %entry
+  ret i32 1
+}
+
+; Start with the asm call
+; CHECK: call void asm "btsq $2, $1; setc $0"
+
+; Calculating the shadow offset of %bit.
+; CHECK: [[PTR:%.*]] = ptrtoint {{.*}} %bit to i64
+; CHECK: [[SH_NUM:%.*]] = xor i64 [[PTR]], [[OFF:[0-9]*]]
+; CHECK: [[SHADOW:%.*]] = inttoptr i64 [[SH_NUM]] {{.*}}
+
+; In the conservative mode, unpoison the shadow.
+; CHECK-CONS: store i8 0, i8* [[SHADOW]]
+; Now calculate the shadow address again, because MSan does this for every
+; shadow access.
+; CHECK-CONS: [[PTR2:%.*]] = ptrtoint {{.*}} %bit to i64
+; CHECK-CONS: [[SH_NUM2:%.*]] = xor i64 [[PTR2]], [[OFF]]
+; CHECK-CONS: [[SHADOW2:%.*]] = inttoptr i64 [[SH_NUM2]] {{.*}}
+
+; Now load the shadow value for the boolean.
+; CHECK-NONCONS: [[MSLD:%.*]] = load {{.*}} [[SHADOW]]
+; CHECK-CONS: [[MSLD:%.*]] = load {{.*}} [[SHADOW2]]
+; CHECK: [[MSPROP:%.*]] = trunc i8 [[MSLD]] to i1
+
+; Is the shadow poisoned?
+; CHECK: [[MSCMP:%.*]] = icmp ne i1 [[MSPROP]], false
+; CHECK: br i1 [[MSCMP]], label %[[IFTRUE:.*]], label {{.*}}
+
+; If yes, raise a warning.
+; CHECK: <label>:[[IFTRUE]]
+; CHECK: call void @__msan_warning_noreturn()
+
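
A note for readers following the CHECK lines above: the ptrtoint/xor/inttoptr
sequence is MSan's application-to-shadow address translation. A minimal
sketch, assuming the default Linux/x86_64 mapping (the constant below is an
assumption taken from that mapping; other targets use different parameters):

    // Hypothetical helper matching the shadow_for() declaration sketched
    // earlier: shadow address = application address XOR a per-target constant.
    unsigned char *shadow_for(const void *app_addr) {
      return (unsigned char *)((unsigned long)app_addr ^ 0x500000000000UL);
    }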



