[llvm] r240722 - [asan] Don't run stack malloc on functions containing inline assembly.

Anna Zaks ganna at apple.com
Thu Jun 25 16:35:45 PDT 2015


Author: zaks
Date: Thu Jun 25 18:35:45 2015
New Revision: 240722

URL: http://llvm.org/viewvc/llvm-project?rev=240722&view=rev
Log:
[asan] Don't run stack malloc on functions containing inline assembly.

Stack malloc makes LLVM run out of registers even on 64-bit platforms: the
asm statement below already clobbers five general-purpose registers and
takes nine memory operands, and the stack-malloc prologue adds further
register pressure. For example, the following test case fails on Darwin:

clang -cc1 -O0 -triple x86_64-apple-macosx10.10.0 -emit-obj -fsanitize=address -mstackrealign -o ~/tmp/ex.o -x c ex.c
error: inline assembly requires more registers than available

void TestInlineAssembly(const unsigned char *S, unsigned int pS,
                        unsigned char *D, unsigned int pD, unsigned int h) {
  unsigned int sr = 4, pDiffD = pD - 5;
  unsigned int pDiffS = (pS << 1) - 5;
  char flagSA = ((pS & 15) == 0),
       flagDA = ((pD & 15) == 0);
  asm volatile (
    "mov %0,  %%"PTR_REG("si")"\n"
    "mov %2,  %%"PTR_REG("cx")"\n"
    "mov %1,  %%"PTR_REG("di")"\n"
    "mov %8,  %%"PTR_REG("ax")"\n"
    :
    : "m" (S), "m" (D), "m" (pS), "m" (pDiffS), "m" (pDiffD), "m" (sr),
      "m" (flagSA), "m" (flagDA), "m" (h)
    : "%"PTR_REG("si"), "%"PTR_REG("di"), "%"PTR_REG("ax"), "%"PTR_REG("cx"),
      "%"PTR_REG("dx"), "memory"
  );
}
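
PTR_REG is not defined in the snippet above. Assuming it selects the
64-bit vs. 32-bit register-name prefix (the generated IR in the test
below shows the 64-bit expansion, e.g. "rsi"), a plausible definition
would be:

#if defined(__x86_64__)
#define PTR_REG(reg) "r" reg  /* string concatenation: PTR_REG("si") -> "rsi" */
#else
#define PTR_REG(reg) "e" reg  /* PTR_REG("si") -> "esi" */
#endif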

http://reviews.llvm.org/D10719

Added:
    llvm/trunk/test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll
Modified:
    llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp

Modified: llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp?rev=240722&r1=240721&r2=240722&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp (original)
+++ llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp Thu Jun 25 18:35:45 2015
@@ -1753,11 +1753,10 @@ void FunctionStackPoisoner::poisonStack(
   uint64_t LocalStackSize = L.FrameSize;
   bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&
                        LocalStackSize <= kMaxStackMallocSize;
-  // Don't do dynamic alloca in presence of inline asm: too often it makes
-  // assumptions on which registers are available. Don't do stack malloc in the
-  // presence of inline asm on 32-bit platforms for the same reason.
+  // Don't do dynamic alloca or stack malloc in presence of inline asm:
+  // too often it makes assumptions on which registers are available.
   bool DoDynamicAlloca = ClDynamicAllocaStack && !HasNonEmptyInlineAsm;
-  DoStackMalloc &= !HasNonEmptyInlineAsm || ASan.LongSize != 32;
+  DoStackMalloc &= !HasNonEmptyInlineAsm;
 
   Value *StaticAlloca =
       DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
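
For reference, the net effect of this hunk is that both decisions now
share the same inline-asm guard (a sketch in the pass's own variable
names, not a literal excerpt):

  bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&
                       LocalStackSize <= kMaxStackMallocSize &&
                       !HasNonEmptyInlineAsm;
  bool DoDynamicAlloca = ClDynamicAllocaStack && !HasNonEmptyInlineAsm;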

Added: llvm/trunk/test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll?rev=240722&view=auto
==============================================================================
--- llvm/trunk/test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll (added)
+++ llvm/trunk/test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll Thu Jun 25 18:35:45 2015
@@ -0,0 +1,56 @@
+; RUN: opt < %s -asan -S -o %t.ll
+; RUN: FileCheck %s < %t.ll
+
+; Don't do stack malloc on functions containing inline assembly on 64-bit
+; platforms. It makes LLVM run out of registers.
+
+; CHECK-LABEL: define void @TestAbsenceOfStackMalloc(i8* %S, i32 %pS, i8* %D, i32 %pD, i32 %h)
+; CHECK: %MyAlloca
+; CHECK-NOT: call {{.*}} @__asan_stack_malloc
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+define void @TestAbsenceOfStackMalloc(i8* %S, i32 %pS, i8* %D, i32 %pD, i32 %h) #0 {
+entry:
+  %S.addr = alloca i8*, align 8
+  %pS.addr = alloca i32, align 4
+  %D.addr = alloca i8*, align 8
+  %pD.addr = alloca i32, align 4
+  %h.addr = alloca i32, align 4
+  %sr = alloca i32, align 4
+  %pDiffD = alloca i32, align 4
+  %pDiffS = alloca i32, align 4
+  %flagSA = alloca i8, align 1
+  %flagDA = alloca i8, align 1
+  store i8* %S, i8** %S.addr, align 8
+  store i32 %pS, i32* %pS.addr, align 4
+  store i8* %D, i8** %D.addr, align 8
+  store i32 %pD, i32* %pD.addr, align 4
+  store i32 %h, i32* %h.addr, align 4
+  store i32 4, i32* %sr, align 4
+  %0 = load i32, i32* %pD.addr, align 4
+  %sub = sub i32 %0, 5
+  store i32 %sub, i32* %pDiffD, align 4
+  %1 = load i32, i32* %pS.addr, align 4
+  %shl = shl i32 %1, 1
+  %sub1 = sub i32 %shl, 5
+  store i32 %sub1, i32* %pDiffS, align 4
+  %2 = load i32, i32* %pS.addr, align 4
+  %and = and i32 %2, 15
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  %conv2 = trunc i32 %conv to i8
+  store i8 %conv2, i8* %flagSA, align 1
+  %3 = load i32, i32* %pD.addr, align 4
+  %and3 = and i32 %3, 15
+  %cmp4 = icmp eq i32 %and3, 0
+  %conv5 = zext i1 %cmp4 to i32
+  %conv6 = trunc i32 %conv5 to i8
+  store i8 %conv6, i8* %flagDA, align 1
+  call void asm sideeffect "mov\09\09\09$0,\09\09\09\09\09\09\09\09\09\09%rsi\0Amov\09\09\09$2,\09\09\09\09\09\09\09\09\09\09%rcx\0Amov\09\09\09$1,\09\09\09\09\09\09\09\09\09\09%rdi\0Amov\09\09\09$8,\09\09\09\09\09\09\09\09\09\09%rax\0A", "*m,*m,*m,*m,*m,*m,*m,*m,*m,~{rsi},~{rdi},~{rax},~{rcx},~{rdx},~{memory},~{dirflag},~{fpsr},~{flags}"(i8** %S.addr, i8** %D.addr, i32* %pS.addr, i32* %pDiffS, i32* %pDiffD, i32* %sr, i8* %flagSA, i8* %flagDA, i32* %h.addr) #1
+  ret void
+}
+
+attributes #0 = { nounwind sanitize_address }
+attributes #1 = { nounwind }