[PATCH] [asan] Disable stack malloc on functions containing inline assembly

Anna Zaks zaks.anna at gmail.com
Wed Jun 24 17:17:57 PDT 2015


Hi samsonov, glider,

Don't use stack malloc for functions containing inline assembly, even on 64-bit platforms; doing so can make LLVM run out of registers.

Compilation fails with the following test case on Darwin, 64-bit.

// clang  -cc1 -O0 -triple x86_64-apple-macosx10.10.0 -emit-obj -fsanitize=address -mstackrealign -o ~/tmp/ex.o -x c ex.c
// error: inline assembly requires more registers than available

#define PTR_REG(val) "r" val
/*
 * Reproduction case: with -fsanitize=address and -mstackrealign on x86_64
 * Darwin, compiling this function fails with
 * "error: inline assembly requires more registers than available"
 * (see the clang invocation above). The asm below uses nine "m" (memory)
 * operands and clobbers si/di/ax/cx/dx plus memory, leaving very few free
 * registers; ASan's stack-malloc instrumentation then pushes register
 * pressure over the limit.
 */
void TestInlineAssembly(const unsigned char *S, unsigned int pS, unsigned char *D, unsigned int pD, unsigned int h) {
  unsigned int sr = 4, pDiffD = pD - 5;
  unsigned int pDiffS = (pS << 1) - 5;
  char flagSA = ((pS & 15) == 0),
  flagDA = ((pD & 15) == 0);
  /* Operands %0..%8 map, in order, to the inputs listed after the second
     colon; PTR_REG expands to the 64-bit register names (rsi, rcx, ...). */
  asm volatile (
    "mov %0,  %%"PTR_REG("si")"\n"
    "mov %2,  %%"PTR_REG("cx")"\n"
    "mov %1,  %%"PTR_REG("di")"\n"
    "mov %8,  %%"PTR_REG("ax")"\n"
    :
    : "m" (S), "m" (D), "m" (pS), "m" (pDiffS), "m" (pDiffD), "m" (sr), "m" (flagSA), "m" (flagDA), "m" (h)
    : "%"PTR_REG("si"), "%"PTR_REG("di"), "%"PTR_REG("ax"), "%"PTR_REG("cx"), "%"PTR_REG("dx"), "memory"
);
}

This functionality has already been disabled for 32-bit in http://reviews.llvm.org/D8790, r233979.

http://reviews.llvm.org/D10719

Files:
  lib/Transforms/Instrumentation/AddressSanitizer.cpp
  test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll

Index: lib/Transforms/Instrumentation/AddressSanitizer.cpp
===================================================================
--- lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1757,7 +1757,7 @@
   // assumptions on which registers are available. Don't do stack malloc in the
   // presence of inline asm on 32-bit platforms for the same reason.
   bool DoDynamicAlloca = ClDynamicAllocaStack && !HasNonEmptyInlineAsm;
-  DoStackMalloc &= !HasNonEmptyInlineAsm || ASan.LongSize != 32;
+  DoStackMalloc &= !HasNonEmptyInlineAsm;
 
   Value *StaticAlloca =
       DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
Index: test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll
===================================================================
--- /dev/null
+++ test/Instrumentation/AddressSanitizer/X86/asm_more_registers_than_available.ll
@@ -0,0 +1,56 @@
+; RUN: opt < %s -asan -S -o %t.ll
+; RUN: FileCheck %s < %t.ll
+
+; Don't do stack malloc on functions containing inline assembly on 64-bit
+; platforms. It makes LLVM run out of registers.
+
+; CHECK-LABEL: define void @TestAbsenceOfStackMalloc(i8* %S, i32 %pS, i8* %D, i32 %pD, i32 %h)
+; CHECK: %MyAlloca
+; CHECK-NOT: call {{.*}} @__asan_stack_malloc
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+define void @TestAbsenceOfStackMalloc(i8* %S, i32 %pS, i8* %D, i32 %pD, i32 %h) #0 {
+entry:
+  %S.addr = alloca i8*, align 8
+  %pS.addr = alloca i32, align 4
+  %D.addr = alloca i8*, align 8
+  %pD.addr = alloca i32, align 4
+  %h.addr = alloca i32, align 4
+  %sr = alloca i32, align 4
+  %pDiffD = alloca i32, align 4
+  %pDiffS = alloca i32, align 4
+  %flagSA = alloca i8, align 1
+  %flagDA = alloca i8, align 1
+  store i8* %S, i8** %S.addr, align 8
+  store i32 %pS, i32* %pS.addr, align 4
+  store i8* %D, i8** %D.addr, align 8
+  store i32 %pD, i32* %pD.addr, align 4
+  store i32 %h, i32* %h.addr, align 4
+  store i32 4, i32* %sr, align 4
+  %0 = load i32, i32* %pD.addr, align 4
+  %sub = sub i32 %0, 5
+  store i32 %sub, i32* %pDiffD, align 4
+  %1 = load i32, i32* %pS.addr, align 4
+  %shl = shl i32 %1, 1
+  %sub1 = sub i32 %shl, 5
+  store i32 %sub1, i32* %pDiffS, align 4
+  %2 = load i32, i32* %pS.addr, align 4
+  %and = and i32 %2, 15
+  %cmp = icmp eq i32 %and, 0
+  %conv = zext i1 %cmp to i32
+  %conv2 = trunc i32 %conv to i8
+  store i8 %conv2, i8* %flagSA, align 1
+  %3 = load i32, i32* %pD.addr, align 4
+  %and3 = and i32 %3, 15
+  %cmp4 = icmp eq i32 %and3, 0
+  %conv5 = zext i1 %cmp4 to i32
+  %conv6 = trunc i32 %conv5 to i8
+  store i8 %conv6, i8* %flagDA, align 1
+  call void asm sideeffect "mov\09\09\09$0,\09\09\09\09\09\09\09\09\09\09%rsi\0Amov\09\09\09$2,\09\09\09\09\09\09\09\09\09\09%rcx\0Amov\09\09\09$1,\09\09\09\09\09\09\09\09\09\09%rdi\0Amov\09\09\09$8,\09\09\09\09\09\09\09\09\09\09%rax\0A", "*m,*m,*m,*m,*m,*m,*m,*m,*m,~{rsi},~{rdi},~{rax},~{rcx},~{rdx},~{memory},~{dirflag},~{fpsr},~{flags}"(i8** %S.addr, i8** %D.addr, i32* %pS.addr, i32* %pDiffS, i32* %pDiffD, i32* %sr, i8* %flagSA, i8* %flagDA, i32* %h.addr) #1
+  ret void
+}
+
+attributes #0 = { nounwind sanitize_address }
+attributes #1 = { nounwind }

EMAIL PREFERENCES
  http://reviews.llvm.org/settings/panel/emailpreferences/
-------------- next part --------------
A non-text attachment was scrubbed...
Name: D10719.28423.patch
Type: text/x-patch
Size: 3292 bytes
Desc: not available
URL: <http://lists.llvm.org/pipermail/llvm-commits/attachments/20150625/da815edc/attachment.bin>


More information about the llvm-commits mailing list