[llvm] r364985 - [WebAssembly] Prevent inline assembly from being mangled by SjLj

Guanzhong Chen via llvm-commits <llvm-commits at lists.llvm.org>
Tue Jul 2 17:37:49 PDT 2019


Author: quantum
Date: Tue Jul  2 17:37:49 2019
New Revision: 364985

URL: http://llvm.org/viewvc/llvm-project?rev=364985&view=rev
Log:
[WebAssembly] Prevent inline assembly from being mangled by SjLj

Summary:
Previously, inline assembly was mangled by the SjLj transformation.

For example, in a function with setjmp/longjmp, this LLVM IR code

    call void asm sideeffect "", ""()

would be transformed into

    call void @__invoke_void(void ()* asm sideeffect "", "")

This is invalid and results in the error:

    Cannot take the address of an inline asm!

In this diff, we skip the transformation for inline assembly.
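
For reference, a minimal sketch of the check this change introduces, written as a
standalone helper (hedged: isInlineAsmCall is an illustrative name, not a function
in the pass; the actual patch performs the equivalent isa<InlineAsm> test on the
callee value inside canLongjmp(), as the diff below shows):

    #include "llvm/IR/InlineAsm.h"
    #include "llvm/IR/Instructions.h"

    // Sketch only: a call site whose callee is an InlineAsm value targets
    // inline assembly. Such a callee is not a Function, has no address, and
    // so cannot be passed to an __invoke_* wrapper by pointer; the SjLj
    // rewrite must leave the call site untouched.
    static bool isInlineAsmCall(const llvm::CallInst *CI) {
      return llvm::isa<llvm::InlineAsm>(CI->getCalledValue());
    }

In-tree code could equivalently use CallInst::isInlineAsm(), which performs the
same test on the callee.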

Reviewers: aheejin, tlively

Subscribers: dschuff, sbc100, jgravelle-google, hiraditya, sunfish, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D64115

Modified:
    llvm/trunk/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
    llvm/trunk/test/CodeGen/WebAssembly/lower-em-sjlj.ll

Modified: llvm/trunk/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp?rev=364985&r1=364984&r2=364985&view=diff
==============================================================================
--- llvm/trunk/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp (original)
+++ llvm/trunk/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp Tue Jul  2 17:37:49 2019
@@ -485,6 +485,13 @@ bool WebAssemblyLowerEmscriptenEHSjLj::c
     if (CalleeF->isIntrinsic())
       return false;
 
+  // Attempting to transform inline assembly will result in something like:
+  //     call void @__invoke_void(void ()* asm ...)
+  // which is invalid because inline assembly blocks do not have addresses
+  // and can't be passed by pointer. The result is a crash with illegal IR.
+  if (isa<InlineAsm>(Callee))
+    return false;
+
   // The reason we include malloc/free here is to exclude the malloc/free
   // calls generated in setjmp prep / cleanup routines.
   Function *SetjmpF = M.getFunction("setjmp");

Modified: llvm/trunk/test/CodeGen/WebAssembly/lower-em-sjlj.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/WebAssembly/lower-em-sjlj.ll?rev=364985&r1=364984&r2=364985&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/WebAssembly/lower-em-sjlj.ll (original)
+++ llvm/trunk/test/CodeGen/WebAssembly/lower-em-sjlj.ll Tue Jul  2 17:37:49 2019
@@ -188,6 +188,27 @@ entry:
 ; CHECK-NEXT: call void @emscripten_longjmp_jmpbuf(%struct.__jmp_buf_tag* %[[ARRAYDECAY]], i32 5) #1
 }
 
+; Test inline asm handling
+define hidden void @inline_asm() #0 {
+; CHECK-LABEL: @inline_asm
+entry:
+  %env = alloca [1 x %struct.__jmp_buf_tag], align 16
+  %arraydecay = getelementptr inbounds [1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* %env, i32 0, i32 0
+  %call = call i32 @setjmp(%struct.__jmp_buf_tag* %arraydecay) #4
+  %cmp = icmp eq i32 %call, 0
+  br i1 %cmp, label %if.then, label %if.else
+
+if.then:                                          ; preds = %entry
+; CHECK: call void asm sideeffect "", ""()
+  call void asm sideeffect "", ""()
+  %arraydecay1 = getelementptr inbounds [1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* %env, i32 0, i32 0
+  call void @longjmp(%struct.__jmp_buf_tag* %arraydecay1, i32 1) #5
+  unreachable
+
+if.else:                                          ; preds = %entry
+  ret void
+}
+
 declare void @foo()
 ; Function Attrs: returns_twice
 declare i32 @setjmp(%struct.__jmp_buf_tag*) #0
