[llvm] 6974f18 - [test] Improve CodeGen/*/semantic-interposition-asm.ll

Fangrui Song via llvm-commits llvm-commits@lists.llvm.org
Sun May 16 11:17:19 PDT 2021


Author: Fangrui Song
Date: 2021-05-16T11:17:09-07:00
New Revision: 6974f1843321ee4d15b7d82d0ccfb4e8cbe76f0b

URL: https://github.com/llvm/llvm-project/commit/6974f1843321ee4d15b7d82d0ccfb4e8cbe76f0b
DIFF: https://github.com/llvm/llvm-project/commit/6974f1843321ee4d15b7d82d0ccfb4e8cbe76f0b.diff

LOG: [test] Improve CodeGen/*/semantic-interposition-asm.ll

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/semantic-interposition-asm.ll
    llvm/test/CodeGen/X86/semantic-interposition-asm.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/semantic-interposition-asm.ll b/llvm/test/CodeGen/AArch64/semantic-interposition-asm.ll
index ac9a8c9dec841..e09d16d16f13c 100644
--- a/llvm/test/CodeGen/AArch64/semantic-interposition-asm.ll
+++ b/llvm/test/CodeGen/AArch64/semantic-interposition-asm.ll
@@ -1,13 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64 -relocation-model=pic < %s | FileCheck %s
 
-;; Test that we use the local alias for dso_local globals in inline assembly.
+;; Test that we use the local alias for dso_local functions in inline assembly.
 
 @gv0 = dso_local global i32 0
 @gv1 = dso_preemptable global i32 1
 
-define i32 @load() nounwind {
-; CHECK-LABEL: load:
+define i64 @test_var() nounwind {
+; CHECK-LABEL: test_var:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    adrp x0, .Lgv0$local
@@ -16,10 +16,40 @@ define i32 @load() nounwind {
 ; CHECK-NEXT:    ldr w8, [x8, :lo12:gv1]
 ; CHECK-NEXT:    add x0, x8, x0
 ; CHECK-NEXT:    //NO_APP
-; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call i64 asm "adrp $0, $1\0Aldr ${0:w}, [$0, :lo12:$1]\0Aadrp x8, $2\0Aldr w8, [x8, :lo12:$2]\0Aadd $0,x8,$0", "=r,S,S,~{x8}"(i32* nonnull @gv0, i32* nonnull @gv1)
-  %conv = trunc i64 %0 to i32
-  ret i32 %conv
+  ret i64 %0
+}
+
+define dso_local void @fun0() nounwind {
+; CHECK-LABEL: fun0:
+; CHECK:       .Lfun0$local:
+; CHECK-NEXT:  // %bb.0: // %entry
+; CHECK-NEXT:    ret
+entry:
+  ret void
+}
+
+define dso_preemptable void @fun1() nounwind {
+; CHECK-LABEL: fun1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ret
+entry:
+  ret void
+}
+
+define i64 @test_fun() nounwind {
+; CHECK-LABEL: test_fun:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    //APP
+; CHECK-NEXT:    adrp x0, :got:.Lfun0$local
+; CHECK-NEXT:    ldr x0, [x0, :got_lo12:.Lfun0$local]
+; CHECK-NEXT:    adrp x8, :got:fun1
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:fun1]
+; CHECK-NEXT:    //NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i64 asm "adrp $0, :got:$1\0Aldr $0, [$0, :got_lo12:$1]\0Aadrp x8, :got:$2\0Aldr x8, [x8, :got_lo12:$2]", "=r,S,S,~{x8}"(void ()* nonnull @fun0, void ()* nonnull @fun1)
+  ret i64 %0
 }

diff --git a/llvm/test/CodeGen/X86/semantic-interposition-asm.ll b/llvm/test/CodeGen/X86/semantic-interposition-asm.ll
index a9c1895256ebd..89e001c8261d7 100644
--- a/llvm/test/CodeGen/X86/semantic-interposition-asm.ll
+++ b/llvm/test/CodeGen/X86/semantic-interposition-asm.ll
@@ -1,21 +1,72 @@
-; RUN: llc -mtriple=x86_64 -relocation-model=pic < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64 -relocation-model=static < %s | \
+; RUN:   FileCheck --check-prefixes=COMMON,STATIC %s
+; RUN: llc -mtriple=x86_64 -relocation-model=pic < %s | \
+; RUN:   FileCheck --check-prefixes=COMMON,CHECK %s
 
-;; Test that we use the local alias for dso_local globals in inline assembly.
+;; Test that we use the local alias for dso_local functions in inline assembly.
 
-@mxcsr0 = dso_local global i32 0
-@mxcsr1 = dso_preemptable global i32 1
+@gv0 = dso_local global i32 0
+@gv1 = dso_preemptable global i32 1
 
-define <2 x double> @foo(<2 x double> %a, <2 x double> %b) {
-; CHECK-LABEL: foo:
-; CHECK:        movq mxcsr1@GOTPCREL(%rip), %rax
-; CHECK:        #APP
-; CHECK-NEXT:   ldmxcsr .Lmxcsr0$local(%rip)
-; CHECK-NEXT:   addpd %xmm1, %xmm0
-; CHECK-NEXT:   ldmxcsr (%rax)
-; CHECK-NEXT:   #NO_APP
+define i64 @test_var() nounwind {
+; STATIC-LABEL: test_var:
+; STATIC:       # %bb.0: # %entry
+; STATIC-NEXT:    movq gv1@GOTPCREL(%rip), %rax
+; STATIC-NEXT:    #APP
+; STATIC-NEXT:    movq gv0(%rip), %rax
+; STATIC-NEXT:    movq (%rax), %rax
+; STATIC-NEXT:    #NO_APP
+; STATIC-NEXT:    retq
+;
+; CHECK-LABEL: test_var:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movq gv1@GOTPCREL(%rip), %rax
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    movq .Lgv0$local(%rip), %rax
+; CHECK-NEXT:    movq (%rax), %rax
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    retq
 entry:
-  %0 = call <2 x double> asm sideeffect "ldmxcsr $2; addpd $1, $0; ldmxcsr $3",
-         "=x,x,*m,*m,0,~{dirflag},~{fpsr},~{flags}"(
-           <2 x double> %b, i32* nonnull @mxcsr0, i32* nonnull @mxcsr1, <2 x double> %a)
-  ret <2 x double> %0
+  %0 = tail call i64 asm "movq $1, $0\0Amovq $2, $0", "=r,*m,*m"(i32* @gv0, i32* @gv1)
+  ret i64 %0
+}
+
+define dso_local void @fun0() nounwind {
+; COMMON-LABEL: fun0:
+; COMMON:       # %bb.0: # %entry
+; COMMON-NEXT:    retq
+entry:
+  ret void
+}
+
+define dso_preemptable void @fun1() nounwind {
+; COMMON-LABEL: fun1:
+; COMMON:       # %bb.0: # %entry
+; COMMON-NEXT:    retq
+entry:
+  ret void
+}
+
+define i64 @test_fun() nounwind {
+; STATIC-LABEL: test_fun:
+; STATIC:       # %bb.0: # %entry
+; STATIC-NEXT:    movq fun1@{{.*}}(%rip), %rax
+; STATIC-NEXT:    #APP
+; STATIC-NEXT:    movq {{.*}}(%rip), %rax
+; STATIC-NEXT:    movq (%rax), %rax
+; STATIC-NEXT:    #NO_APP
+; STATIC-NEXT:    retq
+;
+; CHECK-LABEL: test_fun:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movq fun1@{{.*}}(%rip), %rax
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    movq .Lfun0${{.*}}(%rip), %rax
+; CHECK-NEXT:    movq (%rax), %rax
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    retq
+entry:
+  %0 = tail call i64 asm "movq $1, $0\0Amovq $2, $0", "=r,*m,*m"(void ()* nonnull @fun0, void ()* nonnull @fun1)
+  ret i64 %0
 }


        


More information about the llvm-commits mailing list