[llvm-branch-commits] [llvm] 37f0c8d - [X86] Emit @PLT for x86-64 and keep unadorned symbols for x86-32
Fangrui Song via llvm-branch-commits
llvm-branch-commits@lists.llvm.org
Sat Dec 5 13:21:58 PST 2020
Author: Fangrui Song
Date: 2020-12-05T13:17:47-08:00
New Revision: 37f0c8df47d84ba311fc9a2c1884935ba8961e84
URL: https://github.com/llvm/llvm-project/commit/37f0c8df47d84ba311fc9a2c1884935ba8961e84
DIFF: https://github.com/llvm/llvm-project/commit/37f0c8df47d84ba311fc9a2c1884935ba8961e84.diff
LOG: [X86] Emit @PLT for x86-64 and keep unadorned symbols for x86-32
This essentially reverts the x86-64 side effect of r327198.
For x86-32, @PLT (R_386_PLT32) is not suitable in -fno-pic mode so the
code forces MO_NO_FLAG (like a forced dso_local) (https://bugs.llvm.org//show_bug.cgi?id=36674#c6).
For x86-64, both `call/jmp foo` and `call/jmp foo@PLT` emit R_X86_64_PLT32
(https://sourceware.org/bugzilla/show_bug.cgi?id=22791) so there is no
difference using @PLT. Using @PLT is actually favorable because this drops
a difference with -fpie/-fpic code and makes it possible to avoid a canonical
PLT entry when taking the address of an undefined function symbol.
Added:
Modified:
llvm/lib/Target/X86/X86Subtarget.cpp
llvm/test/CodeGen/X86/cvt16.ll
llvm/test/CodeGen/X86/extract-fp.ll
llvm/test/CodeGen/X86/extractelement-fp.ll
llvm/test/CodeGen/X86/finite-libcalls.ll
llvm/test/CodeGen/X86/fmaxnum.ll
llvm/test/CodeGen/X86/fmf-flags.ll
llvm/test/CodeGen/X86/fminnum.ll
llvm/test/CodeGen/X86/fp128-extract.ll
llvm/test/CodeGen/X86/fp128-i128.ll
llvm/test/CodeGen/X86/fp128-libcalls.ll
llvm/test/CodeGen/X86/half.ll
llvm/test/CodeGen/X86/llround-conv.ll
llvm/test/CodeGen/X86/lround-conv-i32.ll
llvm/test/CodeGen/X86/lround-conv-i64.ll
llvm/test/CodeGen/X86/memcpy-inline.ll
llvm/test/CodeGen/X86/memcpy.ll
llvm/test/CodeGen/X86/memset-nonzero.ll
llvm/test/CodeGen/X86/negative-sin.ll
llvm/test/CodeGen/X86/pow.ll
llvm/test/CodeGen/X86/powi.ll
llvm/test/CodeGen/X86/pr38865.ll
llvm/test/CodeGen/X86/vector-half-conversions.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86Subtarget.cpp b/llvm/lib/Target/X86/X86Subtarget.cpp
index 8c9248569a13..684854725817 100644
--- a/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -203,7 +203,7 @@ X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV,
is64Bit())
return X86II::MO_GOTPCREL;
// Reference ExternalSymbol directly in static relocation model.
- if (!GV && TM.getRelocationModel() == Reloc::Static)
+ if (!is64Bit() && !GV && TM.getRelocationModel() == Reloc::Static)
return X86II::MO_NO_FLAG;
return X86II::MO_PLT;
}
diff --git a/llvm/test/CodeGen/X86/cvt16.ll b/llvm/test/CodeGen/X86/cvt16.ll
index 9b36c5a0cc34..64be9aad49be 100644
--- a/llvm/test/CodeGen/X86/cvt16.ll
+++ b/llvm/test/CodeGen/X86/cvt16.ll
@@ -60,7 +60,7 @@ define float @test2(i16* nocapture %src) {
; LIBCALL-LABEL: test2:
; LIBCALL: # %bb.0:
; LIBCALL-NEXT: movzwl (%rdi), %edi
-; LIBCALL-NEXT: jmp __gnu_h2f_ieee # TAILCALL
+; LIBCALL-NEXT: jmp __gnu_h2f_ieee@PLT # TAILCALL
;
; F16C-LABEL: test2:
; F16C: # %bb.0:
@@ -92,7 +92,7 @@ define float @test3(float %src) nounwind uwtable readnone {
; LIBCALL-NEXT: movzwl %ax, %edi
; LIBCALL-NEXT: popq %rax
; LIBCALL-NEXT: .cfi_def_cfa_offset 8
-; LIBCALL-NEXT: jmp __gnu_h2f_ieee # TAILCALL
+; LIBCALL-NEXT: jmp __gnu_h2f_ieee@PLT # TAILCALL
;
; F16C-LABEL: test3:
; F16C: # %bb.0:
@@ -154,11 +154,11 @@ define double @test4(i16* nocapture %src) {
define i16 @test5(double %src) {
; LIBCALL-LABEL: test5:
; LIBCALL: # %bb.0:
-; LIBCALL-NEXT: jmp __truncdfhf2 # TAILCALL
+; LIBCALL-NEXT: jmp __truncdfhf2@PLT # TAILCALL
;
; F16C-LABEL: test5:
; F16C: # %bb.0:
-; F16C-NEXT: jmp __truncdfhf2 # TAILCALL
+; F16C-NEXT: jmp __truncdfhf2@PLT # TAILCALL
;
; SOFTFLOAT-LABEL: test5:
; SOFTFLOAT: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/extract-fp.ll b/llvm/test/CodeGen/X86/extract-fp.ll
index 6281f12979f1..317f3188c2df 100644
--- a/llvm/test/CodeGen/X86/extract-fp.ll
+++ b/llvm/test/CodeGen/X86/extract-fp.ll
@@ -65,7 +65,7 @@ define float @ext_frem_v4f32(<4 x float> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT: jmp fmodf # TAILCALL
+; CHECK-NEXT: jmp fmodf@PLT # TAILCALL
%bo = frem <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 42.0>
%ext = extractelement <4 x float> %bo, i32 2
ret float %ext
@@ -77,7 +77,7 @@ define float @ext_frem_v4f32_constant_op0(<4 x float> %x) {
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: jmp fmodf # TAILCALL
+; CHECK-NEXT: jmp fmodf@PLT # TAILCALL
%bo = frem <4 x float> <float 1.0, float 2.0, float 3.0, float 42.0>, %x
%ext = extractelement <4 x float> %bo, i32 1
ret float %ext
diff --git a/llvm/test/CodeGen/X86/extractelement-fp.ll b/llvm/test/CodeGen/X86/extractelement-fp.ll
index 7e3e263f0db7..137b98db28a3 100644
--- a/llvm/test/CodeGen/X86/extractelement-fp.ll
+++ b/llvm/test/CodeGen/X86/extractelement-fp.ll
@@ -231,7 +231,7 @@ define double @fdiv_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
define float @frem_v4f32(<4 x float> %x, <4 x float> %y) nounwind {
; X64-LABEL: frem_v4f32:
; X64: # %bb.0:
-; X64-NEXT: jmp fmodf # TAILCALL
+; X64-NEXT: jmp fmodf@PLT # TAILCALL
;
; X86-LABEL: frem_v4f32:
; X86: # %bb.0:
@@ -252,7 +252,7 @@ define double @frem_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: # kill: def $xmm1 killed $xmm1 killed $ymm1
; X64-NEXT: vzeroupper
-; X64-NEXT: jmp fmod # TAILCALL
+; X64-NEXT: jmp fmod@PLT # TAILCALL
;
; X86-LABEL: frem_v4f64:
; X86: # %bb.0:
@@ -443,7 +443,7 @@ define double @fsqrt_v4f64(<4 x double> %x) nounwind {
define float @fsin_v4f32(<4 x float> %x) nounwind {
; X64-LABEL: fsin_v4f32:
; X64: # %bb.0:
-; X64-NEXT: jmp sinf # TAILCALL
+; X64-NEXT: jmp sinf@PLT # TAILCALL
;
; X86-LABEL: fsin_v4f32:
; X86: # %bb.0:
@@ -462,7 +462,7 @@ define double @fsin_v4f64(<4 x double> %x) nounwind {
; X64: # %bb.0:
; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
-; X64-NEXT: jmp sin # TAILCALL
+; X64-NEXT: jmp sin@PLT # TAILCALL
;
; X86-LABEL: fsin_v4f64:
; X86: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/finite-libcalls.ll b/llvm/test/CodeGen/X86/finite-libcalls.ll
index 5a44949cb84e..8a911505084f 100644
--- a/llvm/test/CodeGen/X86/finite-libcalls.ll
+++ b/llvm/test/CodeGen/X86/finite-libcalls.ll
@@ -9,7 +9,7 @@
define float @exp_f32(float %x) #0 {
; GNU-LABEL: exp_f32:
; GNU: # %bb.0:
-; GNU-NEXT: jmp expf # TAILCALL
+; GNU-NEXT: jmp expf@PLT # TAILCALL
;
; WIN-LABEL: exp_f32:
; WIN: # %bb.0:
@@ -25,7 +25,7 @@ define float @exp_f32(float %x) #0 {
define double @exp_f64(double %x) #0 {
; GNU-LABEL: exp_f64:
; GNU: # %bb.0:
-; GNU-NEXT: jmp exp # TAILCALL
+; GNU-NEXT: jmp exp@PLT # TAILCALL
;
; WIN-LABEL: exp_f64:
; WIN: # %bb.0:
@@ -80,7 +80,7 @@ define x86_fp80 @exp_f80(x86_fp80 %x) #0 {
define float @exp2_f32(float %x) #0 {
; GNU-LABEL: exp2_f32:
; GNU: # %bb.0:
-; GNU-NEXT: jmp exp2f # TAILCALL
+; GNU-NEXT: jmp exp2f@PLT # TAILCALL
;
; WIN-LABEL: exp2_f32:
; WIN: # %bb.0:
@@ -96,7 +96,7 @@ define float @exp2_f32(float %x) #0 {
define double @exp2_f64(double %x) #0 {
; GNU-LABEL: exp2_f64:
; GNU: # %bb.0:
-; GNU-NEXT: jmp exp2 # TAILCALL
+; GNU-NEXT: jmp exp2@PLT # TAILCALL
;
; WIN-LABEL: exp2_f64:
; WIN: # %bb.0:
@@ -151,7 +151,7 @@ define x86_fp80 @exp2_f80(x86_fp80 %x) #0 {
define float @log_f32(float %x) #0 {
; GNU-LABEL: log_f32:
; GNU: # %bb.0:
-; GNU-NEXT: jmp logf # TAILCALL
+; GNU-NEXT: jmp logf@PLT # TAILCALL
;
; WIN-LABEL: log_f32:
; WIN: # %bb.0:
@@ -167,7 +167,7 @@ define float @log_f32(float %x) #0 {
define double @log_f64(double %x) #0 {
; GNU-LABEL: log_f64:
; GNU: # %bb.0:
-; GNU-NEXT: jmp log # TAILCALL
+; GNU-NEXT: jmp log@PLT # TAILCALL
;
; WIN-LABEL: log_f64:
; WIN: # %bb.0:
@@ -222,7 +222,7 @@ define x86_fp80 @log_f80(x86_fp80 %x) #0 {
define float @log2_f32(float %x) #0 {
; GNU-LABEL: log2_f32:
; GNU: # %bb.0:
-; GNU-NEXT: jmp log2f # TAILCALL
+; GNU-NEXT: jmp log2f@PLT # TAILCALL
;
; WIN-LABEL: log2_f32:
; WIN: # %bb.0:
@@ -238,7 +238,7 @@ define float @log2_f32(float %x) #0 {
define double @log2_f64(double %x) #0 {
; GNU-LABEL: log2_f64:
; GNU: # %bb.0:
-; GNU-NEXT: jmp log2 # TAILCALL
+; GNU-NEXT: jmp log2@PLT # TAILCALL
;
; WIN-LABEL: log2_f64:
; WIN: # %bb.0:
@@ -293,7 +293,7 @@ define x86_fp80 @log2_f80(x86_fp80 %x) #0 {
define float @log10_f32(float %x) #0 {
; GNU-LABEL: log10_f32:
; GNU: # %bb.0:
-; GNU-NEXT: jmp log10f # TAILCALL
+; GNU-NEXT: jmp log10f@PLT # TAILCALL
;
; WIN-LABEL: log10_f32:
; WIN: # %bb.0:
@@ -309,7 +309,7 @@ define float @log10_f32(float %x) #0 {
define double @log10_f64(double %x) #0 {
; GNU-LABEL: log10_f64:
; GNU: # %bb.0:
-; GNU-NEXT: jmp log10 # TAILCALL
+; GNU-NEXT: jmp log10@PLT # TAILCALL
;
; WIN-LABEL: log10_f64:
; WIN: # %bb.0:
@@ -365,7 +365,7 @@ define float @pow_f32(float %x) #0 {
; GNU-LABEL: pow_f32:
; GNU: # %bb.0:
; GNU-NEXT: movaps %xmm0, %xmm1
-; GNU-NEXT: jmp powf # TAILCALL
+; GNU-NEXT: jmp powf@PLT # TAILCALL
;
; WIN-LABEL: pow_f32:
; WIN: # %bb.0:
@@ -384,7 +384,7 @@ define double @pow_f64(double %x) #0 {
; GNU-LABEL: pow_f64:
; GNU: # %bb.0:
; GNU-NEXT: movaps %xmm0, %xmm1
-; GNU-NEXT: jmp pow # TAILCALL
+; GNU-NEXT: jmp pow@PLT # TAILCALL
;
; WIN-LABEL: pow_f64:
; WIN: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/fmaxnum.ll b/llvm/test/CodeGen/X86/fmaxnum.ll
index fd5b638a146d..95a5cfe4ca31 100644
--- a/llvm/test/CodeGen/X86/fmaxnum.ll
+++ b/llvm/test/CodeGen/X86/fmaxnum.ll
@@ -55,7 +55,7 @@ define float @test_fmaxf(float %x, float %y) {
define float @test_fmaxf_minsize(float %x, float %y) minsize {
; CHECK-LABEL: test_fmaxf_minsize:
; CHECK: # %bb.0:
-; CHECK-NEXT: jmp fmaxf # TAILCALL
+; CHECK-NEXT: jmp fmaxf@PLT # TAILCALL
%z = call float @fmaxf(float %x, float %y) readnone
ret float %z
}
diff --git a/llvm/test/CodeGen/X86/fmf-flags.ll b/llvm/test/CodeGen/X86/fmf-flags.ll
index 6a938bf28b23..752e0c189c55 100644
--- a/llvm/test/CodeGen/X86/fmf-flags.ll
+++ b/llvm/test/CodeGen/X86/fmf-flags.ll
@@ -118,7 +118,7 @@ define float @div_arcp_by_const(half %x) {
; X64-NEXT: movzwl %ax, %edi
; X64-NEXT: popq %rax
; X64-NEXT: .cfi_def_cfa_offset 8
-; X64-NEXT: jmp __gnu_h2f_ieee # TAILCALL
+; X64-NEXT: jmp __gnu_h2f_ieee@PLT # TAILCALL
;
; X86-LABEL: div_arcp_by_const:
; X86: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/fminnum.ll b/llvm/test/CodeGen/X86/fminnum.ll
index dc1b8ca8eb4d..1bb9329c8958 100644
--- a/llvm/test/CodeGen/X86/fminnum.ll
+++ b/llvm/test/CodeGen/X86/fminnum.ll
@@ -55,7 +55,7 @@ define float @test_fminf(float %x, float %y) {
define float @test_fminf_minsize(float %x, float %y) minsize {
; CHECK-LABEL: test_fminf_minsize:
; CHECK: # %bb.0:
-; CHECK-NEXT: jmp fminf # TAILCALL
+; CHECK-NEXT: jmp fminf@PLT # TAILCALL
%z = call float @fminf(float %x, float %y) readnone
ret float %z
}
diff --git a/llvm/test/CodeGen/X86/fp128-extract.ll b/llvm/test/CodeGen/X86/fp128-extract.ll
index ac4cb28b5afc..a9fd8a1d1f2b 100644
--- a/llvm/test/CodeGen/X86/fp128-extract.ll
+++ b/llvm/test/CodeGen/X86/fp128-extract.ll
@@ -16,7 +16,7 @@ define fp128 @TestExtract(<2 x double> %x) nounwind {
; CHECK-NEXT: movaps %xmm0, %xmm1
; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: addq $40, %rsp
-; CHECK-NEXT: jmp __multf3 # TAILCALL
+; CHECK-NEXT: jmp __multf3@PLT # TAILCALL
entry:
; Simplified instruction pattern from the output of llvm before r289042,
; for a boost function ...::insert<...>::traverse<...>().
diff --git a/llvm/test/CodeGen/X86/fp128-i128.ll b/llvm/test/CodeGen/X86/fp128-i128.ll
index 74722a37f38d..8d9d8dc5d719 100644
--- a/llvm/test/CodeGen/X86/fp128-i128.ll
+++ b/llvm/test/CodeGen/X86/fp128-i128.ll
@@ -315,7 +315,7 @@ define fp128 @TestI128_4(fp128 %x) #0 {
; SSE-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movq $0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
-; SSE-NEXT: jmp __addtf3 # TAILCALL
+; SSE-NEXT: jmp __addtf3@PLT # TAILCALL
;
; AVX-LABEL: TestI128_4:
; AVX: # %bb.0: # %entry
@@ -325,7 +325,7 @@ define fp128 @TestI128_4(fp128 %x) #0 {
; AVX-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movq $0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: jmp __addtf3 # TAILCALL
+; AVX-NEXT: jmp __addtf3@PLT # TAILCALL
entry:
%0 = bitcast fp128 %x to i128
%bf.clear = and i128 %0, -18446744073709551616
@@ -370,7 +370,7 @@ define fp128 @acosl(fp128 %x) #0 {
; SSE-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movq $0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
-; SSE-NEXT: jmp __addtf3 # TAILCALL
+; SSE-NEXT: jmp __addtf3@PLT # TAILCALL
;
; AVX-LABEL: acosl:
; AVX: # %bb.0: # %entry
@@ -380,7 +380,7 @@ define fp128 @acosl(fp128 %x) #0 {
; AVX-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movq $0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
-; AVX-NEXT: jmp __addtf3 # TAILCALL
+; AVX-NEXT: jmp __addtf3@PLT # TAILCALL
entry:
%0 = bitcast fp128 %x to i128
%bf.clear = and i128 %0, -18446744073709551616
diff --git a/llvm/test/CodeGen/X86/fp128-libcalls.ll b/llvm/test/CodeGen/X86/fp128-libcalls.ll
index adc8dc2d3c76..fad69fc7a25d 100644
--- a/llvm/test/CodeGen/X86/fp128-libcalls.ll
+++ b/llvm/test/CodeGen/X86/fp128-libcalls.ll
@@ -800,7 +800,7 @@ declare fp128 @llvm.round.f128(fp128)
define fp128 @Test128FMA(fp128 %a, fp128 %b, fp128 %c) nounwind {
; CHECK-LABEL: Test128FMA:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: jmp fmal # TAILCALL
+; CHECK-NEXT: jmp fmal@PLT # TAILCALL
;
; X86-LABEL: Test128FMA:
; X86: # %bb.0: # %entry
diff --git a/llvm/test/CodeGen/X86/half.ll b/llvm/test/CodeGen/X86/half.ll
index fe0321eff85b..7789e7926041 100644
--- a/llvm/test/CodeGen/X86/half.ll
+++ b/llvm/test/CodeGen/X86/half.ll
@@ -75,7 +75,7 @@ define float @test_extend32(half* %addr) #0 {
; CHECK-LIBCALL-LABEL: test_extend32:
; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
-; CHECK-LIBCALL-NEXT: jmp __gnu_h2f_ieee # TAILCALL
+; CHECK-LIBCALL-NEXT: jmp __gnu_h2f_ieee@PLT # TAILCALL
;
; BWON-F16C-LABEL: test_extend32:
; BWON-F16C: # %bb.0:
@@ -864,7 +864,7 @@ define float @test_sitofp_fadd_i32(i32 %a, half* %b) #0 {
; CHECK-LIBCALL-NEXT: movzwl %ax, %edi
; CHECK-LIBCALL-NEXT: addq $16, %rsp
; CHECK-LIBCALL-NEXT: popq %rbx
-; CHECK-LIBCALL-NEXT: jmp __gnu_h2f_ieee # TAILCALL
+; CHECK-LIBCALL-NEXT: jmp __gnu_h2f_ieee@PLT # TAILCALL
;
; BWON-F16C-LABEL: test_sitofp_fadd_i32:
; BWON-F16C: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/llround-conv.ll b/llvm/test/CodeGen/X86/llround-conv.ll
index d80f227eebff..1ccda2bc27bb 100644
--- a/llvm/test/CodeGen/X86/llround-conv.ll
+++ b/llvm/test/CodeGen/X86/llround-conv.ll
@@ -28,7 +28,7 @@ define i64 @testmsxs(float %x) {
;
; X64-LABEL: testmsxs:
; X64: # %bb.0: # %entry
-; X64-NEXT: jmp llroundf # TAILCALL
+; X64-NEXT: jmp llroundf@PLT # TAILCALL
entry:
%0 = tail call i64 @llvm.llround.f32(float %x)
ret i64 %0
@@ -59,7 +59,7 @@ define i64 @testmsxd(double %x) {
;
; X64-LABEL: testmsxd:
; X64: # %bb.0: # %entry
-; X64-NEXT: jmp llround # TAILCALL
+; X64-NEXT: jmp llround@PLT # TAILCALL
entry:
%0 = tail call i64 @llvm.llround.f64(double %x)
ret i64 %0
@@ -90,7 +90,7 @@ define i64 @testmsll(x86_fp80 %x) {
;
; X64-LABEL: testmsll:
; X64: # %bb.0: # %entry
-; X64-NEXT: jmp llroundl # TAILCALL
+; X64-NEXT: jmp llroundl@PLT # TAILCALL
entry:
%0 = tail call i64 @llvm.llround.f80(x86_fp80 %x)
ret i64 %0
diff --git a/llvm/test/CodeGen/X86/lround-conv-i32.ll b/llvm/test/CodeGen/X86/lround-conv-i32.ll
index 93b8b0c8df03..06baf1f273c9 100644
--- a/llvm/test/CodeGen/X86/lround-conv-i32.ll
+++ b/llvm/test/CodeGen/X86/lround-conv-i32.ll
@@ -1,12 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s
; RUN: llc < %s -mtriple=i686-unknown -mattr=sse2 | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
define i32 @testmsws(float %x) {
; CHECK-LABEL: testmsws:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: jmp lroundf # TAILCALL
+;
+; X64-LABEL: testmsws:
+; X64: # %bb.0: # %entry
+; X64-NEXT: jmp lroundf@PLT # TAILCALL
entry:
%0 = tail call i32 @llvm.lround.i32.f32(float %x)
ret i32 %0
@@ -16,6 +20,10 @@ define i32 @testmswd(double %x) {
; CHECK-LABEL: testmswd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: jmp lround # TAILCALL
+;
+; X64-LABEL: testmswd:
+; X64: # %bb.0: # %entry
+; X64-NEXT: jmp lround@PLT # TAILCALL
entry:
%0 = tail call i32 @llvm.lround.i32.f64(double %x)
ret i32 %0
@@ -25,6 +33,10 @@ define i32 @testmsll(x86_fp80 %x) {
; CHECK-LABEL: testmsll:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: jmp lroundl # TAILCALL
+;
+; X64-LABEL: testmsll:
+; X64: # %bb.0: # %entry
+; X64-NEXT: jmp lroundl@PLT # TAILCALL
entry:
%0 = tail call i32 @llvm.lround.i32.f80(x86_fp80 %x)
ret i32 %0
diff --git a/llvm/test/CodeGen/X86/lround-conv-i64.ll b/llvm/test/CodeGen/X86/lround-conv-i64.ll
index cd4c4994f7b9..1cfa42673658 100644
--- a/llvm/test/CodeGen/X86/lround-conv-i64.ll
+++ b/llvm/test/CodeGen/X86/lround-conv-i64.ll
@@ -4,7 +4,7 @@
define i64 @testmsxs(float %x) {
; CHECK-LABEL: testmsxs:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: jmp lroundf # TAILCALL
+; CHECK-NEXT: jmp lroundf@PLT # TAILCALL
entry:
%0 = tail call i64 @llvm.lround.i64.f32(float %x)
ret i64 %0
@@ -13,7 +13,7 @@ entry:
define i64 @testmsxd(double %x) {
; CHECK-LABEL: testmsxd:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: jmp lround # TAILCALL
+; CHECK-NEXT: jmp lround@PLT # TAILCALL
entry:
%0 = tail call i64 @llvm.lround.i64.f64(double %x)
ret i64 %0
@@ -22,7 +22,7 @@ entry:
define i64 @testmsll(x86_fp80 %x) {
; CHECK-LABEL: testmsll:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: jmp lroundl # TAILCALL
+; CHECK-NEXT: jmp lroundl@PLT # TAILCALL
entry:
%0 = tail call i64 @llvm.lround.i64.f80(x86_fp80 %x)
ret i64 %0
diff --git a/llvm/test/CodeGen/X86/memcpy-inline.ll b/llvm/test/CodeGen/X86/memcpy-inline.ll
index a91e6d482137..d360050cf69a 100644
--- a/llvm/test/CodeGen/X86/memcpy-inline.ll
+++ b/llvm/test/CodeGen/X86/memcpy-inline.ll
@@ -18,7 +18,7 @@ define void @regular_memcpy_calls_external_function(i8* %a, i8* %b) nounwind {
; CHECK-LABEL: regular_memcpy_calls_external_function:
; CHECK: # %bb.0:
; CHECK-NEXT: movl $128, %edx
-; CHECK-NEXT: jmp memcpy # TAILCALL
+; CHECK-NEXT: jmp memcpy@PLT # TAILCALL
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 128, i1 0 )
ret void
}
diff --git a/llvm/test/CodeGen/X86/memcpy.ll b/llvm/test/CodeGen/X86/memcpy.ll
index 4c0a937a7031..11c92fa2a2af 100644
--- a/llvm/test/CodeGen/X86/memcpy.ll
+++ b/llvm/test/CodeGen/X86/memcpy.ll
@@ -18,23 +18,23 @@ define i8* @test1(i8* %a, i8* %b, i64 %n) nounwind {
;
; LINUX-LABEL: test1:
; LINUX: # %bb.0: # %entry
-; LINUX-NEXT: jmp memcpy # TAILCALL
+; LINUX-NEXT: jmp memcpy@PLT # TAILCALL
;
; LINUX-SKL-LABEL: test1:
; LINUX-SKL: # %bb.0: # %entry
-; LINUX-SKL-NEXT: jmp memcpy # TAILCALL
+; LINUX-SKL-NEXT: jmp memcpy@PLT # TAILCALL
;
; LINUX-SKX-LABEL: test1:
; LINUX-SKX: # %bb.0: # %entry
-; LINUX-SKX-NEXT: jmp memcpy # TAILCALL
+; LINUX-SKX-NEXT: jmp memcpy@PLT # TAILCALL
;
; LINUX-KNL-LABEL: test1:
; LINUX-KNL: # %bb.0: # %entry
-; LINUX-KNL-NEXT: jmp memcpy # TAILCALL
+; LINUX-KNL-NEXT: jmp memcpy@PLT # TAILCALL
;
; LINUX-AVX512BW-LABEL: test1:
; LINUX-AVX512BW: # %bb.0: # %entry
-; LINUX-AVX512BW-NEXT: jmp memcpy # TAILCALL
+; LINUX-AVX512BW-NEXT: jmp memcpy@PLT # TAILCALL
entry:
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 %n, i1 0 )
ret i8* %a
@@ -48,23 +48,23 @@ define i8* @test2(i64* %a, i64* %b, i64 %n) nounwind {
;
; LINUX-LABEL: test2:
; LINUX: # %bb.0: # %entry
-; LINUX-NEXT: jmp memcpy # TAILCALL
+; LINUX-NEXT: jmp memcpy@PLT # TAILCALL
;
; LINUX-SKL-LABEL: test2:
; LINUX-SKL: # %bb.0: # %entry
-; LINUX-SKL-NEXT: jmp memcpy # TAILCALL
+; LINUX-SKL-NEXT: jmp memcpy@PLT # TAILCALL
;
; LINUX-SKX-LABEL: test2:
; LINUX-SKX: # %bb.0: # %entry
-; LINUX-SKX-NEXT: jmp memcpy # TAILCALL
+; LINUX-SKX-NEXT: jmp memcpy@PLT # TAILCALL
;
; LINUX-KNL-LABEL: test2:
; LINUX-KNL: # %bb.0: # %entry
-; LINUX-KNL-NEXT: jmp memcpy # TAILCALL
+; LINUX-KNL-NEXT: jmp memcpy@PLT # TAILCALL
;
; LINUX-AVX512BW-LABEL: test2:
; LINUX-AVX512BW: # %bb.0: # %entry
-; LINUX-AVX512BW-NEXT: jmp memcpy # TAILCALL
+; LINUX-AVX512BW-NEXT: jmp memcpy@PLT # TAILCALL
entry:
%tmp14 = bitcast i64* %a to i8*
%tmp25 = bitcast i64* %b to i8*
@@ -102,7 +102,7 @@ define void @test3(i8* nocapture %A, i8* nocapture %B) nounwind optsize noredzon
; LINUX-LABEL: test3:
; LINUX: # %bb.0: # %entry
; LINUX-NEXT: movl $64, %edx
-; LINUX-NEXT: jmp memcpy # TAILCALL
+; LINUX-NEXT: jmp memcpy@PLT # TAILCALL
;
; LINUX-SKL-LABEL: test3:
; LINUX-SKL: # %bb.0: # %entry
@@ -143,7 +143,7 @@ define void @test3_pgso(i8* nocapture %A, i8* nocapture %B) nounwind noredzone !
; LINUX-LABEL: test3_pgso:
; LINUX: # %bb.0: # %entry
; LINUX-NEXT: movl $64, %edx
-; LINUX-NEXT: jmp memcpy # TAILCALL
+; LINUX-NEXT: jmp memcpy@PLT # TAILCALL
;
; DARWIN-LABEL: test3_pgso:
; DARWIN: ## %bb.0: ## %entry
@@ -180,7 +180,7 @@ define void @test3_minsize(i8* nocapture %A, i8* nocapture %B) nounwind minsize
; LINUX: # %bb.0:
; LINUX-NEXT: pushq $64
; LINUX-NEXT: popq %rdx
-; LINUX-NEXT: jmp memcpy # TAILCALL
+; LINUX-NEXT: jmp memcpy@PLT # TAILCALL
;
; LINUX-SKL-LABEL: test3_minsize:
; LINUX-SKL: # %bb.0:
@@ -227,7 +227,7 @@ define void @test3_minsize_optsize(i8* nocapture %A, i8* nocapture %B) nounwind
; LINUX: # %bb.0:
; LINUX-NEXT: pushq $64
; LINUX-NEXT: popq %rdx
-; LINUX-NEXT: jmp memcpy # TAILCALL
+; LINUX-NEXT: jmp memcpy@PLT # TAILCALL
;
; LINUX-SKL-LABEL: test3_minsize_optsize:
; LINUX-SKL: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/memset-nonzero.ll b/llvm/test/CodeGen/X86/memset-nonzero.ll
index c499a1aa3794..84f084fedb57 100644
--- a/llvm/test/CodeGen/X86/memset-nonzero.ll
+++ b/llvm/test/CodeGen/X86/memset-nonzero.ll
@@ -535,7 +535,7 @@ define void @memset_256_nonconst_bytes(i8* %x, i8 %c) {
; SSE-LABEL: memset_256_nonconst_bytes:
; SSE: # %bb.0:
; SSE-NEXT: movl $256, %edx # imm = 0x100
-; SSE-NEXT: jmp memset # TAILCALL
+; SSE-NEXT: jmp memset@PLT # TAILCALL
;
; SSE2FAST-LABEL: memset_256_nonconst_bytes:
; SSE2FAST: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/negative-sin.ll b/llvm/test/CodeGen/X86/negative-sin.ll
index c30cd2741e6b..9f00dbb01ff6 100644
--- a/llvm/test/CodeGen/X86/negative-sin.ll
+++ b/llvm/test/CodeGen/X86/negative-sin.ll
@@ -28,7 +28,7 @@ define double @strict(double %e) nounwind {
define double @fast(double %e) nounwind {
; CHECK-LABEL: fast:
; CHECK: # %bb.0:
-; CHECK-NEXT: jmp sin # TAILCALL
+; CHECK-NEXT: jmp sin@PLT # TAILCALL
%f = fsub fast double 0.0, %e
%g = call double @sin(double %f) readonly
%h = fsub fast double 0.0, %g
@@ -40,7 +40,7 @@ define double @fast(double %e) nounwind {
define double @nsz(double %e) nounwind {
; CHECK-LABEL: nsz:
; CHECK: # %bb.0:
-; CHECK-NEXT: jmp sin # TAILCALL
+; CHECK-NEXT: jmp sin@PLT # TAILCALL
%f = fsub nsz double 0.0, %e
%g = call double @sin(double %f) readonly
%h = fsub nsz double 0.0, %g
@@ -88,7 +88,7 @@ define double @semi_strict2(double %e) nounwind {
define double @fn_attr(double %e) nounwind #0 {
; CHECK-LABEL: fn_attr:
; CHECK: # %bb.0:
-; CHECK-NEXT: jmp sin # TAILCALL
+; CHECK-NEXT: jmp sin@PLT # TAILCALL
%f = fsub double 0.0, %e
%g = call double @sin(double %f) readonly
%h = fsub double 0.0, %g
diff --git a/llvm/test/CodeGen/X86/pow.ll b/llvm/test/CodeGen/X86/pow.ll
index e382a2d32ccd..a0b85cc811cb 100644
--- a/llvm/test/CodeGen/X86/pow.ll
+++ b/llvm/test/CodeGen/X86/pow.ll
@@ -133,7 +133,7 @@ define float @pow_f32_one_fourth_not_enough_fmf(float %x) nounwind {
; CHECK-LABEL: pow_f32_one_fourth_not_enough_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT: jmp powf # TAILCALL
+; CHECK-NEXT: jmp powf@PLT # TAILCALL
%r = call afn ninf float @llvm.pow.f32(float %x, float 2.5e-01)
ret float %r
}
@@ -142,7 +142,7 @@ define double @pow_f64_one_fourth_not_enough_fmf(double %x) nounwind {
; CHECK-LABEL: pow_f64_one_fourth_not_enough_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: jmp pow # TAILCALL
+; CHECK-NEXT: jmp pow@PLT # TAILCALL
%r = call nsz ninf double @llvm.pow.f64(double %x, double 2.5e-01)
ret double %r
}
@@ -206,7 +206,7 @@ define <2 x double> @pow_v2f64_one_fourth_not_enough_fmf(<2 x double> %x) nounwi
define float @pow_f32_one_third_fmf(float %x) nounwind {
; CHECK-LABEL: pow_f32_one_third_fmf:
; CHECK: # %bb.0:
-; CHECK-NEXT: jmp cbrtf # TAILCALL
+; CHECK-NEXT: jmp cbrtf@PLT # TAILCALL
%one = uitofp i32 1 to float
%three = uitofp i32 3 to float
%exp = fdiv float %one, %three
@@ -217,7 +217,7 @@ define float @pow_f32_one_third_fmf(float %x) nounwind {
define double @pow_f64_one_third_fmf(double %x) nounwind {
; CHECK-LABEL: pow_f64_one_third_fmf:
; CHECK: # %bb.0:
-; CHECK-NEXT: jmp cbrt # TAILCALL
+; CHECK-NEXT: jmp cbrt@PLT # TAILCALL
%one = uitofp i32 1 to double
%three = uitofp i32 3 to double
%exp = fdiv double %one, %three
@@ -251,7 +251,7 @@ define double @pow_f64_not_exactly_one_third_fmf(double %x) nounwind {
; CHECK-LABEL: pow_f64_not_exactly_one_third_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: jmp pow # TAILCALL
+; CHECK-NEXT: jmp pow@PLT # TAILCALL
%r = call nsz nnan ninf afn double @llvm.pow.f64(double %x, double 0x3fd5555555555556)
ret double %r
}
@@ -262,7 +262,7 @@ define double @pow_f64_not_enough_fmf(double %x) nounwind {
; CHECK-LABEL: pow_f64_not_enough_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: jmp pow # TAILCALL
+; CHECK-NEXT: jmp pow@PLT # TAILCALL
%r = call nsz ninf afn double @llvm.pow.f64(double %x, double 0x3fd5555555555555)
ret double %r
}
diff --git a/llvm/test/CodeGen/X86/powi.ll b/llvm/test/CodeGen/X86/powi.ll
index 3fad07064359..80779cd7f700 100644
--- a/llvm/test/CodeGen/X86/powi.ll
+++ b/llvm/test/CodeGen/X86/powi.ll
@@ -81,7 +81,7 @@ define double @pow_wrapper_optsize(double %a) optsize {
; X64-LABEL: pow_wrapper_optsize:
; X64: # %bb.0:
; X64-NEXT: movl $15, %edi
-; X64-NEXT: jmp __powidf2 # TAILCALL
+; X64-NEXT: jmp __powidf2@PLT # TAILCALL
%ret = tail call double @llvm.powi.f64(double %a, i32 15) nounwind ; <double> [#uses=1]
ret double %ret
}
@@ -114,7 +114,7 @@ define double @pow_wrapper_pgso(double %a) !prof !14 {
; X64-LABEL: pow_wrapper_pgso:
; X64: # %bb.0:
; X64-NEXT: movl $15, %edi
-; X64-NEXT: jmp __powidf2 # TAILCALL
+; X64-NEXT: jmp __powidf2@PLT # TAILCALL
%ret = tail call double @llvm.powi.f64(double %a, i32 15) nounwind ; <double> [#uses=1]
ret double %ret
}
@@ -150,7 +150,7 @@ define double @pow_wrapper_minsize(double %a) minsize {
; X64-NEXT: .cfi_adjust_cfa_offset 8
; X64-NEXT: popq %rdi
; X64-NEXT: .cfi_adjust_cfa_offset -8
-; X64-NEXT: jmp __powidf2 # TAILCALL
+; X64-NEXT: jmp __powidf2@PLT # TAILCALL
%ret = tail call double @llvm.powi.f64(double %a, i32 15) nounwind ; <double> [#uses=1]
ret double %ret
}
diff --git a/llvm/test/CodeGen/X86/pr38865.ll b/llvm/test/CodeGen/X86/pr38865.ll
index 0a40f9ec06b6..83d48b5aa829 100644
--- a/llvm/test/CodeGen/X86/pr38865.ll
+++ b/llvm/test/CodeGen/X86/pr38865.ll
@@ -20,8 +20,8 @@ define void @e() nounwind {
; CHECK-NEXT: # fixup A - offset: 1, value: c, kind: FK_Data_4
; CHECK-NEXT: movl $260, %edx # encoding: [0xba,0x04,0x01,0x00,0x00]
; CHECK-NEXT: # imm = 0x104
-; CHECK-NEXT: callq memcpy # encoding: [0xe8,A,A,A,A]
-; CHECK-NEXT: # fixup A - offset: 1, value: memcpy-4, kind: FK_PCRel_4
+; CHECK-NEXT: callq memcpy@PLT # encoding: [0xe8,A,A,A,A]
+; CHECK-NEXT: # fixup A - offset: 1, value: memcpy@PLT-4, kind: FK_PCRel_4
; CHECK-NEXT: movl $32, %ecx # encoding: [0xb9,0x20,0x00,0x00,0x00]
; CHECK-NEXT: movl %esp, %edi # encoding: [0x89,0xe7]
; CHECK-NEXT: movl %ebx, %esi # encoding: [0x89,0xde]
diff --git a/llvm/test/CodeGen/X86/vector-half-conversions.ll b/llvm/test/CodeGen/X86/vector-half-conversions.ll
index 674d16670e17..1778a197de76 100644
--- a/llvm/test/CodeGen/X86/vector-half-conversions.ll
+++ b/llvm/test/CodeGen/X86/vector-half-conversions.ll
@@ -645,7 +645,7 @@ define void @store_cvt_16f32_to_16i16(<16 x float> %a0, <16 x i16>* %a1) nounwin
define i16 @cvt_f64_to_i16(double %a0) nounwind {
; ALL-LABEL: cvt_f64_to_i16:
; ALL: # %bb.0:
-; ALL-NEXT: jmp __truncdfhf2 # TAILCALL
+; ALL-NEXT: jmp __truncdfhf2@PLT # TAILCALL
%1 = fptrunc double %a0 to half
%2 = bitcast half %1 to i16
ret i16 %2
More information about the llvm-branch-commits
mailing list