[llvm] dced464 - [X86] Regenerate a bunch of tests to pick up @PLT
Craig Topper via llvm-commits
llvm-commits@lists.llvm.org
Sat Mar 27 16:43:13 PDT 2021
Author: Craig Topper
Date: 2021-03-27T16:41:35-07:00
New Revision: dced4649af3e643c6e12e6d46d5463f2aa2ffae7
URL: https://github.com/llvm/llvm-project/commit/dced4649af3e643c6e12e6d46d5463f2aa2ffae7
DIFF: https://github.com/llvm/llvm-project/commit/dced4649af3e643c6e12e6d46d5463f2aa2ffae7.diff
LOG: [X86] Regenerate a bunch of tests to pick up @PLT
I'm prepping another patch to the same tests and this just adds
noise to my diff.
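For context: these test files carry autogenerated CHECK lines, so the update is mechanical. Assuming the usual workflow with an up-to-date llc on PATH, regenerating one of them looks roughly like:

  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/fp-intrinsics.ll

The only difference the regeneration picks up is the explicit @PLT suffix that llc now prints on these library calls, e.g. "callq pow@PLT" instead of "callq pow".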
Added:
Modified:
llvm/test/CodeGen/X86/addsub-constant-folding.ll
llvm/test/CodeGen/X86/avx-cmp.ll
llvm/test/CodeGen/X86/fmf-flags.ll
llvm/test/CodeGen/X86/fp-cvt.ll
llvm/test/CodeGen/X86/fp-intrinsics.ll
llvm/test/CodeGen/X86/fp128-cast.ll
llvm/test/CodeGen/X86/fp128-i128.ll
llvm/test/CodeGen/X86/half.ll
llvm/test/CodeGen/X86/select.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/addsub-constant-folding.ll b/llvm/test/CodeGen/X86/addsub-constant-folding.ll
index 4e8a80a0c8ff..c004e77f9ae5 100644
--- a/llvm/test/CodeGen/X86/addsub-constant-folding.ll
+++ b/llvm/test/CodeGen/X86/addsub-constant-folding.ll
@@ -34,7 +34,7 @@ define i32 @add_const_add_const_extrause(i32 %arg) {
; X86-NEXT: leal 8(%esi), %eax
; X86-NEXT: pushl %eax
; X86-NEXT: .cfi_adjust_cfa_offset 4
-; X86-NEXT: calll use
+; X86-NEXT: calll use@PLT
; X86-NEXT: addl $4, %esp
; X86-NEXT: .cfi_adjust_cfa_offset -4
; X86-NEXT: addl $10, %esi
@@ -50,7 +50,7 @@ define i32 @add_const_add_const_extrause(i32 %arg) {
; X64-NEXT: .cfi_offset %rbx, -16
; X64-NEXT: movl %edi, %ebx
; X64-NEXT: leal 8(%rbx), %edi
-; X64-NEXT: callq use
+; X64-NEXT: callq use@PLT
; X64-NEXT: leal 10(%rbx), %eax
; X64-NEXT: popq %rbx
; X64-NEXT: .cfi_def_cfa_offset 8
@@ -85,7 +85,7 @@ define <4 x i32> @vec_add_const_add_const_extrause(<4 x i32> %arg) {
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
; X86-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8]
; X86-NEXT: paddd %xmm1, %xmm0
-; X86-NEXT: calll vec_use
+; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
; X86-NEXT: addl $28, %esp
@@ -100,7 +100,7 @@ define <4 x i32> @vec_add_const_add_const_extrause(<4 x i32> %arg) {
; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; X64-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8]
; X64-NEXT: paddd %xmm1, %xmm0
-; X64-NEXT: callq vec_use
+; X64-NEXT: callq vec_use@PLT
; X64-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; X64-NEXT: paddd {{.*}}(%rip), %xmm0
; X64-NEXT: addq $24, %rsp
@@ -156,7 +156,7 @@ define i32 @add_const_sub_const_extrause(i32 %arg) {
; X86-NEXT: leal 8(%esi), %eax
; X86-NEXT: pushl %eax
; X86-NEXT: .cfi_adjust_cfa_offset 4
-; X86-NEXT: calll use
+; X86-NEXT: calll use@PLT
; X86-NEXT: addl $4, %esp
; X86-NEXT: .cfi_adjust_cfa_offset -4
; X86-NEXT: addl $6, %esi
@@ -172,7 +172,7 @@ define i32 @add_const_sub_const_extrause(i32 %arg) {
; X64-NEXT: .cfi_offset %rbx, -16
; X64-NEXT: movl %edi, %ebx
; X64-NEXT: leal 8(%rbx), %edi
-; X64-NEXT: callq use
+; X64-NEXT: callq use@PLT
; X64-NEXT: leal 6(%rbx), %eax
; X64-NEXT: popq %rbx
; X64-NEXT: .cfi_def_cfa_offset 8
@@ -207,7 +207,7 @@ define <4 x i32> @vec_add_const_sub_const_extrause(<4 x i32> %arg) {
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
; X86-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8]
; X86-NEXT: paddd %xmm1, %xmm0
-; X86-NEXT: calll vec_use
+; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
; X86-NEXT: addl $28, %esp
@@ -222,7 +222,7 @@ define <4 x i32> @vec_add_const_sub_const_extrause(<4 x i32> %arg) {
; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; X64-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8]
; X64-NEXT: paddd %xmm1, %xmm0
-; X64-NEXT: callq vec_use
+; X64-NEXT: callq vec_use@PLT
; X64-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; X64-NEXT: paddd {{.*}}(%rip), %xmm0
; X64-NEXT: addq $24, %rsp
@@ -278,7 +278,7 @@ define i32 @add_const_const_sub_extrause(i32 %arg) {
; X86-NEXT: leal 8(%esi), %eax
; X86-NEXT: pushl %eax
; X86-NEXT: .cfi_adjust_cfa_offset 4
-; X86-NEXT: calll use
+; X86-NEXT: calll use@PLT
; X86-NEXT: addl $4, %esp
; X86-NEXT: .cfi_adjust_cfa_offset -4
; X86-NEXT: movl $-6, %eax
@@ -294,7 +294,7 @@ define i32 @add_const_const_sub_extrause(i32 %arg) {
; X64-NEXT: .cfi_offset %rbx, -16
; X64-NEXT: movl %edi, %ebx
; X64-NEXT: leal 8(%rbx), %edi
-; X64-NEXT: callq use
+; X64-NEXT: callq use@PLT
; X64-NEXT: movl $-6, %eax
; X64-NEXT: subl %ebx, %eax
; X64-NEXT: popq %rbx
@@ -334,7 +334,7 @@ define <4 x i32> @vec_add_const_const_sub_extrause(<4 x i32> %arg) {
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
; X86-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8]
; X86-NEXT: paddd %xmm1, %xmm0
-; X86-NEXT: calll vec_use
+; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqa {{.*#+}} xmm0 = [4294967290,4294967290,4294967290,4294967290]
; X86-NEXT: movdqu (%esp), %xmm1 # 16-byte Reload
; X86-NEXT: psubd %xmm1, %xmm0
@@ -350,7 +350,7 @@ define <4 x i32> @vec_add_const_const_sub_extrause(<4 x i32> %arg) {
; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; X64-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8]
; X64-NEXT: paddd %xmm1, %xmm0
-; X64-NEXT: callq vec_use
+; X64-NEXT: callq vec_use@PLT
; X64-NEXT: movdqa {{.*#+}} xmm0 = [4294967290,4294967290,4294967290,4294967290]
; X64-NEXT: psubd (%rsp), %xmm0 # 16-byte Folded Reload
; X64-NEXT: addq $24, %rsp
@@ -410,7 +410,7 @@ define i32 @sub_const_add_const_extrause(i32 %arg) {
; X86-NEXT: leal -8(%esi), %eax
; X86-NEXT: pushl %eax
; X86-NEXT: .cfi_adjust_cfa_offset 4
-; X86-NEXT: calll use
+; X86-NEXT: calll use@PLT
; X86-NEXT: addl $4, %esp
; X86-NEXT: .cfi_adjust_cfa_offset -4
; X86-NEXT: addl $-6, %esi
@@ -426,7 +426,7 @@ define i32 @sub_const_add_const_extrause(i32 %arg) {
; X64-NEXT: .cfi_offset %rbx, -16
; X64-NEXT: movl %edi, %ebx
; X64-NEXT: leal -8(%rbx), %edi
-; X64-NEXT: callq use
+; X64-NEXT: callq use@PLT
; X64-NEXT: leal -6(%rbx), %eax
; X64-NEXT: popq %rbx
; X64-NEXT: .cfi_def_cfa_offset 8
@@ -459,7 +459,7 @@ define <4 x i32> @vec_sub_const_add_const_extrause(<4 x i32> %arg) {
; X86-NEXT: .cfi_def_cfa_offset 32
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
-; X86-NEXT: calll vec_use
+; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
; X86-NEXT: addl $28, %esp
@@ -472,7 +472,7 @@ define <4 x i32> @vec_sub_const_add_const_extrause(<4 x i32> %arg) {
; X64-NEXT: .cfi_def_cfa_offset 32
; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; X64-NEXT: psubd {{.*}}(%rip), %xmm0
-; X64-NEXT: callq vec_use
+; X64-NEXT: callq vec_use@PLT
; X64-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; X64-NEXT: paddd {{.*}}(%rip), %xmm0
; X64-NEXT: addq $24, %rsp
@@ -528,7 +528,7 @@ define i32 @sub_const_sub_const_extrause(i32 %arg) {
; X86-NEXT: leal -8(%esi), %eax
; X86-NEXT: pushl %eax
; X86-NEXT: .cfi_adjust_cfa_offset 4
-; X86-NEXT: calll use
+; X86-NEXT: calll use@PLT
; X86-NEXT: addl $4, %esp
; X86-NEXT: .cfi_adjust_cfa_offset -4
; X86-NEXT: addl $-10, %esi
@@ -544,7 +544,7 @@ define i32 @sub_const_sub_const_extrause(i32 %arg) {
; X64-NEXT: .cfi_offset %rbx, -16
; X64-NEXT: movl %edi, %ebx
; X64-NEXT: leal -8(%rbx), %edi
-; X64-NEXT: callq use
+; X64-NEXT: callq use@PLT
; X64-NEXT: leal -10(%rbx), %eax
; X64-NEXT: popq %rbx
; X64-NEXT: .cfi_def_cfa_offset 8
@@ -577,7 +577,7 @@ define <4 x i32> @vec_sub_const_sub_const_extrause(<4 x i32> %arg) {
; X86-NEXT: .cfi_def_cfa_offset 32
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
-; X86-NEXT: calll vec_use
+; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
; X86-NEXT: addl $28, %esp
@@ -590,7 +590,7 @@ define <4 x i32> @vec_sub_const_sub_const_extrause(<4 x i32> %arg) {
; X64-NEXT: .cfi_def_cfa_offset 32
; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; X64-NEXT: psubd {{.*}}(%rip), %xmm0
-; X64-NEXT: callq vec_use
+; X64-NEXT: callq vec_use@PLT
; X64-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; X64-NEXT: psubd {{.*}}(%rip), %xmm0
; X64-NEXT: addq $24, %rsp
@@ -646,7 +646,7 @@ define i32 @sub_const_const_sub_extrause(i32 %arg) {
; X86-NEXT: leal -8(%esi), %eax
; X86-NEXT: pushl %eax
; X86-NEXT: .cfi_adjust_cfa_offset 4
-; X86-NEXT: calll use
+; X86-NEXT: calll use@PLT
; X86-NEXT: addl $4, %esp
; X86-NEXT: .cfi_adjust_cfa_offset -4
; X86-NEXT: movl $10, %eax
@@ -662,7 +662,7 @@ define i32 @sub_const_const_sub_extrause(i32 %arg) {
; X64-NEXT: .cfi_offset %rbx, -16
; X64-NEXT: movl %edi, %ebx
; X64-NEXT: leal -8(%rbx), %edi
-; X64-NEXT: callq use
+; X64-NEXT: callq use@PLT
; X64-NEXT: movl $10, %eax
; X64-NEXT: subl %ebx, %eax
; X64-NEXT: popq %rbx
@@ -700,7 +700,7 @@ define <4 x i32> @vec_sub_const_const_sub_extrause(<4 x i32> %arg) {
; X86-NEXT: .cfi_def_cfa_offset 32
; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
-; X86-NEXT: calll vec_use
+; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqa {{.*#+}} xmm0 = [2,2,2,2]
; X86-NEXT: movdqu (%esp), %xmm1 # 16-byte Reload
; X86-NEXT: psubd %xmm1, %xmm0
@@ -714,7 +714,7 @@ define <4 x i32> @vec_sub_const_const_sub_extrause(<4 x i32> %arg) {
; X64-NEXT: .cfi_def_cfa_offset 32
; X64-NEXT: psubd {{.*}}(%rip), %xmm0
; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
-; X64-NEXT: callq vec_use
+; X64-NEXT: callq vec_use@PLT
; X64-NEXT: movdqa {{.*#+}} xmm0 = [2,2,2,2]
; X64-NEXT: psubd (%rsp), %xmm0 # 16-byte Folded Reload
; X64-NEXT: addq $24, %rsp
@@ -775,7 +775,7 @@ define i32 @const_sub_add_const_extrause(i32 %arg) {
; X86-NEXT: subl %esi, %eax
; X86-NEXT: pushl %eax
; X86-NEXT: .cfi_adjust_cfa_offset 4
-; X86-NEXT: calll use
+; X86-NEXT: calll use@PLT
; X86-NEXT: addl $4, %esp
; X86-NEXT: .cfi_adjust_cfa_offset -4
; X86-NEXT: movl $10, %eax
@@ -792,7 +792,7 @@ define i32 @const_sub_add_const_extrause(i32 %arg) {
; X64-NEXT: movl %edi, %ebx
; X64-NEXT: movl $8, %edi
; X64-NEXT: subl %ebx, %edi
-; X64-NEXT: callq use
+; X64-NEXT: callq use@PLT
; X64-NEXT: movl $10, %eax
; X64-NEXT: subl %ebx, %eax
; X64-NEXT: popq %rbx
@@ -832,7 +832,7 @@ define <4 x i32> @vec_const_sub_add_const_extrause(<4 x i32> %arg) {
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
; X86-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8]
; X86-NEXT: psubd %xmm1, %xmm0
-; X86-NEXT: calll vec_use
+; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqa {{.*#+}} xmm0 = [10,10,10,10]
; X86-NEXT: movdqu (%esp), %xmm1 # 16-byte Reload
; X86-NEXT: psubd %xmm1, %xmm0
@@ -848,7 +848,7 @@ define <4 x i32> @vec_const_sub_add_const_extrause(<4 x i32> %arg) {
; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; X64-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8]
; X64-NEXT: psubd %xmm1, %xmm0
-; X64-NEXT: callq vec_use
+; X64-NEXT: callq vec_use@PLT
; X64-NEXT: movdqa {{.*#+}} xmm0 = [10,10,10,10]
; X64-NEXT: psubd (%rsp), %xmm0 # 16-byte Folded Reload
; X64-NEXT: addq $24, %rsp
@@ -909,7 +909,7 @@ define i32 @const_sub_sub_const_extrause(i32 %arg) {
; X86-NEXT: subl %esi, %eax
; X86-NEXT: pushl %eax
; X86-NEXT: .cfi_adjust_cfa_offset 4
-; X86-NEXT: calll use
+; X86-NEXT: calll use@PLT
; X86-NEXT: addl $4, %esp
; X86-NEXT: .cfi_adjust_cfa_offset -4
; X86-NEXT: movl $6, %eax
@@ -926,7 +926,7 @@ define i32 @const_sub_sub_const_extrause(i32 %arg) {
; X64-NEXT: movl %edi, %ebx
; X64-NEXT: movl $8, %edi
; X64-NEXT: subl %ebx, %edi
-; X64-NEXT: callq use
+; X64-NEXT: callq use@PLT
; X64-NEXT: movl $6, %eax
; X64-NEXT: subl %ebx, %eax
; X64-NEXT: popq %rbx
@@ -966,7 +966,7 @@ define <4 x i32> @vec_const_sub_sub_const_extrause(<4 x i32> %arg) {
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
; X86-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8]
; X86-NEXT: psubd %xmm1, %xmm0
-; X86-NEXT: calll vec_use
+; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqa {{.*#+}} xmm0 = [6,6,6,6]
; X86-NEXT: movdqu (%esp), %xmm1 # 16-byte Reload
; X86-NEXT: psubd %xmm1, %xmm0
@@ -982,7 +982,7 @@ define <4 x i32> @vec_const_sub_sub_const_extrause(<4 x i32> %arg) {
; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
; X64-NEXT: movdqa {{.*#+}} xmm0 = [8,8,8,8]
; X64-NEXT: psubd %xmm1, %xmm0
-; X64-NEXT: callq vec_use
+; X64-NEXT: callq vec_use@PLT
; X64-NEXT: movdqa {{.*#+}} xmm0 = [6,6,6,6]
; X64-NEXT: psubd (%rsp), %xmm0 # 16-byte Folded Reload
; X64-NEXT: addq $24, %rsp
@@ -1042,7 +1042,7 @@ define i32 @const_sub_const_sub_extrause(i32 %arg) {
; X86-NEXT: subl {{[0-9]+}}(%esp), %esi
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_adjust_cfa_offset 4
-; X86-NEXT: calll use
+; X86-NEXT: calll use@PLT
; X86-NEXT: addl $4, %esp
; X86-NEXT: .cfi_adjust_cfa_offset -4
; X86-NEXT: movl $2, %eax
@@ -1059,7 +1059,7 @@ define i32 @const_sub_const_sub_extrause(i32 %arg) {
; X64-NEXT: movl $8, %ebx
; X64-NEXT: subl %edi, %ebx
; X64-NEXT: movl %ebx, %edi
-; X64-NEXT: callq use
+; X64-NEXT: callq use@PLT
; X64-NEXT: movl $2, %eax
; X64-NEXT: subl %ebx, %eax
; X64-NEXT: popq %rbx
@@ -1095,7 +1095,7 @@ define <4 x i32> @vec_const_sub_const_sub_extrause(<4 x i32> %arg) {
; X86-NEXT: psubd %xmm0, %xmm1
; X86-NEXT: movdqu %xmm1, (%esp) # 16-byte Spill
; X86-NEXT: movdqa %xmm1, %xmm0
-; X86-NEXT: calll vec_use
+; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqa {{.*#+}} xmm0 = [2,2,2,2]
; X86-NEXT: movdqu (%esp), %xmm1 # 16-byte Reload
; X86-NEXT: psubd %xmm1, %xmm0
@@ -1111,7 +1111,7 @@ define <4 x i32> @vec_const_sub_const_sub_extrause(<4 x i32> %arg) {
; X64-NEXT: psubd %xmm0, %xmm1
; X64-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
; X64-NEXT: movdqa %xmm1, %xmm0
-; X64-NEXT: callq vec_use
+; X64-NEXT: callq vec_use@PLT
; X64-NEXT: movdqa {{.*#+}} xmm0 = [2,2,2,2]
; X64-NEXT: psubd (%rsp), %xmm0 # 16-byte Folded Reload
; X64-NEXT: addq $24, %rsp
diff --git a/llvm/test/CodeGen/X86/avx-cmp.ll b/llvm/test/CodeGen/X86/avx-cmp.ll
index e564cf162ace..3398fcd7cc10 100644
--- a/llvm/test/CodeGen/X86/avx-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx-cmp.ll
@@ -54,7 +54,7 @@ define void @render(double %a0) nounwind {
; CHECK-NEXT: jnp .LBB2_2
; CHECK-NEXT: .LBB2_5: # %if.then
; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
-; CHECK-NEXT: callq scale
+; CHECK-NEXT: callq scale@PLT
; CHECK-NEXT: jmp .LBB2_2
; CHECK-NEXT: .LBB2_6: # %for.end52
; CHECK-NEXT: addq $8, %rsp
diff --git a/llvm/test/CodeGen/X86/fmf-flags.ll b/llvm/test/CodeGen/X86/fmf-flags.ll
index 835ec72ff591..c8a85bdd390c 100644
--- a/llvm/test/CodeGen/X86/fmf-flags.ll
+++ b/llvm/test/CodeGen/X86/fmf-flags.ll
@@ -112,9 +112,9 @@ define dso_local float @div_arcp_by_const(half %x) {
; X64-NEXT: pushq %rax
; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: movzwl %di, %edi
-; X64-NEXT: callq __gnu_h2f_ieee
+; X64-NEXT: callq __gnu_h2f_ieee@PLT
; X64-NEXT: mulss {{.*}}(%rip), %xmm0
-; X64-NEXT: callq __gnu_f2h_ieee
+; X64-NEXT: callq __gnu_f2h_ieee@PLT
; X64-NEXT: movzwl %ax, %edi
; X64-NEXT: popq %rax
; X64-NEXT: .cfi_def_cfa_offset 8
diff --git a/llvm/test/CodeGen/X86/fp-cvt.ll b/llvm/test/CodeGen/X86/fp-cvt.ll
index c8e6a95bcbac..a7e20c5e8c5e 100644
--- a/llvm/test/CodeGen/X86/fp-cvt.ll
+++ b/llvm/test/CodeGen/X86/fp-cvt.ll
@@ -896,7 +896,7 @@ define x86_fp80 @floor_fp80(x86_fp80 %a0) nounwind {
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-NEXT: fstpt (%rsp)
-; X64-NEXT: callq floorl
+; X64-NEXT: callq floorl@PLT
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = call x86_fp80 @llvm.floor.f80(x86_fp80 %a0)
@@ -919,7 +919,7 @@ define x86_fp80 @floor_fp80_ld(x86_fp80 *%a0) nounwind {
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt (%rdi)
; X64-NEXT: fstpt (%rsp)
-; X64-NEXT: callq floorl
+; X64-NEXT: callq floorl@PLT
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = load x86_fp80, x86_fp80 *%a0
@@ -948,7 +948,7 @@ define x86_fp80 @ceil_fp80(x86_fp80 %a0) nounwind {
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-NEXT: fstpt (%rsp)
-; X64-NEXT: callq ceill
+; X64-NEXT: callq ceill@PLT
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = call x86_fp80 @llvm.ceil.f80(x86_fp80 %a0)
@@ -971,7 +971,7 @@ define x86_fp80 @ceil_fp80_ld(x86_fp80 *%a0) nounwind {
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt (%rdi)
; X64-NEXT: fstpt (%rsp)
-; X64-NEXT: callq ceill
+; X64-NEXT: callq ceill@PLT
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = load x86_fp80, x86_fp80 *%a0
@@ -1000,7 +1000,7 @@ define x86_fp80 @trunc_fp80(x86_fp80 %a0) nounwind {
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-NEXT: fstpt (%rsp)
-; X64-NEXT: callq truncl
+; X64-NEXT: callq truncl@PLT
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = call x86_fp80 @llvm.trunc.f80(x86_fp80 %a0)
@@ -1023,7 +1023,7 @@ define x86_fp80 @trunc_fp80_ld(x86_fp80 *%a0) nounwind {
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt (%rdi)
; X64-NEXT: fstpt (%rsp)
-; X64-NEXT: callq truncl
+; X64-NEXT: callq truncl@PLT
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = load x86_fp80, x86_fp80 *%a0
@@ -1052,7 +1052,7 @@ define x86_fp80 @rint_fp80(x86_fp80 %a0) nounwind {
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-NEXT: fstpt (%rsp)
-; X64-NEXT: callq rintl
+; X64-NEXT: callq rintl@PLT
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = call x86_fp80 @llvm.rint.f80(x86_fp80 %a0)
@@ -1075,7 +1075,7 @@ define x86_fp80 @rint_fp80_ld(x86_fp80 *%a0) nounwind {
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt (%rdi)
; X64-NEXT: fstpt (%rsp)
-; X64-NEXT: callq rintl
+; X64-NEXT: callq rintl@PLT
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = load x86_fp80, x86_fp80 *%a0
@@ -1104,7 +1104,7 @@ define x86_fp80 @roundeven_fp80(x86_fp80 %a0) nounwind {
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt {{[0-9]+}}(%rsp)
; X64-NEXT: fstpt (%rsp)
-; X64-NEXT: callq roundevenl
+; X64-NEXT: callq roundevenl@PLT
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = call x86_fp80 @llvm.roundeven.f80(x86_fp80 %a0)
@@ -1127,7 +1127,7 @@ define x86_fp80 @roundeven_fp80_ld(x86_fp80 *%a0) nounwind {
; X64-NEXT: subq $24, %rsp
; X64-NEXT: fldt (%rdi)
; X64-NEXT: fstpt (%rsp)
-; X64-NEXT: callq roundevenl
+; X64-NEXT: callq roundevenl@PLT
; X64-NEXT: addq $24, %rsp
; X64-NEXT: retq
%1 = load x86_fp80, x86_fp80 *%a0
diff --git a/llvm/test/CodeGen/X86/fp-intrinsics.ll b/llvm/test/CodeGen/X86/fp-intrinsics.ll
index abe88f1ca233..7fe25c97d2c2 100644
--- a/llvm/test/CodeGen/X86/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/fp-intrinsics.ll
@@ -326,7 +326,7 @@ define double @f6() #0 {
; SSE-NEXT: .cfi_def_cfa_offset 16
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; SSE-NEXT: callq pow
+; SSE-NEXT: callq pow@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -337,7 +337,7 @@ define double @f6() #0 {
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT: callq pow
+; AVX-NEXT: callq pow@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -382,7 +382,7 @@ define double @f7() #0 {
; SSE-NEXT: .cfi_def_cfa_offset 16
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movl $3, %edi
-; SSE-NEXT: callq __powidf2
+; SSE-NEXT: callq __powidf2@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -393,7 +393,7 @@ define double @f7() #0 {
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: movl $3, %edi
-; AVX-NEXT: callq __powidf2
+; AVX-NEXT: callq __powidf2@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -435,7 +435,7 @@ define double @f8() #0 {
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT: callq sin
+; SSE-NEXT: callq sin@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -445,7 +445,7 @@ define double @f8() #0 {
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: callq sin
+; AVX-NEXT: callq sin@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -486,7 +486,7 @@ define double @f9() #0 {
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT: callq cos
+; SSE-NEXT: callq cos@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -496,7 +496,7 @@ define double @f9() #0 {
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: callq cos
+; AVX-NEXT: callq cos@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -537,7 +537,7 @@ define double @f10() #0 {
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT: callq exp
+; SSE-NEXT: callq exp@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -547,7 +547,7 @@ define double @f10() #0 {
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: callq exp
+; AVX-NEXT: callq exp@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -588,7 +588,7 @@ define double @f11() #0 {
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT: callq exp2
+; SSE-NEXT: callq exp2@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -598,7 +598,7 @@ define double @f11() #0 {
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: callq exp2
+; AVX-NEXT: callq exp2@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -639,7 +639,7 @@ define double @f12() #0 {
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT: callq log
+; SSE-NEXT: callq log@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -649,7 +649,7 @@ define double @f12() #0 {
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: callq log
+; AVX-NEXT: callq log@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -690,7 +690,7 @@ define double @f13() #0 {
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT: callq log10
+; SSE-NEXT: callq log10@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -700,7 +700,7 @@ define double @f13() #0 {
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: callq log10
+; AVX-NEXT: callq log10@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -741,7 +741,7 @@ define double @f14() #0 {
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT: callq log2
+; SSE-NEXT: callq log2@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -751,7 +751,7 @@ define double @f14() #0 {
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: callq log2
+; AVX-NEXT: callq log2@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -792,7 +792,7 @@ define double @f15() #0 {
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT: callq rint
+; SSE-NEXT: callq rint@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -840,7 +840,7 @@ define double @f16() #0 {
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE-NEXT: callq nearbyint
+; SSE-NEXT: callq nearbyint@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -892,7 +892,7 @@ define double @f19() #0 {
; SSE-NEXT: .cfi_def_cfa_offset 16
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; SSE-NEXT: callq fmod
+; SSE-NEXT: callq fmod@PLT
; SSE-NEXT: popq %rax
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -903,7 +903,7 @@ define double @f19() #0 {
; AVX-NEXT: .cfi_def_cfa_offset 16
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT: callq fmod
+; AVX-NEXT: callq fmod@PLT
; AVX-NEXT: popq %rax
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -1172,14 +1172,14 @@ define i128 @f20s128(double %x) nounwind strictfp {
; SSE-LABEL: f20s128:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
-; SSE-NEXT: callq __fixdfti
+; SSE-NEXT: callq __fixdfti@PLT
; SSE-NEXT: popq %rcx
; SSE-NEXT: retq
;
; AVX-LABEL: f20s128:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
-; AVX-NEXT: callq __fixdfti
+; AVX-NEXT: callq __fixdfti@PLT
; AVX-NEXT: popq %rcx
; AVX-NEXT: retq
entry:
@@ -1517,14 +1517,14 @@ define i128 @f20u128(double %x) nounwind strictfp {
; SSE-LABEL: f20u128:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
-; SSE-NEXT: callq __fixunsdfti
+; SSE-NEXT: callq __fixunsdfti@PLT
; SSE-NEXT: popq %rcx
; SSE-NEXT: retq
;
; AVX-LABEL: f20u128:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
-; AVX-NEXT: callq __fixunsdfti
+; AVX-NEXT: callq __fixunsdfti@PLT
; AVX-NEXT: popq %rcx
; AVX-NEXT: retq
entry:
@@ -1644,7 +1644,7 @@ define i32 @f23(double %x) #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: callq lrint
+; SSE-NEXT: callq lrint@PLT
; SSE-NEXT: popq %rcx
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -1653,7 +1653,7 @@ define i32 @f23(double %x) #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: callq lrint
+; AVX-NEXT: callq lrint@PLT
; AVX-NEXT: popq %rcx
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -1692,7 +1692,7 @@ define i32 @f24(float %x) #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: callq lrintf
+; SSE-NEXT: callq lrintf@PLT
; SSE-NEXT: popq %rcx
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -1701,7 +1701,7 @@ define i32 @f24(float %x) #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: callq lrintf
+; AVX-NEXT: callq lrintf@PLT
; AVX-NEXT: popq %rcx
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -1740,7 +1740,7 @@ define i64 @f25(double %x) #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: callq llrint
+; SSE-NEXT: callq llrint@PLT
; SSE-NEXT: popq %rcx
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -1749,7 +1749,7 @@ define i64 @f25(double %x) #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: callq llrint
+; AVX-NEXT: callq llrint@PLT
; AVX-NEXT: popq %rcx
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -1788,7 +1788,7 @@ define i64 @f26(float %x) #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: callq llrintf
+; SSE-NEXT: callq llrintf@PLT
; SSE-NEXT: popq %rcx
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -1797,7 +1797,7 @@ define i64 @f26(float %x) #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: callq llrintf
+; AVX-NEXT: callq llrintf@PLT
; AVX-NEXT: popq %rcx
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -1836,7 +1836,7 @@ define i32 @f27(double %x) #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: callq lround
+; SSE-NEXT: callq lround@PLT
; SSE-NEXT: popq %rcx
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -1845,7 +1845,7 @@ define i32 @f27(double %x) #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: callq lround
+; AVX-NEXT: callq lround@PLT
; AVX-NEXT: popq %rcx
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -1883,7 +1883,7 @@ define i32 @f28(float %x) #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: callq lroundf
+; SSE-NEXT: callq lroundf@PLT
; SSE-NEXT: popq %rcx
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -1892,7 +1892,7 @@ define i32 @f28(float %x) #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: callq lroundf
+; AVX-NEXT: callq lroundf@PLT
; AVX-NEXT: popq %rcx
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -1930,7 +1930,7 @@ define i64 @f29(double %x) #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: callq llround
+; SSE-NEXT: callq llround@PLT
; SSE-NEXT: popq %rcx
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -1939,7 +1939,7 @@ define i64 @f29(double %x) #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: callq llround
+; AVX-NEXT: callq llround@PLT
; AVX-NEXT: popq %rcx
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
@@ -1977,7 +1977,7 @@ define i64 @f30(float %x) #0 {
; SSE: # %bb.0: # %entry
; SSE-NEXT: pushq %rax
; SSE-NEXT: .cfi_def_cfa_offset 16
-; SSE-NEXT: callq llroundf
+; SSE-NEXT: callq llroundf@PLT
; SSE-NEXT: popq %rcx
; SSE-NEXT: .cfi_def_cfa_offset 8
; SSE-NEXT: retq
@@ -1986,7 +1986,7 @@ define i64 @f30(float %x) #0 {
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq %rax
; AVX-NEXT: .cfi_def_cfa_offset 16
-; AVX-NEXT: callq llroundf
+; AVX-NEXT: callq llroundf@PLT
; AVX-NEXT: popq %rcx
; AVX-NEXT: .cfi_def_cfa_offset 8
; AVX-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/fp128-cast.ll b/llvm/test/CodeGen/X86/fp128-cast.ll
index 6093095d51d0..af269449486f 100644
--- a/llvm/test/CodeGen/X86/fp128-cast.ll
+++ b/llvm/test/CodeGen/X86/fp128-cast.ll
@@ -26,7 +26,7 @@ define dso_local void @TestFPExtF32_F128() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X64-SSE-NEXT: callq __extendsftf2
+; X64-SSE-NEXT: callq __extendsftf2@PLT
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -57,7 +57,7 @@ define dso_local void @TestFPExtF32_F128() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X64-AVX-NEXT: callq __extendsftf2
+; X64-AVX-NEXT: callq __extendsftf2@PLT
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -73,7 +73,7 @@ define dso_local void @TestFPExtF64_F128() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X64-SSE-NEXT: callq __extenddftf2
+; X64-SSE-NEXT: callq __extenddftf2@PLT
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -104,7 +104,7 @@ define dso_local void @TestFPExtF64_F128() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-AVX-NEXT: callq __extenddftf2
+; X64-AVX-NEXT: callq __extenddftf2@PLT
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -121,7 +121,7 @@ define dso_local void @TestFPExtF80_F128() nounwind {
; X64-SSE-NEXT: subq $24, %rsp
; X64-SSE-NEXT: fldt {{.*}}(%rip)
; X64-SSE-NEXT: fstpt (%rsp)
-; X64-SSE-NEXT: callq __extendxftf2
+; X64-SSE-NEXT: callq __extendxftf2@PLT
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: addq $24, %rsp
; X64-SSE-NEXT: retq
@@ -153,7 +153,7 @@ define dso_local void @TestFPExtF80_F128() nounwind {
; X64-AVX-NEXT: subq $24, %rsp
; X64-AVX-NEXT: fldt {{.*}}(%rip)
; X64-AVX-NEXT: fstpt (%rsp)
-; X64-AVX-NEXT: callq __extendxftf2
+; X64-AVX-NEXT: callq __extendxftf2@PLT
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: addq $24, %rsp
; X64-AVX-NEXT: retq
@@ -169,7 +169,7 @@ define dso_local void @TestFPToSIF128_I16() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
-; X64-SSE-NEXT: callq __fixtfsi
+; X64-SSE-NEXT: callq __fixtfsi@PLT
; X64-SSE-NEXT: movw %ax, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -191,7 +191,7 @@ define dso_local void @TestFPToSIF128_I16() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
-; X64-AVX-NEXT: callq __fixtfsi
+; X64-AVX-NEXT: callq __fixtfsi@PLT
; X64-AVX-NEXT: movw %ax, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -207,7 +207,7 @@ define dso_local void @TestFPToUIF128_I16() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
-; X64-SSE-NEXT: callq __fixtfsi
+; X64-SSE-NEXT: callq __fixtfsi@PLT
; X64-SSE-NEXT: movw %ax, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -229,7 +229,7 @@ define dso_local void @TestFPToUIF128_I16() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
-; X64-AVX-NEXT: callq __fixtfsi
+; X64-AVX-NEXT: callq __fixtfsi@PLT
; X64-AVX-NEXT: movw %ax, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -245,7 +245,7 @@ define dso_local void @TestFPToSIF128_I32() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
-; X64-SSE-NEXT: callq __fixtfsi
+; X64-SSE-NEXT: callq __fixtfsi@PLT
; X64-SSE-NEXT: movl %eax, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -267,7 +267,7 @@ define dso_local void @TestFPToSIF128_I32() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
-; X64-AVX-NEXT: callq __fixtfsi
+; X64-AVX-NEXT: callq __fixtfsi@PLT
; X64-AVX-NEXT: movl %eax, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -283,7 +283,7 @@ define dso_local void @TestFPToUIF128_U32() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
-; X64-SSE-NEXT: callq __fixunstfsi
+; X64-SSE-NEXT: callq __fixunstfsi@PLT
; X64-SSE-NEXT: movl %eax, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -305,7 +305,7 @@ define dso_local void @TestFPToUIF128_U32() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
-; X64-AVX-NEXT: callq __fixunstfsi
+; X64-AVX-NEXT: callq __fixunstfsi@PLT
; X64-AVX-NEXT: movl %eax, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -321,7 +321,7 @@ define dso_local void @TestFPToSIF128_I64() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
-; X64-SSE-NEXT: callq __fixtfsi
+; X64-SSE-NEXT: callq __fixtfsi@PLT
; X64-SSE-NEXT: cltq
; X64-SSE-NEXT: movq %rax, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
@@ -346,7 +346,7 @@ define dso_local void @TestFPToSIF128_I64() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
-; X64-AVX-NEXT: callq __fixtfsi
+; X64-AVX-NEXT: callq __fixtfsi@PLT
; X64-AVX-NEXT: cltq
; X64-AVX-NEXT: movq %rax, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
@@ -364,7 +364,7 @@ define dso_local void @TestFPToUIF128_U64() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
-; X64-SSE-NEXT: callq __fixunstfsi
+; X64-SSE-NEXT: callq __fixunstfsi@PLT
; X64-SSE-NEXT: movl %eax, %eax
; X64-SSE-NEXT: movq %rax, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
@@ -388,7 +388,7 @@ define dso_local void @TestFPToUIF128_U64() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
-; X64-AVX-NEXT: callq __fixunstfsi
+; X64-AVX-NEXT: callq __fixunstfsi@PLT
; X64-AVX-NEXT: movl %eax, %eax
; X64-AVX-NEXT: movq %rax, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
@@ -406,7 +406,7 @@ define dso_local void @TestFPToSIF128_I128() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
-; X64-SSE-NEXT: callq __fixtfti
+; X64-SSE-NEXT: callq __fixtfti@PLT
; X64-SSE-NEXT: movq %rdx, vi128+{{.*}}(%rip)
; X64-SSE-NEXT: movq %rax, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
@@ -440,7 +440,7 @@ define dso_local void @TestFPToSIF128_I128() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
-; X64-AVX-NEXT: callq __fixtfti
+; X64-AVX-NEXT: callq __fixtfti@PLT
; X64-AVX-NEXT: movq %rdx, vi128+{{.*}}(%rip)
; X64-AVX-NEXT: movq %rax, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
@@ -457,7 +457,7 @@ define dso_local void @TestFPToUIF128_U128() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
-; X64-SSE-NEXT: callq __fixunstfti
+; X64-SSE-NEXT: callq __fixunstfti@PLT
; X64-SSE-NEXT: movq %rdx, vu128+{{.*}}(%rip)
; X64-SSE-NEXT: movq %rax, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
@@ -491,7 +491,7 @@ define dso_local void @TestFPToUIF128_U128() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
-; X64-AVX-NEXT: callq __fixunstfti
+; X64-AVX-NEXT: callq __fixunstfti@PLT
; X64-AVX-NEXT: movq %rdx, vu128+{{.*}}(%rip)
; X64-AVX-NEXT: movq %rax, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
@@ -508,7 +508,7 @@ define dso_local void @TestFPTruncF128_F32() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
-; X64-SSE-NEXT: callq __trunctfsf2
+; X64-SSE-NEXT: callq __trunctfsf2@PLT
; X64-SSE-NEXT: movss %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -530,7 +530,7 @@ define dso_local void @TestFPTruncF128_F32() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
-; X64-AVX-NEXT: callq __trunctfsf2
+; X64-AVX-NEXT: callq __trunctfsf2@PLT
; X64-AVX-NEXT: vmovss %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -546,7 +546,7 @@ define dso_local void @TestFPTruncF128_F64() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
-; X64-SSE-NEXT: callq __trunctfdf2
+; X64-SSE-NEXT: callq __trunctfdf2@PLT
; X64-SSE-NEXT: movsd %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -568,7 +568,7 @@ define dso_local void @TestFPTruncF128_F64() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
-; X64-AVX-NEXT: callq __trunctfdf2
+; X64-AVX-NEXT: callq __trunctfdf2@PLT
; X64-AVX-NEXT: vmovsd %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -584,7 +584,7 @@ define dso_local void @TestFPTruncF128_F80() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm0
-; X64-SSE-NEXT: callq __trunctfxf2
+; X64-SSE-NEXT: callq __trunctfxf2@PLT
; X64-SSE-NEXT: fstpt {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -606,7 +606,7 @@ define dso_local void @TestFPTruncF128_F80() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm0
-; X64-AVX-NEXT: callq __trunctfxf2
+; X64-AVX-NEXT: callq __trunctfxf2@PLT
; X64-AVX-NEXT: fstpt {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -622,7 +622,7 @@ define dso_local void @TestSIToFPI16_F128() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movswl {{.*}}(%rip), %edi
-; X64-SSE-NEXT: callq __floatsitf
+; X64-SSE-NEXT: callq __floatsitf@PLT
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -654,7 +654,7 @@ define dso_local void @TestSIToFPI16_F128() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: movswl {{.*}}(%rip), %edi
-; X64-AVX-NEXT: callq __floatsitf
+; X64-AVX-NEXT: callq __floatsitf@PLT
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -670,7 +670,7 @@ define dso_local void @TestSIToFPU16_F128() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movzwl {{.*}}(%rip), %edi
-; X64-SSE-NEXT: callq __floatsitf
+; X64-SSE-NEXT: callq __floatsitf@PLT
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -702,7 +702,7 @@ define dso_local void @TestSIToFPU16_F128() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: movzwl {{.*}}(%rip), %edi
-; X64-AVX-NEXT: callq __floatsitf
+; X64-AVX-NEXT: callq __floatsitf@PLT
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -718,7 +718,7 @@ define dso_local void @TestSIToFPI32_F128() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movl {{.*}}(%rip), %edi
-; X64-SSE-NEXT: callq __floatsitf
+; X64-SSE-NEXT: callq __floatsitf@PLT
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -748,7 +748,7 @@ define dso_local void @TestSIToFPI32_F128() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: movl {{.*}}(%rip), %edi
-; X64-AVX-NEXT: callq __floatsitf
+; X64-AVX-NEXT: callq __floatsitf@PLT
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -764,7 +764,7 @@ define dso_local void @TestUIToFPU32_F128() #2 {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movl {{.*}}(%rip), %edi
-; X64-SSE-NEXT: callq __floatunsitf
+; X64-SSE-NEXT: callq __floatunsitf@PLT
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -794,7 +794,7 @@ define dso_local void @TestUIToFPU32_F128() #2 {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: movl {{.*}}(%rip), %edi
-; X64-AVX-NEXT: callq __floatunsitf
+; X64-AVX-NEXT: callq __floatunsitf@PLT
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -810,7 +810,7 @@ define dso_local void @TestSIToFPI64_F128() nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movq {{.*}}(%rip), %rdi
-; X64-SSE-NEXT: callq __floatditf
+; X64-SSE-NEXT: callq __floatditf@PLT
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -841,7 +841,7 @@ define dso_local void @TestSIToFPI64_F128() nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: movq {{.*}}(%rip), %rdi
-; X64-AVX-NEXT: callq __floatditf
+; X64-AVX-NEXT: callq __floatditf@PLT
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -857,7 +857,7 @@ define dso_local void @TestUIToFPU64_F128() #2 {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movq {{.*}}(%rip), %rdi
-; X64-SSE-NEXT: callq __floatunditf
+; X64-SSE-NEXT: callq __floatunditf@PLT
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -888,7 +888,7 @@ define dso_local void @TestUIToFPU64_F128() #2 {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: movq {{.*}}(%rip), %rdi
-; X64-AVX-NEXT: callq __floatunditf
+; X64-AVX-NEXT: callq __floatunditf@PLT
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -905,7 +905,7 @@ define dso_local void @TestSIToFPI128_F128() nounwind {
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movq {{.*}}(%rip), %rdi
; X64-SSE-NEXT: movq vi128+{{.*}}(%rip), %rsi
-; X64-SSE-NEXT: callq __floattitf
+; X64-SSE-NEXT: callq __floattitf@PLT
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -939,7 +939,7 @@ define dso_local void @TestSIToFPI128_F128() nounwind {
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: movq {{.*}}(%rip), %rdi
; X64-AVX-NEXT: movq vi128+{{.*}}(%rip), %rsi
-; X64-AVX-NEXT: callq __floattitf
+; X64-AVX-NEXT: callq __floattitf@PLT
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -956,7 +956,7 @@ define dso_local void @TestUIToFPU128_F128() #2 {
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movq {{.*}}(%rip), %rdi
; X64-SSE-NEXT: movq vu128+{{.*}}(%rip), %rsi
-; X64-SSE-NEXT: callq __floatuntitf
+; X64-SSE-NEXT: callq __floatuntitf@PLT
; X64-SSE-NEXT: movaps %xmm0, {{.*}}(%rip)
; X64-SSE-NEXT: popq %rax
; X64-SSE-NEXT: retq
@@ -990,7 +990,7 @@ define dso_local void @TestUIToFPU128_F128() #2 {
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: movq {{.*}}(%rip), %rdi
; X64-AVX-NEXT: movq vu128+{{.*}}(%rip), %rsi
-; X64-AVX-NEXT: callq __floatuntitf
+; X64-AVX-NEXT: callq __floatuntitf@PLT
; X64-AVX-NEXT: vmovaps %xmm0, {{.*}}(%rip)
; X64-AVX-NEXT: popq %rax
; X64-AVX-NEXT: retq
@@ -1006,7 +1006,7 @@ define dso_local i32 @TestConst128(fp128 %v) nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: movaps {{.*}}(%rip), %xmm1
-; X64-SSE-NEXT: callq __gttf2
+; X64-SSE-NEXT: callq __gttf2@PLT
; X64-SSE-NEXT: xorl %ecx, %ecx
; X64-SSE-NEXT: testl %eax, %eax
; X64-SSE-NEXT: setg %cl
@@ -1038,7 +1038,7 @@ define dso_local i32 @TestConst128(fp128 %v) nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vmovaps {{.*}}(%rip), %xmm1
-; X64-AVX-NEXT: callq __gttf2
+; X64-AVX-NEXT: callq __gttf2@PLT
; X64-AVX-NEXT: xorl %ecx, %ecx
; X64-AVX-NEXT: testl %eax, %eax
; X64-AVX-NEXT: setg %cl
@@ -1057,7 +1057,7 @@ define dso_local i32 @TestConst128Zero(fp128 %v) nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: pushq %rax
; X64-SSE-NEXT: xorps %xmm1, %xmm1
-; X64-SSE-NEXT: callq __gttf2
+; X64-SSE-NEXT: callq __gttf2@PLT
; X64-SSE-NEXT: xorl %ecx, %ecx
; X64-SSE-NEXT: testl %eax, %eax
; X64-SSE-NEXT: setg %cl
@@ -1089,7 +1089,7 @@ define dso_local i32 @TestConst128Zero(fp128 %v) nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: pushq %rax
; X64-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X64-AVX-NEXT: callq __gttf2
+; X64-AVX-NEXT: callq __gttf2@PLT
; X64-AVX-NEXT: xorl %ecx, %ecx
; X64-AVX-NEXT: testl %eax, %eax
; X64-AVX-NEXT: setg %cl
@@ -1121,7 +1121,7 @@ define dso_local i32 @TestBits128(fp128 %ld) nounwind {
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: subq $24, %rsp
; X64-SSE-NEXT: movaps %xmm0, %xmm1
-; X64-SSE-NEXT: callq __multf3
+; X64-SSE-NEXT: callq __multf3@PLT
; X64-SSE-NEXT: movaps %xmm0, (%rsp)
; X64-SSE-NEXT: movq (%rsp), %rcx
; X64-SSE-NEXT: movq %rcx, %rdx
@@ -1167,7 +1167,7 @@ define dso_local i32 @TestBits128(fp128 %ld) nounwind {
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: subq $24, %rsp
; X64-AVX-NEXT: vmovaps %xmm0, %xmm1
-; X64-AVX-NEXT: callq __multf3
+; X64-AVX-NEXT: callq __multf3@PLT
; X64-AVX-NEXT: vmovaps %xmm0, (%rsp)
; X64-AVX-NEXT: movq (%rsp), %rcx
; X64-AVX-NEXT: movq %rcx, %rdx
@@ -1258,10 +1258,10 @@ define fp128 @TestTruncCopysign(fp128 %x, i32 %n) nounwind {
; X64-SSE-NEXT: jl .LBB26_2
; X64-SSE-NEXT: # %bb.1: # %if.then
; X64-SSE-NEXT: pushq %rax
-; X64-SSE-NEXT: callq __trunctfdf2
+; X64-SSE-NEXT: callq __trunctfdf2@PLT
; X64-SSE-NEXT: andps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT: orps {{.*}}(%rip), %xmm0
-; X64-SSE-NEXT: callq __extenddftf2
+; X64-SSE-NEXT: callq __extenddftf2@PLT
; X64-SSE-NEXT: addq $8, %rsp
; X64-SSE-NEXT: .LBB26_2: # %cleanup
; X64-SSE-NEXT: retq
@@ -1322,12 +1322,12 @@ define fp128 @TestTruncCopysign(fp128 %x, i32 %n) nounwind {
; X64-AVX-NEXT: jl .LBB26_2
; X64-AVX-NEXT: # %bb.1: # %if.then
; X64-AVX-NEXT: pushq %rax
-; X64-AVX-NEXT: callq __trunctfdf2
+; X64-AVX-NEXT: callq __trunctfdf2@PLT
; X64-AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vmovddup {{.*#+}} xmm1 = [+Inf,+Inf]
; X64-AVX-NEXT: # xmm1 = mem[0,0]
; X64-AVX-NEXT: vorps %xmm0, %xmm1, %xmm0
-; X64-AVX-NEXT: callq __extenddftf2
+; X64-AVX-NEXT: callq __extenddftf2@PLT
; X64-AVX-NEXT: addq $8, %rsp
; X64-AVX-NEXT: .LBB26_2: # %cleanup
; X64-AVX-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/fp128-i128.ll b/llvm/test/CodeGen/X86/fp128-i128.ll
index 90224de50338..6a70bc24fc6c 100644
--- a/llvm/test/CodeGen/X86/fp128-i128.ll
+++ b/llvm/test/CodeGen/X86/fp128-i128.ll
@@ -139,7 +139,7 @@ define fp128 @TestI128_1(fp128 %x) #0 {
; SSE-NEXT: movq %rcx, (%rsp)
; SSE-NEXT: movaps (%rsp), %xmm0
; SSE-NEXT: movaps {{.*}}(%rip), %xmm1
-; SSE-NEXT: callq __lttf2
+; SSE-NEXT: callq __lttf2@PLT
; SSE-NEXT: xorl %ecx, %ecx
; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: sets %cl
@@ -159,7 +159,7 @@ define fp128 @TestI128_1(fp128 %x) #0 {
; AVX-NEXT: movq %rcx, (%rsp)
; AVX-NEXT: vmovaps (%rsp), %xmm0
; AVX-NEXT: vmovaps {{.*}}(%rip), %xmm1
-; AVX-NEXT: callq __lttf2
+; AVX-NEXT: callq __lttf2@PLT
; AVX-NEXT: xorl %ecx, %ecx
; AVX-NEXT: testl %eax, %eax
; AVX-NEXT: sets %cl
@@ -237,7 +237,7 @@ define fp128 @TestI128_3(fp128 %x, i32* nocapture readnone %ex) #0 {
; SSE-NEXT: jmp .LBB4_3
; SSE-NEXT: .LBB4_2: # %if.then
; SSE-NEXT: movaps {{.*}}(%rip), %xmm1
-; SSE-NEXT: callq __multf3
+; SSE-NEXT: callq __multf3@PLT
; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; SSE-NEXT: movabsq $-9223090561878065153, %rdx # imm = 0x8000FFFFFFFFFFFF
@@ -264,7 +264,7 @@ define fp128 @TestI128_3(fp128 %x, i32* nocapture readnone %ex) #0 {
; AVX-NEXT: jmp .LBB4_3
; AVX-NEXT: .LBB4_2: # %if.then
; AVX-NEXT: vmovaps {{.*}}(%rip), %xmm1
-; AVX-NEXT: callq __multf3
+; AVX-NEXT: callq __multf3@PLT
; AVX-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp)
; AVX-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; AVX-NEXT: movabsq $-9223090561878065153, %rdx # imm = 0x8000FFFFFFFFFFFF
@@ -451,11 +451,11 @@ define dso_local void @TestCopySign({ fp128, fp128 }* noalias nocapture sret({ f
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
-; SSE-NEXT: callq __gttf2
+; SSE-NEXT: callq __gttf2@PLT
; SSE-NEXT: movl %eax, %ebp
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: callq __subtf3
+; SSE-NEXT: callq __subtf3@PLT
; SSE-NEXT: testl %ebp, %ebp
; SSE-NEXT: jle .LBB10_1
; SSE-NEXT: # %bb.2: # %if.then
@@ -488,11 +488,11 @@ define dso_local void @TestCopySign({ fp128, fp128 }* noalias nocapture sret({ f
; AVX-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm1
; AVX-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
-; AVX-NEXT: callq __gttf2
+; AVX-NEXT: callq __gttf2@PLT
; AVX-NEXT: movl %eax, %ebp
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vmovaps %xmm0, %xmm1
-; AVX-NEXT: callq __subtf3
+; AVX-NEXT: callq __subtf3@PLT
; AVX-NEXT: testl %ebp, %ebp
; AVX-NEXT: jle .LBB10_1
; AVX-NEXT: # %bb.2: # %if.then
diff --git a/llvm/test/CodeGen/X86/half.ll b/llvm/test/CodeGen/X86/half.ll
index 7789e7926041..d9629730fce2 100644
--- a/llvm/test/CodeGen/X86/half.ll
+++ b/llvm/test/CodeGen/X86/half.ll
@@ -103,7 +103,7 @@ define double @test_extend64(half* %addr) #0 {
; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: pushq %rax
; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee@PLT
; CHECK-LIBCALL-NEXT: cvtss2sd %xmm0, %xmm0
; CHECK-LIBCALL-NEXT: popq %rax
; CHECK-LIBCALL-NEXT: retq
@@ -135,7 +135,7 @@ define void @test_trunc32(float %in, half* %addr) #0 {
; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: pushq %rbx
; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
-; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee@PLT
; CHECK-LIBCALL-NEXT: movw %ax, (%rbx)
; CHECK-LIBCALL-NEXT: popq %rbx
; CHECK-LIBCALL-NEXT: retq
@@ -168,7 +168,7 @@ define void @test_trunc64(double %in, half* %addr) #0 {
; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: movq %rdi, %rbx
-; CHECK-NEXT: callq __truncdfhf2
+; CHECK-NEXT: callq __truncdfhf2@PLT
; CHECK-NEXT: movw %ax, (%rbx)
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: retq
@@ -195,7 +195,7 @@ define i64 @test_fptosi_i64(half* %p) #0 {
; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: pushq %rax
; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee@PLT
; CHECK-LIBCALL-NEXT: cvttss2si %xmm0, %rax
; CHECK-LIBCALL-NEXT: popq %rcx
; CHECK-LIBCALL-NEXT: retq
@@ -230,7 +230,7 @@ define void @test_sitofp_i64(i64 %a, half* %p) #0 {
; CHECK-LIBCALL-NEXT: pushq %rbx
; CHECK-LIBCALL-NEXT: movq %rsi, %rbx
; CHECK-LIBCALL-NEXT: cvtsi2ss %rdi, %xmm0
-; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee@PLT
; CHECK-LIBCALL-NEXT: movw %ax, (%rbx)
; CHECK-LIBCALL-NEXT: popq %rbx
; CHECK-LIBCALL-NEXT: retq
@@ -268,7 +268,7 @@ define i64 @test_fptoui_i64(half* %p) #0 {
; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: pushq %rax
; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee@PLT
; CHECK-LIBCALL-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-LIBCALL-NEXT: movaps %xmm0, %xmm2
; CHECK-LIBCALL-NEXT: subss %xmm1, %xmm2
@@ -330,7 +330,7 @@ define void @test_uitofp_i64(i64 %a, half* %p) #0 {
; CHECK-LIBCALL-NEXT: cvtsi2ss %rdi, %xmm0
; CHECK-LIBCALL-NEXT: addss %xmm0, %xmm0
; CHECK-LIBCALL-NEXT: .LBB10_3:
-; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee@PLT
; CHECK-LIBCALL-NEXT: movw %ax, (%rbx)
; CHECK-LIBCALL-NEXT: popq %rbx
; CHECK-LIBCALL-NEXT: retq
@@ -389,21 +389,21 @@ define <4 x float> @test_extend32_vec4(<4 x half>* %p) #0 {
; CHECK-LIBCALL-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0
; CHECK-LIBCALL-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-LIBCALL-NEXT: pextrw $1, %xmm0, %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee@PLT
; CHECK-LIBCALL-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-LIBCALL-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-LIBCALL-NEXT: pextrw $0, %xmm0, %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee@PLT
; CHECK-LIBCALL-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-LIBCALL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-LIBCALL-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-LIBCALL-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-LIBCALL-NEXT: pextrw $1, %xmm0, %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee@PLT
; CHECK-LIBCALL-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-LIBCALL-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-LIBCALL-NEXT: pextrw $0, %xmm0, %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee@PLT
; CHECK-LIBCALL-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; CHECK-LIBCALL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-LIBCALL-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
@@ -476,21 +476,21 @@ define <4 x double> @test_extend64_vec4(<4 x half>* %p) #0 {
; CHECK-LIBCALL-NEXT: movzwl 6(%rdi), %ebp
; CHECK-LIBCALL-NEXT: movzwl (%rdi), %ebx
; CHECK-LIBCALL-NEXT: movzwl 2(%rdi), %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee@PLT
; CHECK-LIBCALL-NEXT: cvtss2sd %xmm0, %xmm0
; CHECK-LIBCALL-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-LIBCALL-NEXT: movl %ebx, %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee@PLT
; CHECK-LIBCALL-NEXT: cvtss2sd %xmm0, %xmm0
; CHECK-LIBCALL-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-LIBCALL-NEXT: # xmm0 = xmm0[0],mem[0]
; CHECK-LIBCALL-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-LIBCALL-NEXT: movl %ebp, %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee@PLT
; CHECK-LIBCALL-NEXT: cvtss2sd %xmm0, %xmm0
; CHECK-LIBCALL-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-LIBCALL-NEXT: movl %r14d, %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee@PLT
; CHECK-LIBCALL-NEXT: cvtss2sd %xmm0, %xmm1
; CHECK-LIBCALL-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-LIBCALL-NEXT: # xmm1 = xmm1[0],mem[0]
@@ -559,18 +559,18 @@ define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) #0 {
; BWON-NOF16C-NEXT: movq %rdi, %rbx
; BWON-NOF16C-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; BWON-NOF16C-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; BWON-NOF16C-NEXT: callq __gnu_f2h_ieee
+; BWON-NOF16C-NEXT: callq __gnu_f2h_ieee at PLT
; BWON-NOF16C-NEXT: movl %eax, %r14d
; BWON-NOF16C-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; BWON-NOF16C-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; BWON-NOF16C-NEXT: callq __gnu_f2h_ieee
+; BWON-NOF16C-NEXT: callq __gnu_f2h_ieee at PLT
; BWON-NOF16C-NEXT: movl %eax, %r15d
; BWON-NOF16C-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; BWON-NOF16C-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; BWON-NOF16C-NEXT: callq __gnu_f2h_ieee
+; BWON-NOF16C-NEXT: callq __gnu_f2h_ieee at PLT
; BWON-NOF16C-NEXT: movl %eax, %ebp
; BWON-NOF16C-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
-; BWON-NOF16C-NEXT: callq __gnu_f2h_ieee
+; BWON-NOF16C-NEXT: callq __gnu_f2h_ieee at PLT
; BWON-NOF16C-NEXT: movw %ax, (%rbx)
; BWON-NOF16C-NEXT: movw %bp, 6(%rbx)
; BWON-NOF16C-NEXT: movw %r15w, 4(%rbx)
@@ -592,18 +592,18 @@ define void @test_trunc32_vec4(<4 x float> %a, <4 x half>* %p) #0 {
; BWOFF-NEXT: movq %rdi, %rbx
; BWOFF-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; BWOFF-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; BWOFF-NEXT: callq __gnu_f2h_ieee
+; BWOFF-NEXT: callq __gnu_f2h_ieee at PLT
; BWOFF-NEXT: movw %ax, %r14w
; BWOFF-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; BWOFF-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; BWOFF-NEXT: callq __gnu_f2h_ieee
+; BWOFF-NEXT: callq __gnu_f2h_ieee at PLT
; BWOFF-NEXT: movw %ax, %r15w
; BWOFF-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; BWOFF-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; BWOFF-NEXT: callq __gnu_f2h_ieee
+; BWOFF-NEXT: callq __gnu_f2h_ieee at PLT
; BWOFF-NEXT: movw %ax, %bp
; BWOFF-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
-; BWOFF-NEXT: callq __gnu_f2h_ieee
+; BWOFF-NEXT: callq __gnu_f2h_ieee at PLT
; BWOFF-NEXT: movw %ax, (%rbx)
; BWOFF-NEXT: movw %bp, 6(%rbx)
; BWOFF-NEXT: movw %r15w, 4(%rbx)
@@ -674,17 +674,17 @@ define void @test_trunc64_vec4(<4 x double> %a, <4 x half>* %p) #0 {
; BWON-NOF16C-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
; BWON-NOF16C-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; BWON-NOF16C-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; BWON-NOF16C-NEXT: callq __truncdfhf2
+; BWON-NOF16C-NEXT: callq __truncdfhf2 at PLT
; BWON-NOF16C-NEXT: movl %eax, %r14d
; BWON-NOF16C-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; BWON-NOF16C-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; BWON-NOF16C-NEXT: callq __truncdfhf2
+; BWON-NOF16C-NEXT: callq __truncdfhf2 at PLT
; BWON-NOF16C-NEXT: movl %eax, %r15d
; BWON-NOF16C-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; BWON-NOF16C-NEXT: callq __truncdfhf2
+; BWON-NOF16C-NEXT: callq __truncdfhf2 at PLT
; BWON-NOF16C-NEXT: movl %eax, %ebp
; BWON-NOF16C-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
-; BWON-NOF16C-NEXT: callq __truncdfhf2
+; BWON-NOF16C-NEXT: callq __truncdfhf2 at PLT
; BWON-NOF16C-NEXT: movw %ax, 4(%rbx)
; BWON-NOF16C-NEXT: movw %bp, (%rbx)
; BWON-NOF16C-NEXT: movw %r15w, 6(%rbx)
@@ -707,17 +707,17 @@ define void @test_trunc64_vec4(<4 x double> %a, <4 x half>* %p) #0 {
; BWOFF-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
; BWOFF-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; BWOFF-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; BWOFF-NEXT: callq __truncdfhf2
+; BWOFF-NEXT: callq __truncdfhf2 at PLT
; BWOFF-NEXT: movw %ax, %r14w
; BWOFF-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; BWOFF-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; BWOFF-NEXT: callq __truncdfhf2
+; BWOFF-NEXT: callq __truncdfhf2 at PLT
; BWOFF-NEXT: movw %ax, %r15w
; BWOFF-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; BWOFF-NEXT: callq __truncdfhf2
+; BWOFF-NEXT: callq __truncdfhf2 at PLT
; BWOFF-NEXT: movw %ax, %bp
; BWOFF-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
-; BWOFF-NEXT: callq __truncdfhf2
+; BWOFF-NEXT: callq __truncdfhf2 at PLT
; BWOFF-NEXT: movw %ax, 4(%rbx)
; BWOFF-NEXT: movw %bp, (%rbx)
; BWOFF-NEXT: movw %r15w, 6(%rbx)
@@ -740,22 +740,22 @@ define void @test_trunc64_vec4(<4 x double> %a, <4 x half>* %p) #0 {
; BWON-F16C-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; BWON-F16C-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; BWON-F16C-NEXT: vzeroupper
-; BWON-F16C-NEXT: callq __truncdfhf2
+; BWON-F16C-NEXT: callq __truncdfhf2 at PLT
; BWON-F16C-NEXT: movl %eax, %r14d
; BWON-F16C-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; BWON-F16C-NEXT: vextractf128 $1, %ymm0, %xmm0
; BWON-F16C-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; BWON-F16C-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; BWON-F16C-NEXT: vzeroupper
-; BWON-F16C-NEXT: callq __truncdfhf2
+; BWON-F16C-NEXT: callq __truncdfhf2 at PLT
; BWON-F16C-NEXT: movl %eax, %r15d
; BWON-F16C-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; BWON-F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; BWON-F16C-NEXT: vzeroupper
-; BWON-F16C-NEXT: callq __truncdfhf2
+; BWON-F16C-NEXT: callq __truncdfhf2 at PLT
; BWON-F16C-NEXT: movl %eax, %ebp
; BWON-F16C-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; BWON-F16C-NEXT: callq __truncdfhf2
+; BWON-F16C-NEXT: callq __truncdfhf2 at PLT
; BWON-F16C-NEXT: movw %ax, 4(%rbx)
; BWON-F16C-NEXT: movw %bp, (%rbx)
; BWON-F16C-NEXT: movw %r15w, 6(%rbx)
@@ -815,15 +815,15 @@ define half @test_f80trunc_nodagcombine() #0 {
; CHECK-LIBCALL-LABEL: test_f80trunc_nodagcombine:
; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: pushq %rax
-; CHECK-LIBCALL-NEXT: callq test_floatret
-; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee
+; CHECK-LIBCALL-NEXT: callq test_floatret at PLT
+; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee at PLT
; CHECK-LIBCALL-NEXT: popq %rcx
; CHECK-LIBCALL-NEXT: retq
;
; BWON-F16C-LABEL: test_f80trunc_nodagcombine:
; BWON-F16C: # %bb.0:
; BWON-F16C-NEXT: pushq %rax
-; BWON-F16C-NEXT: callq test_floatret
+; BWON-F16C-NEXT: callq test_floatret at PLT
; BWON-F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; BWON-F16C-NEXT: vmovd %xmm0, %eax
; BWON-F16C-NEXT: # kill: def $ax killed $ax killed $eax
@@ -833,7 +833,7 @@ define half @test_f80trunc_nodagcombine() #0 {
; CHECK-I686-LABEL: test_f80trunc_nodagcombine:
; CHECK-I686: # %bb.0:
; CHECK-I686-NEXT: subl $12, %esp
-; CHECK-I686-NEXT: calll test_floatret
+; CHECK-I686-NEXT: calll test_floatret at PLT
; CHECK-I686-NEXT: fstps (%esp)
; CHECK-I686-NEXT: calll __gnu_f2h_ieee
; CHECK-I686-NEXT: addl $12, %esp
@@ -853,14 +853,14 @@ define float @test_sitofp_fadd_i32(i32 %a, half* %b) #0 {
; CHECK-LIBCALL-NEXT: subq $16, %rsp
; CHECK-LIBCALL-NEXT: movzwl (%rsi), %ebx
; CHECK-LIBCALL-NEXT: cvtsi2ss %edi, %xmm0
-; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee at PLT
; CHECK-LIBCALL-NEXT: movzwl %ax, %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee at PLT
; CHECK-LIBCALL-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; CHECK-LIBCALL-NEXT: movl %ebx, %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee at PLT
; CHECK-LIBCALL-NEXT: addss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
-; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_f2h_ieee at PLT
; CHECK-LIBCALL-NEXT: movzwl %ax, %edi
; CHECK-LIBCALL-NEXT: addq $16, %rsp
; CHECK-LIBCALL-NEXT: popq %rbx
@@ -920,7 +920,7 @@ define half @PR40273(half) #0 {
; CHECK-LIBCALL: # %bb.0:
; CHECK-LIBCALL-NEXT: pushq %rax
; CHECK-LIBCALL-NEXT: movzwl %di, %edi
-; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
+; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee at PLT
; CHECK-LIBCALL-NEXT: xorl %eax, %eax
; CHECK-LIBCALL-NEXT: xorps %xmm1, %xmm1
; CHECK-LIBCALL-NEXT: ucomiss %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/select.ll b/llvm/test/CodeGen/X86/select.ll
index ebd5c5495a57..012f7f035fec 100644
--- a/llvm/test/CodeGen/X86/select.ll
+++ b/llvm/test/CodeGen/X86/select.ll
@@ -102,7 +102,7 @@ define i32 @test2() nounwind {
;
; MCU-LABEL: test2:
; MCU: # %bb.0: # %entry
-; MCU-NEXT: calll return_false
+; MCU-NEXT: calll return_false at PLT
; MCU-NEXT: xorl %ecx, %ecx
; MCU-NEXT: testb $1, %al
; MCU-NEXT: jne .LBB1_2