[clang] [llvm] [clang] Always pass fp128 arguments indirectly on Windows (PR #115052)
Trevor Gross via cfe-commits
cfe-commits at lists.llvm.org
Wed Feb 26 01:00:16 PST 2025
https://github.com/tgross35 updated https://github.com/llvm/llvm-project/pull/115052
From 60b07161e8668c2bc3ee5d7a4c470a90a7673178 Mon Sep 17 00:00:00 2001
From: Trevor Gross <tmgross at umich.edu>
Date: Tue, 5 Nov 2024 07:00:35 -0500
Subject: [PATCH 1/3] [clang] Add fp128 ABI tests for MinGW (NFC)
Duplicate `win64-i128.c` to `win64-fp128.c` and update it to reflect the
current behavior of `__float128`.
---
clang/test/CodeGen/win64-fp128.c | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
create mode 100644 clang/test/CodeGen/win64-fp128.c
diff --git a/clang/test/CodeGen/win64-fp128.c b/clang/test/CodeGen/win64-fp128.c
new file mode 100644
index 0000000000000..33e2441ddf314
--- /dev/null
+++ b/clang/test/CodeGen/win64-fp128.c
@@ -0,0 +1,20 @@
+// RUN: %clang_cc1 -triple x86_64-windows-gnu -emit-llvm -o - %s \
+// RUN: | FileCheck %s --check-prefix=GNU64
+// __float128 is unsupported on MSVC
+
+__float128 fp128_ret(void) { return 0; }
+// GNU64: define dso_local fp128 @fp128_ret()
+
+__float128 fp128_args(__float128 a, __float128 b) { return a * b; }
+// GNU64: define dso_local fp128 @fp128_args(fp128 noundef %a, fp128 noundef %b)
+
+void fp128_vararg(int a, ...) {
+ // GNU64-LABEL: define dso_local void @fp128_vararg
+ __builtin_va_list ap;
+ __builtin_va_start(ap, a);
+ __float128 i = __builtin_va_arg(ap, __float128);
+ // movaps xmm0, xmmword ptr [rax]
+ // GNU64: load ptr, ptr
+ // GNU64: load fp128, ptr
+ __builtin_va_end(ap);
+}
From 8b646bf3e77e3168e70e3c90afc2de98431fa8cc Mon Sep 17 00:00:00 2001
From: Trevor Gross <tmgross at umich.edu>
Date: Tue, 5 Nov 2024 05:53:10 -0500
Subject: [PATCH 2/3] [clang] Always pass `fp128` arguments indirectly on
Windows
Clang currently passes and returns `__float128` in vector registers on
MinGW targets. However, the Windows x86-64 calling convention [1] states
the following:
__m128 types, arrays, and strings are never passed by immediate
value. Instead, a pointer is passed to memory allocated by the
caller. Structs and unions of size 8, 16, 32, or 64 bits, and __m64
types, are passed as if they were integers of the same size. Structs
or unions of other sizes are passed as a pointer to memory allocated
by the caller. For these aggregate types passed as a pointer,
including __m128, the caller-allocated temporary memory must be
16-byte aligned.
Based on the above, `__float128` should be passed indirectly; this is
what MinGW GCC already does, so change Clang to match. Passing it by
value also breaks varargs: the example below completes successfully when
built with GCC but crashes at runtime when built with Clang:
#include <stdarg.h>

void va_f128(int count, ...) {
va_list args;
va_start(args, count);
__float128 val = va_arg(args, __float128);
va_end(args);
}
int main() {
va_f128(0, 0.0);
}
This patch fixes the above. It also resolves crashes when calling
GCC-built f128 libcalls.
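As a concrete illustration of the libcall case (a sketch, not part of the
patch; the function name `scale` is made up), `__float128` arithmetic on
MinGW lowers to a libgcc helper such as `__multf3`:

__float128 scale(__float128 x, __float128 y) {
    /* On x86_64-windows-gnu this multiplication becomes a call to the
       libgcc helper __multf3. Before this patch Clang passed __float128
       values in vector registers, while the GCC-built helper expects the
       indirect convention described above; that mismatch is what caused
       the crashes mentioned here. */
    return x * y;
}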
Regarding return values, the documentation states:
A scalar return value that can fit into 64 bits, including the __m64
type, is returned through RAX. Non-scalar types including floats,
doubles, and vector types such as __m128, __m128i, __m128d are
returned in XMM0.
This makes it sound like it should be acceptable to return `__float128`
in XMM0. However, GCC returns `__float128` on the stack, so do the same
here to be consistent.
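To make the return-value change concrete, here is a minimal sketch using
the function from the new test (the IR signatures below are the ones
checked in the updated `win64-fp128.c` further down):

__float128 fp128_ret(void) { return 0; }
/* Previously Clang emitted:
     define dso_local fp128 @fp128_ret()
   With this patch the result is instead written through a hidden,
   caller-allocated, 16-byte-aligned return slot, matching GCC:
     define dso_local void @fp128_ret(ptr dead_on_unwind noalias
         writable sret(fp128) align 16 %agg.result) */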
Clang's MSVC targets do not support `__float128` or `_Float128`, but
these changes would also apply there if support is eventually enabled.
[1]: https://learn.microsoft.com/en-us/cpp/build/x64-calling-convention?view=msvc-170
---
clang/lib/CodeGen/Targets/X86.cpp | 5 +++++
clang/test/CodeGen/win64-fp128.c | 5 ++---
2 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/clang/lib/CodeGen/Targets/X86.cpp b/clang/lib/CodeGen/Targets/X86.cpp
index b7a1374d5b399..513a54c39e522 100644
--- a/clang/lib/CodeGen/Targets/X86.cpp
+++ b/clang/lib/CodeGen/Targets/X86.cpp
@@ -3403,6 +3403,11 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
llvm::Type::getInt64Ty(getVMContext()), 2));
+ case BuiltinType::Float128:
+    // f128 is too large to fit in integer registers, so the Windows ABI
+    // requires it to be passed on the stack. GCC does the same.
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+
default:
break;
}
diff --git a/clang/test/CodeGen/win64-fp128.c b/clang/test/CodeGen/win64-fp128.c
index 33e2441ddf314..3a864e139d244 100644
--- a/clang/test/CodeGen/win64-fp128.c
+++ b/clang/test/CodeGen/win64-fp128.c
@@ -3,17 +3,16 @@
// __float128 is unsupported on MSVC
__float128 fp128_ret(void) { return 0; }
-// GNU64: define dso_local fp128 @fp128_ret()
+// GNU64: define dso_local void @fp128_ret(ptr dead_on_unwind noalias writable sret(fp128) align 16 %agg.result)
__float128 fp128_args(__float128 a, __float128 b) { return a * b; }
-// GNU64: define dso_local fp128 @fp128_args(fp128 noundef %a, fp128 noundef %b)
+// GNU64: define dso_local void @fp128_args(ptr dead_on_unwind noalias writable sret(fp128) align 16 %agg.result, ptr noundef %0, ptr noundef %1)
void fp128_vararg(int a, ...) {
// GNU64-LABEL: define dso_local void @fp128_vararg
__builtin_va_list ap;
__builtin_va_start(ap, a);
__float128 i = __builtin_va_arg(ap, __float128);
- // movaps xmm0, xmmword ptr [rax]
// GNU64: load ptr, ptr
// GNU64: load fp128, ptr
__builtin_va_end(ap);
From e73d83e9d7787dba97b96adca96bb17819ef99b5 Mon Sep 17 00:00:00 2001
From: Trevor Gross <tmgross at umich.edu>
Date: Wed, 26 Feb 2025 08:37:07 +0000
Subject: [PATCH 3/3] [windows] Add fp128 libcall ABI tests for x86 Windows
(NFC)
Add Windows invocations to existing fp128 libcall tests.
---
.../test/CodeGen/X86/fp128-libcalls-strict.ll | 1654 +++++++++++++++++
llvm/test/CodeGen/X86/fp128-libcalls.ll | 1074 +++++++++++
2 files changed, 2728 insertions(+)
diff --git a/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll b/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
index 5263e0d4f6f39..c9b1ac1e9bff6 100644
--- a/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
+++ b/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
@@ -8,6 +8,12 @@
; RUN: llc < %s -O2 -mtriple=i686-linux-gnu -mattr=+sse2 \
; RUN: -enable-legalize-types-checking \
; RUN: | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -O2 -mtriple=x86_64-pc-windows-msvc \
+; RUN: -enable-legalize-types-checking \
+; RUN: | FileCheck %s --check-prefix=WIN
+; RUN: llc < %s -O2 -mtriple=i686-pc-windows-msvc \
+; RUN: -enable-legalize-types-checking \
+; RUN: | FileCheck %s --check-prefix=WIN-X86
; Check all soft floating point library function calls.
@@ -57,6 +63,49 @@ define fp128 @add(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: add:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq __addtf3
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: add:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 40(%ebp)
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll ___addtf3
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%add = call fp128 @llvm.experimental.constrained.fadd.f128(fp128 %x, fp128 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %add
@@ -108,6 +157,49 @@ define fp128 @sub(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: sub:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq __subtf3
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: sub:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 40(%ebp)
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll ___subtf3
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%sub = call fp128 @llvm.experimental.constrained.fsub.f128(fp128 %x, fp128 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %sub
@@ -159,6 +251,49 @@ define fp128 @mul(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: mul:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq __multf3
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: mul:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 40(%ebp)
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll ___multf3
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%mul = call fp128 @llvm.experimental.constrained.fmul.f128(fp128 %x, fp128 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %mul
@@ -210,6 +345,49 @@ define fp128 @div(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: div:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq __divtf3
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: div:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 40(%ebp)
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll ___divtf3
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%div = call fp128 @llvm.experimental.constrained.fdiv.f128(fp128 %x, fp128 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %div
@@ -258,6 +436,53 @@ define fp128 @fma(fp128 %x, fp128 %y, fp128 %z) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: fma:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq fmal
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: fma:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 56(%ebp)
+; WIN-X86-NEXT: pushl 52(%ebp)
+; WIN-X86-NEXT: pushl 48(%ebp)
+; WIN-X86-NEXT: pushl 44(%ebp)
+; WIN-X86-NEXT: pushl 40(%ebp)
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _fmal
+; WIN-X86-NEXT: addl $52, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%fma = call fp128 @llvm.experimental.constrained.fma.f128(fp128 %x, fp128 %y, fp128 %z, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %fma
@@ -302,6 +527,49 @@ define fp128 @frem(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: frem:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq fmodl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: frem:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 40(%ebp)
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _fmodl
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%div = call fp128 @llvm.experimental.constrained.frem.f128(fp128 %x, fp128 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %div
@@ -342,6 +610,45 @@ define fp128 @ceil(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: ceil:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq ceill
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: ceil:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _ceill
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%ceil = call fp128 @llvm.experimental.constrained.ceil.f128(fp128 %x, metadata !"fpexcept.strict") #0
ret fp128 %ceil
@@ -382,6 +689,45 @@ define fp128 @acos(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: acos:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq acosl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: acos:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _acosl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%acos = call fp128 @llvm.experimental.constrained.acos.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %acos
@@ -422,6 +768,45 @@ define fp128 @cos(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: cos:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq cosl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: cos:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _cosl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%cos = call fp128 @llvm.experimental.constrained.cos.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %cos
@@ -462,6 +847,45 @@ define fp128 @cosh(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: cosh:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq coshl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: cosh:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _coshl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%cosh = call fp128 @llvm.experimental.constrained.cosh.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %cosh
@@ -502,6 +926,45 @@ define fp128 @exp(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: exp:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq expl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: exp:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _expl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%exp = call fp128 @llvm.experimental.constrained.exp.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %exp
@@ -542,6 +1005,45 @@ define fp128 @exp2(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: exp2:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq exp2l
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: exp2:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _exp2l
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%exp2 = call fp128 @llvm.experimental.constrained.exp2.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %exp2
@@ -582,6 +1084,45 @@ define fp128 @floor(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: floor:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq floorl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: floor:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _floorl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%floor = call fp128 @llvm.experimental.constrained.floor.f128(fp128 %x, metadata !"fpexcept.strict") #0
ret fp128 %floor
@@ -622,6 +1163,45 @@ define fp128 @log(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: log:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq logl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: log:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _logl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%log = call fp128 @llvm.experimental.constrained.log.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %log
@@ -662,6 +1242,45 @@ define fp128 @log10(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: log10:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq log10l
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: log10:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _log10l
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%log10 = call fp128 @llvm.experimental.constrained.log10.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %log10
@@ -702,6 +1321,45 @@ define fp128 @log2(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: log2:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq log2l
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: log2:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _log2l
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%log2 = call fp128 @llvm.experimental.constrained.log2.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %log2
@@ -746,6 +1404,49 @@ define fp128 @maxnum(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: maxnum:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq fmaxl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: maxnum:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 40(%ebp)
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _fmaxl
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%maxnum = call fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %x, fp128 %y, metadata !"fpexcept.strict") #0
ret fp128 %maxnum
@@ -790,6 +1491,49 @@ define fp128 @minnum(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: minnum:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq fminl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: minnum:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 40(%ebp)
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _fminl
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%minnum = call fp128 @llvm.experimental.constrained.minnum.f128(fp128 %x, fp128 %y, metadata !"fpexcept.strict") #0
ret fp128 %minnum
@@ -830,6 +1574,45 @@ define fp128 @nearbyint(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: nearbyint:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq nearbyintl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: nearbyint:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _nearbyintl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%nearbyint = call fp128 @llvm.experimental.constrained.nearbyint.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %nearbyint
@@ -874,6 +1657,49 @@ define fp128 @pow(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: pow:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq powl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: pow:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 40(%ebp)
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _powl
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%pow = call fp128 @llvm.experimental.constrained.pow.f128(fp128 %x, fp128 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %pow
@@ -922,6 +1748,46 @@ define fp128 @powi(fp128 %x, i32 %y) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: powi:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq __powitf2
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: powi:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll ___powitf2
+; WIN-X86-NEXT: addl $24, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%powi = call fp128 @llvm.experimental.constrained.powi.f128(fp128 %x, i32 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %powi
@@ -962,6 +1828,45 @@ define fp128 @rint(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: rint:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq rintl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: rint:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _rintl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%rint = call fp128 @llvm.experimental.constrained.rint.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %rint
@@ -1002,6 +1907,45 @@ define fp128 @round(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: round:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq roundl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: round:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _roundl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%round = call fp128 @llvm.experimental.constrained.round.f128(fp128 %x, metadata !"fpexcept.strict") #0
ret fp128 %round
@@ -1042,6 +1986,45 @@ define fp128 @roundeven(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: roundeven:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq roundevenl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: roundeven:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _roundevenl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%roundeven = call fp128 @llvm.experimental.constrained.roundeven.f128(fp128 %x, metadata !"fpexcept.strict") #0
ret fp128 %roundeven
@@ -1082,6 +2065,45 @@ define fp128 @asin(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: asin:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq asinl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: asin:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _asinl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%asin = call fp128 @llvm.experimental.constrained.asin.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %asin
@@ -1122,6 +2144,45 @@ define fp128 @sin(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: sin:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq sinl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: sin:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _sinl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%sin = call fp128 @llvm.experimental.constrained.sin.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %sin
@@ -1162,6 +2223,45 @@ define fp128 @sinh(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: sinh:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq sinhl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: sinh:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _sinhl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%sinh = call fp128 @llvm.experimental.constrained.sinh.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %sinh
@@ -1202,6 +2302,45 @@ define fp128 @sqrt(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: sqrt:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq sqrtl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: sqrt:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _sqrtl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%sqrt = call fp128 @llvm.experimental.constrained.sqrt.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %sqrt
@@ -1242,6 +2381,45 @@ define fp128 @atan(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: atan:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq atanl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: atan:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _atanl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%atan = call fp128 @llvm.experimental.constrained.atan.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %atan
@@ -1286,6 +2464,49 @@ define fp128 @atan2(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: atan2:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq atan2l
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: atan2:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 40(%ebp)
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _atan2l
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%atan2 = call fp128 @llvm.experimental.constrained.atan2.f128(fp128 %x, fp128 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %atan2
@@ -1326,6 +2547,45 @@ define fp128 @tan(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: tan:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq tanl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: tan:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _tanl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%tan = call fp128 @llvm.experimental.constrained.tan.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %tan
@@ -1366,6 +2626,45 @@ define fp128 @tanh(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: tanh:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq tanhl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: tanh:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _tanhl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%tanh = call fp128 @llvm.experimental.constrained.tanh.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret fp128 %tanh
@@ -1406,6 +2705,45 @@ define fp128 @trunc(fp128 %x) nounwind strictfp {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: trunc:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq truncl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: trunc:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _truncl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 8(%esi)
+; WIN-X86-NEXT: movl %edx, 12(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%trunc = call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %x, metadata !"fpexcept.strict") #0
ret fp128 %trunc
@@ -1436,6 +2774,23 @@ define i32 @lrint(fp128 %x) nounwind strictfp {
; X86-NEXT: calll lrintl
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: lrint:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq lrintl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: lrint:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: calll _lrintl
+; WIN-X86-NEXT: addl $16, %esp
+; WIN-X86-NEXT: retl
entry:
%rint = call i32 @llvm.experimental.constrained.lrint.i32.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret i32 %rint
@@ -1466,6 +2821,23 @@ define i64 @llrint(fp128 %x) nounwind strictfp {
; X86-NEXT: calll llrintl
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: llrint:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq llrintl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: llrint:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: calll _llrintl
+; WIN-X86-NEXT: addl $16, %esp
+; WIN-X86-NEXT: retl
entry:
%rint = call i64 @llvm.experimental.constrained.llrint.i64.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret i64 %rint
@@ -1496,6 +2868,23 @@ define i32 @lround(fp128 %x) nounwind strictfp {
; X86-NEXT: calll lroundl
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: lround:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq lroundl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: lround:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: calll _lroundl
+; WIN-X86-NEXT: addl $16, %esp
+; WIN-X86-NEXT: retl
entry:
%round = call i32 @llvm.experimental.constrained.lround.i32.f128(fp128 %x, metadata !"fpexcept.strict") #0
ret i32 %round
@@ -1526,6 +2915,23 @@ define i64 @llround(fp128 %x) nounwind strictfp {
; X86-NEXT: calll llroundl
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: llround:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq llroundl
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: llround:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: calll _llroundl
+; WIN-X86-NEXT: addl $16, %esp
+; WIN-X86-NEXT: retl
entry:
%round = call i64 @llvm.experimental.constrained.llround.i64.f128(fp128 %x, metadata !"fpexcept.strict") #0
ret i64 %round
@@ -1601,6 +3007,48 @@ define i64 @cmp(i64 %a, i64 %b, fp128 %x, fp128 %y) #0 {
; X86-NEXT: movl 4(%ecx), %edx
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: cmp:
+; WIN: # %bb.0:
+; WIN-NEXT: pushq %rsi
+; WIN-NEXT: pushq %rdi
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: movaps %xmm3, %xmm1
+; WIN-NEXT: movaps %xmm2, %xmm0
+; WIN-NEXT: movq %rdx, %rsi
+; WIN-NEXT: movq %rcx, %rdi
+; WIN-NEXT: callq __eqtf2
+; WIN-NEXT: testl %eax, %eax
+; WIN-NEXT: cmovneq %rsi, %rdi
+; WIN-NEXT: movq %rdi, %rax
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: popq %rdi
+; WIN-NEXT: popq %rsi
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: cmp:
+; WIN-X86: # %bb.0:
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: calll ___eqtf2
+; WIN-X86-NEXT: addl $32, %esp
+; WIN-X86-NEXT: testl %eax, %eax
+; WIN-X86-NEXT: je LBB37_1
+; WIN-X86-NEXT: # %bb.2:
+; WIN-X86-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: jmp LBB37_3
+; WIN-X86-NEXT: LBB37_1:
+; WIN-X86-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: LBB37_3:
+; WIN-X86-NEXT: movl (%ecx), %eax
+; WIN-X86-NEXT: movl 4(%ecx), %edx
+; WIN-X86-NEXT: retl
%cond = call i1 @llvm.experimental.constrained.fcmp.f128(
fp128 %x, fp128 %y,
metadata !"oeq",
@@ -1679,6 +3127,48 @@ define i64 @cmps(i64 %a, i64 %b, fp128 %x, fp128 %y) #0 {
; X86-NEXT: movl 4(%ecx), %edx
; X86-NEXT: addl $12, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: cmps:
+; WIN: # %bb.0:
+; WIN-NEXT: pushq %rsi
+; WIN-NEXT: pushq %rdi
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: movaps %xmm3, %xmm1
+; WIN-NEXT: movaps %xmm2, %xmm0
+; WIN-NEXT: movq %rdx, %rsi
+; WIN-NEXT: movq %rcx, %rdi
+; WIN-NEXT: callq __eqtf2
+; WIN-NEXT: testl %eax, %eax
+; WIN-NEXT: cmovneq %rsi, %rdi
+; WIN-NEXT: movq %rdi, %rax
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: popq %rdi
+; WIN-NEXT: popq %rsi
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: cmps:
+; WIN-X86: # %bb.0:
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: calll ___eqtf2
+; WIN-X86-NEXT: addl $32, %esp
+; WIN-X86-NEXT: testl %eax, %eax
+; WIN-X86-NEXT: je LBB38_1
+; WIN-X86-NEXT: # %bb.2:
+; WIN-X86-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: jmp LBB38_3
+; WIN-X86-NEXT: LBB38_1:
+; WIN-X86-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: LBB38_3:
+; WIN-X86-NEXT: movl (%ecx), %eax
+; WIN-X86-NEXT: movl 4(%ecx), %edx
+; WIN-X86-NEXT: retl
%cond = call i1 @llvm.experimental.constrained.fcmps.f128(
fp128 %x, fp128 %y,
metadata !"oeq",
@@ -1815,6 +3305,88 @@ define i64 @cmp_ueq_q(i64 %a, i64 %b, fp128 %x, fp128 %y) #0 {
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
+;
+; WIN-LABEL: cmp_ueq_q:
+; WIN: # %bb.0:
+; WIN-NEXT: pushq %rsi
+; WIN-NEXT: pushq %rdi
+; WIN-NEXT: pushq %rbx
+; WIN-NEXT: subq $64, %rsp
+; WIN-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; WIN-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; WIN-NEXT: movaps %xmm3, %xmm6
+; WIN-NEXT: movaps %xmm2, %xmm7
+; WIN-NEXT: movq %rdx, %rsi
+; WIN-NEXT: movq %rcx, %rdi
+; WIN-NEXT: movaps %xmm2, %xmm0
+; WIN-NEXT: movaps %xmm3, %xmm1
+; WIN-NEXT: callq __eqtf2
+; WIN-NEXT: testl %eax, %eax
+; WIN-NEXT: sete %bl
+; WIN-NEXT: movaps %xmm7, %xmm0
+; WIN-NEXT: movaps %xmm6, %xmm1
+; WIN-NEXT: callq __unordtf2
+; WIN-NEXT: testl %eax, %eax
+; WIN-NEXT: setne %al
+; WIN-NEXT: orb %bl, %al
+; WIN-NEXT: cmoveq %rsi, %rdi
+; WIN-NEXT: movq %rdi, %rax
+; WIN-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; WIN-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; WIN-NEXT: addq $64, %rsp
+; WIN-NEXT: popq %rbx
+; WIN-NEXT: popq %rdi
+; WIN-NEXT: popq %rsi
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: cmp_ueq_q:
+; WIN-X86: # %bb.0:
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: pushl %ebx
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: calll ___eqtf2
+; WIN-X86-NEXT: addl $32, %esp
+; WIN-X86-NEXT: testl %eax, %eax
+; WIN-X86-NEXT: sete %bl
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: calll ___unordtf2
+; WIN-X86-NEXT: addl $32, %esp
+; WIN-X86-NEXT: testl %eax, %eax
+; WIN-X86-NEXT: setne %al
+; WIN-X86-NEXT: orb %bl, %al
+; WIN-X86-NEXT: jne LBB39_1
+; WIN-X86-NEXT: # %bb.2:
+; WIN-X86-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: jmp LBB39_3
+; WIN-X86-NEXT: LBB39_1:
+; WIN-X86-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: LBB39_3:
+; WIN-X86-NEXT: movl (%ecx), %eax
+; WIN-X86-NEXT: movl 4(%ecx), %edx
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebx
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
%cond = call i1 @llvm.experimental.constrained.fcmp.f128(
fp128 %x, fp128 %y,
metadata !"ueq",
@@ -1951,6 +3523,88 @@ define i64 @cmp_one_q(i64 %a, i64 %b, fp128 %x, fp128 %y) #0 {
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
+;
+; WIN-LABEL: cmp_one_q:
+; WIN: # %bb.0:
+; WIN-NEXT: pushq %rsi
+; WIN-NEXT: pushq %rdi
+; WIN-NEXT: pushq %rbx
+; WIN-NEXT: subq $64, %rsp
+; WIN-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; WIN-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; WIN-NEXT: movaps %xmm3, %xmm6
+; WIN-NEXT: movaps %xmm2, %xmm7
+; WIN-NEXT: movq %rdx, %rsi
+; WIN-NEXT: movq %rcx, %rdi
+; WIN-NEXT: movaps %xmm2, %xmm0
+; WIN-NEXT: movaps %xmm3, %xmm1
+; WIN-NEXT: callq __eqtf2
+; WIN-NEXT: testl %eax, %eax
+; WIN-NEXT: setne %bl
+; WIN-NEXT: movaps %xmm7, %xmm0
+; WIN-NEXT: movaps %xmm6, %xmm1
+; WIN-NEXT: callq __unordtf2
+; WIN-NEXT: testl %eax, %eax
+; WIN-NEXT: sete %al
+; WIN-NEXT: testb %bl, %al
+; WIN-NEXT: cmoveq %rsi, %rdi
+; WIN-NEXT: movq %rdi, %rax
+; WIN-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; WIN-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; WIN-NEXT: addq $64, %rsp
+; WIN-NEXT: popq %rbx
+; WIN-NEXT: popq %rdi
+; WIN-NEXT: popq %rsi
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: cmp_one_q:
+; WIN-X86: # %bb.0:
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: pushl %ebx
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: calll ___eqtf2
+; WIN-X86-NEXT: addl $32, %esp
+; WIN-X86-NEXT: testl %eax, %eax
+; WIN-X86-NEXT: setne %bl
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: pushl {{[0-9]+}}(%esp)
+; WIN-X86-NEXT: calll ___unordtf2
+; WIN-X86-NEXT: addl $32, %esp
+; WIN-X86-NEXT: testl %eax, %eax
+; WIN-X86-NEXT: sete %al
+; WIN-X86-NEXT: testb %bl, %al
+; WIN-X86-NEXT: jne LBB40_1
+; WIN-X86-NEXT: # %bb.2:
+; WIN-X86-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: jmp LBB40_3
+; WIN-X86-NEXT: LBB40_1:
+; WIN-X86-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: LBB40_3:
+; WIN-X86-NEXT: movl (%ecx), %eax
+; WIN-X86-NEXT: movl 4(%ecx), %edx
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebx
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
%cond = call i1 @llvm.experimental.constrained.fcmp.f128(
fp128 %x, fp128 %y,
metadata !"one",
diff --git a/llvm/test/CodeGen/X86/fp128-libcalls.ll b/llvm/test/CodeGen/X86/fp128-libcalls.ll
index 0831675095d74..230515927e1c4 100644
--- a/llvm/test/CodeGen/X86/fp128-libcalls.ll
+++ b/llvm/test/CodeGen/X86/fp128-libcalls.ll
@@ -5,6 +5,10 @@
; RUN: -enable-legalize-types-checking | FileCheck %s --check-prefix=GNU
; RUN: llc < %s -O2 -mtriple=i686-linux-gnu -mattr=sse2 \
; RUN: -enable-legalize-types-checking | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -O2 -mtriple=x86_64-pc-windows-msvc \
+; RUN: -enable-legalize-types-checking | FileCheck %s --check-prefix=WIN
+; RUN: llc < %s -O2 -mtriple=i686-pc-windows-msvc \
+; RUN: -enable-legalize-types-checking | FileCheck %s --check-prefix=WIN-X86
; Check all soft floating point library function calls.
@@ -55,6 +59,46 @@ define dso_local void @Test128Add(fp128 %d1, fp128 %d2) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128Add:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq __addtf3
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128Add:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll ___addtf3
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+12
+; WIN-X86-NEXT: movl %edx, _vf128+8
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%add = fadd fp128 %d1, %d2
store fp128 %add, ptr @vf128, align 16
@@ -111,6 +155,48 @@ define dso_local void @Test128_1Add(fp128 %d1) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128_1Add:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: movaps %xmm0, %xmm1
+; WIN-NEXT: movaps vf128(%rip), %xmm0
+; WIN-NEXT: callq __addtf3
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128_1Add:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl _vf128+12
+; WIN-X86-NEXT: pushl _vf128+8
+; WIN-X86-NEXT: pushl _vf128+4
+; WIN-X86-NEXT: pushl _vf128
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll ___addtf3
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+8
+; WIN-X86-NEXT: movl %edx, _vf128+12
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%0 = load fp128, ptr @vf128, align 16
%add = fadd fp128 %0, %d1
@@ -162,6 +248,46 @@ define dso_local void @Test128Sub(fp128 %d1, fp128 %d2) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128Sub:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq __subtf3
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128Sub:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll ___subtf3
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+12
+; WIN-X86-NEXT: movl %edx, _vf128+8
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%sub = fsub fp128 %d1, %d2
store fp128 %sub, ptr @vf128, align 16
@@ -218,6 +344,48 @@ define dso_local void @Test128_1Sub(fp128 %d1) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128_1Sub:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: movaps %xmm0, %xmm1
+; WIN-NEXT: movaps vf128(%rip), %xmm0
+; WIN-NEXT: callq __subtf3
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128_1Sub:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl _vf128+12
+; WIN-X86-NEXT: pushl _vf128+8
+; WIN-X86-NEXT: pushl _vf128+4
+; WIN-X86-NEXT: pushl _vf128
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll ___subtf3
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+8
+; WIN-X86-NEXT: movl %edx, _vf128+12
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%0 = load fp128, ptr @vf128, align 16
%sub = fsub fp128 %0, %d1
@@ -269,6 +437,46 @@ define dso_local void @Test128Mul(fp128 %d1, fp128 %d2) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128Mul:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq __multf3
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128Mul:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll ___multf3
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+12
+; WIN-X86-NEXT: movl %edx, _vf128+8
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%mul = fmul fp128 %d1, %d2
store fp128 %mul, ptr @vf128, align 16
@@ -325,6 +533,48 @@ define dso_local void @Test128_1Mul(fp128 %d1) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128_1Mul:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: movaps %xmm0, %xmm1
+; WIN-NEXT: movaps vf128(%rip), %xmm0
+; WIN-NEXT: callq __multf3
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128_1Mul:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl _vf128+12
+; WIN-X86-NEXT: pushl _vf128+8
+; WIN-X86-NEXT: pushl _vf128+4
+; WIN-X86-NEXT: pushl _vf128
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll ___multf3
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+8
+; WIN-X86-NEXT: movl %edx, _vf128+12
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%0 = load fp128, ptr @vf128, align 16
%mul = fmul fp128 %0, %d1
@@ -376,6 +626,46 @@ define dso_local void @Test128Div(fp128 %d1, fp128 %d2) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128Div:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq __divtf3
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128Div:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll ___divtf3
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+12
+; WIN-X86-NEXT: movl %edx, _vf128+8
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%div = fdiv fp128 %d1, %d2
store fp128 %div, ptr @vf128, align 16
@@ -432,6 +722,48 @@ define dso_local void @Test128_1Div(fp128 %d1) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128_1Div:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: movaps %xmm0, %xmm1
+; WIN-NEXT: movaps vf128(%rip), %xmm0
+; WIN-NEXT: callq __divtf3
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128_1Div:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl _vf128+12
+; WIN-X86-NEXT: pushl _vf128+8
+; WIN-X86-NEXT: pushl _vf128+4
+; WIN-X86-NEXT: pushl _vf128
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll ___divtf3
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+8
+; WIN-X86-NEXT: movl %edx, _vf128+12
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%0 = load fp128, ptr @vf128, align 16
%div = fdiv fp128 %0, %d1
@@ -475,6 +807,46 @@ define dso_local void @Test128Rem(fp128 %d1, fp128 %d2) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128Rem:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq fmodl
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128Rem:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _fmodl
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+12
+; WIN-X86-NEXT: movl %edx, _vf128+8
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%div = frem fp128 %d1, %d2
store fp128 %div, ptr @vf128, align 16
@@ -521,6 +893,48 @@ define dso_local void @Test128_1Rem(fp128 %d1) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128_1Rem:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: movaps %xmm0, %xmm1
+; WIN-NEXT: movaps vf128(%rip), %xmm0
+; WIN-NEXT: callq fmodl
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128_1Rem:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl _vf128+12
+; WIN-X86-NEXT: pushl _vf128+8
+; WIN-X86-NEXT: pushl _vf128+4
+; WIN-X86-NEXT: pushl _vf128
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _fmodl
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+8
+; WIN-X86-NEXT: movl %edx, _vf128+12
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%0 = load fp128, ptr @vf128, align 16
%div = frem fp128 %0, %d1
@@ -560,6 +974,42 @@ define dso_local void @Test128Sqrt(fp128 %d1) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128Sqrt:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq sqrtl
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128Sqrt:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _sqrtl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+12
+; WIN-X86-NEXT: movl %edx, _vf128+8
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%sqrt = call fp128 @llvm.sqrt.f128(fp128 %d1)
store fp128 %sqrt, ptr @vf128, align 16
@@ -599,6 +1049,42 @@ define dso_local void @Test128Sin(fp128 %d1) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128Sin:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq sinl
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128Sin:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _sinl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+12
+; WIN-X86-NEXT: movl %edx, _vf128+8
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%sqrt = call fp128 @llvm.sin.f128(fp128 %d1)
store fp128 %sqrt, ptr @vf128, align 16
@@ -638,6 +1124,42 @@ define dso_local void @Test128Cos(fp128 %d1) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128Cos:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq cosl
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128Cos:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _cosl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+12
+; WIN-X86-NEXT: movl %edx, _vf128+8
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%sqrt = call fp128 @llvm.cos.f128(fp128 %d1)
store fp128 %sqrt, ptr @vf128, align 16
@@ -677,6 +1199,42 @@ define dso_local void @Test128Ceil(fp128 %d1) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128Ceil:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq ceill
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128Ceil:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _ceill
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+12
+; WIN-X86-NEXT: movl %edx, _vf128+8
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%sqrt = call fp128 @llvm.ceil.f128(fp128 %d1)
store fp128 %sqrt, ptr @vf128, align 16
@@ -716,6 +1274,42 @@ define dso_local void @Test128Floor(fp128 %d1) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128Floor:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq floorl
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128Floor:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _floorl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+12
+; WIN-X86-NEXT: movl %edx, _vf128+8
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%sqrt = call fp128 @llvm.floor.f128(fp128 %d1)
store fp128 %sqrt, ptr @vf128, align 16
@@ -755,6 +1349,42 @@ define dso_local void @Test128Trunc(fp128 %d1) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128Trunc:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq truncl
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128Trunc:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _truncl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+12
+; WIN-X86-NEXT: movl %edx, _vf128+8
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%sqrt = call fp128 @llvm.trunc.f128(fp128 %d1)
store fp128 %sqrt, ptr @vf128, align 16
@@ -794,6 +1424,42 @@ define dso_local void @Test128Nearbyint(fp128 %d1) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128Nearbyint:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq nearbyintl
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128Nearbyint:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _nearbyintl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+12
+; WIN-X86-NEXT: movl %edx, _vf128+8
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%sqrt = call fp128 @llvm.nearbyint.f128(fp128 %d1)
store fp128 %sqrt, ptr @vf128, align 16
@@ -833,6 +1499,42 @@ define dso_local void @Test128Rint(fp128 %d1) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128Rint:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq rintl
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128Rint:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _rintl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+12
+; WIN-X86-NEXT: movl %edx, _vf128+8
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%sqrt = call fp128 @llvm.rint.f128(fp128 %d1)
store fp128 %sqrt, ptr @vf128, align 16
@@ -872,6 +1574,42 @@ define dso_local void @Test128Round(fp128 %d1) nounwind {
; X86-NEXT: movaps %xmm0, vf128
; X86-NEXT: addl $28, %esp
; X86-NEXT: retl
+;
+; WIN-LABEL: Test128Round:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: subq $40, %rsp
+; WIN-NEXT: callq roundl
+; WIN-NEXT: movaps %xmm0, vf128(%rip)
+; WIN-NEXT: addq $40, %rsp
+; WIN-NEXT: retq
+;
+; WIN-X86-LABEL: Test128Round:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $32, %esp
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl 8(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _roundl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; WIN-X86-NEXT: movl %esi, _vf128+12
+; WIN-X86-NEXT: movl %edx, _vf128+8
+; WIN-X86-NEXT: movl %ecx, _vf128+4
+; WIN-X86-NEXT: movl %eax, _vf128
+; WIN-X86-NEXT: leal -4(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%sqrt = call fp128 @llvm.round.f128(fp128 %d1)
store fp128 %sqrt, ptr @vf128, align 16
@@ -916,6 +1654,50 @@ define fp128 @Test128FMA(fp128 %a, fp128 %b, fp128 %c) nounwind {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: Test128FMA:
+; WIN: # %bb.0: # %entry
+; WIN-NEXT: jmp fmal # TAILCALL
+;
+; WIN-X86-LABEL: Test128FMA:
+; WIN-X86: # %bb.0: # %entry
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 56(%ebp)
+; WIN-X86-NEXT: pushl 52(%ebp)
+; WIN-X86-NEXT: pushl 48(%ebp)
+; WIN-X86-NEXT: pushl 44(%ebp)
+; WIN-X86-NEXT: pushl 40(%ebp)
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _fmal
+; WIN-X86-NEXT: addl $52, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 12(%esi)
+; WIN-X86-NEXT: movl %edx, 8(%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
entry:
%call = call fp128 @llvm.fma.f128(fp128 %a, fp128 %b, fp128 %c)
ret fp128 %call
@@ -951,6 +1733,42 @@ define fp128 @Test128Acos(fp128 %a) nounwind {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: Test128Acos:
+; WIN: # %bb.0:
+; WIN-NEXT: jmp acosl # TAILCALL
+;
+; WIN-X86-LABEL: Test128Acos:
+; WIN-X86: # %bb.0:
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _acosl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 12(%esi)
+; WIN-X86-NEXT: movl %edx, 8(%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
%x = call fp128 @llvm.acos.f128(fp128 %a)
ret fp128 %x
}
@@ -984,6 +1802,42 @@ define fp128 @Test128Asin(fp128 %a) nounwind {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: Test128Asin:
+; WIN: # %bb.0:
+; WIN-NEXT: jmp asinl # TAILCALL
+;
+; WIN-X86-LABEL: Test128Asin:
+; WIN-X86: # %bb.0:
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _asinl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 12(%esi)
+; WIN-X86-NEXT: movl %edx, 8(%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
%x = call fp128 @llvm.asin.f128(fp128 %a)
ret fp128 %x
}
@@ -1017,6 +1871,42 @@ define fp128 @Test128Atan(fp128 %a) nounwind {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: Test128Atan:
+; WIN: # %bb.0:
+; WIN-NEXT: jmp atanl # TAILCALL
+;
+; WIN-X86-LABEL: Test128Atan:
+; WIN-X86: # %bb.0:
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _atanl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 12(%esi)
+; WIN-X86-NEXT: movl %edx, 8(%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
%x = call fp128 @llvm.atan.f128(fp128 %a)
ret fp128 %x
}
@@ -1054,6 +1944,46 @@ define fp128 @Test128Atan2(fp128 %a, fp128 %b) nounwind {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: Test128Atan2:
+; WIN: # %bb.0:
+; WIN-NEXT: jmp atan2l # TAILCALL
+;
+; WIN-X86-LABEL: Test128Atan2:
+; WIN-X86: # %bb.0:
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 40(%ebp)
+; WIN-X86-NEXT: pushl 36(%ebp)
+; WIN-X86-NEXT: pushl 32(%ebp)
+; WIN-X86-NEXT: pushl 28(%ebp)
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _atan2l
+; WIN-X86-NEXT: addl $36, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 12(%esi)
+; WIN-X86-NEXT: movl %edx, 8(%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
%x = call fp128 @llvm.atan2.f128(fp128 %a, fp128 %b)
ret fp128 %x
}
@@ -1087,6 +2017,42 @@ define fp128 @Test128Cosh(fp128 %a) nounwind {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: Test128Cosh:
+; WIN: # %bb.0:
+; WIN-NEXT: jmp coshl # TAILCALL
+;
+; WIN-X86-LABEL: Test128Cosh:
+; WIN-X86: # %bb.0:
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _coshl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 12(%esi)
+; WIN-X86-NEXT: movl %edx, 8(%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
%x = call fp128 @llvm.cosh.f128(fp128 %a)
ret fp128 %x
}
@@ -1120,6 +2086,42 @@ define fp128 @Test128Sinh(fp128 %a) nounwind {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: Test128Sinh:
+; WIN: # %bb.0:
+; WIN-NEXT: jmp sinhl # TAILCALL
+;
+; WIN-X86-LABEL: Test128Sinh:
+; WIN-X86: # %bb.0:
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _sinhl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 12(%esi)
+; WIN-X86-NEXT: movl %edx, 8(%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
%x = call fp128 @llvm.sinh.f128(fp128 %a)
ret fp128 %x
}
@@ -1153,6 +2155,42 @@ define fp128 @Test128Tan(fp128 %a) nounwind {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: Test128Tan:
+; WIN: # %bb.0:
+; WIN-NEXT: jmp tanl # TAILCALL
+;
+; WIN-X86-LABEL: Test128Tan:
+; WIN-X86: # %bb.0:
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _tanl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 12(%esi)
+; WIN-X86-NEXT: movl %edx, 8(%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
%x = call fp128 @llvm.tan.f128(fp128 %a)
ret fp128 %x
}
@@ -1186,6 +2224,42 @@ define fp128 @Test128Tanh(fp128 %a) nounwind {
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl $4
+;
+; WIN-LABEL: Test128Tanh:
+; WIN: # %bb.0:
+; WIN-NEXT: jmp tanhl # TAILCALL
+;
+; WIN-X86-LABEL: Test128Tanh:
+; WIN-X86: # %bb.0:
+; WIN-X86-NEXT: pushl %ebp
+; WIN-X86-NEXT: movl %esp, %ebp
+; WIN-X86-NEXT: pushl %edi
+; WIN-X86-NEXT: pushl %esi
+; WIN-X86-NEXT: andl $-16, %esp
+; WIN-X86-NEXT: subl $16, %esp
+; WIN-X86-NEXT: movl 8(%ebp), %esi
+; WIN-X86-NEXT: movl %esp, %eax
+; WIN-X86-NEXT: pushl 24(%ebp)
+; WIN-X86-NEXT: pushl 20(%ebp)
+; WIN-X86-NEXT: pushl 16(%ebp)
+; WIN-X86-NEXT: pushl 12(%ebp)
+; WIN-X86-NEXT: pushl %eax
+; WIN-X86-NEXT: calll _tanhl
+; WIN-X86-NEXT: addl $20, %esp
+; WIN-X86-NEXT: movl (%esp), %eax
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; WIN-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; WIN-X86-NEXT: movl %edi, 12(%esi)
+; WIN-X86-NEXT: movl %edx, 8(%esi)
+; WIN-X86-NEXT: movl %ecx, 4(%esi)
+; WIN-X86-NEXT: movl %eax, (%esi)
+; WIN-X86-NEXT: movl %esi, %eax
+; WIN-X86-NEXT: leal -8(%ebp), %esp
+; WIN-X86-NEXT: popl %esi
+; WIN-X86-NEXT: popl %edi
+; WIN-X86-NEXT: popl %ebp
+; WIN-X86-NEXT: retl
%x = call fp128 @llvm.tanh.f128(fp128 %a)
ret fp128 %x
}