[clang] [llvm] [WIP] Correct lowering of `fp128` intrinsics (PR #76558)
Trevor Gross via cfe-commits
cfe-commits at lists.llvm.org
Fri Dec 29 16:20:42 PST 2023
https://github.com/tgross35 updated https://github.com/llvm/llvm-project/pull/76558
>From 7df4ef93989b1913d9200fbc29d6d04f9e59d51a Mon Sep 17 00:00:00 2001
From: Trevor Gross <tmgross at umich.edu>
Date: Fri, 11 Aug 2023 22:16:01 -0400
Subject: [PATCH 1/4] [IR] Add an xpassing test for `f128` intrinsic lowering
`f128` intrinsic functions currently lower to `long double` libc calls, which
are incorrect on targets where `long double` does not have the binary128
layout. Add a test showing the current behavior.
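For context (not part of the patch): on x86-64 the `long double` routines expect
the 80-bit x87 format, while LLVM's `fp128` is IEEE binary128, so a lowering that
calls e.g. `sinl` for an fp128 value misinterprets the operand instead of failing
to link. A minimal C++ sketch of the layout mismatch, assuming a GCC/Clang host
on x86-64 where `__float128` is available:

  #include <cfloat>
  #include <cstdio>

  int main() {
    // Both types occupy 16 bytes on x86-64, but long double carries only a
    // 64-bit mantissa (x87 extended format) while __float128 is IEEE binary128
    // with a 113-bit mantissa, which is what LLVM's fp128 maps to.
    std::printf("sizeof(long double) = %zu, LDBL_MANT_DIG = %d\n",
                sizeof(long double), LDBL_MANT_DIG);
    std::printf("sizeof(__float128)  = %zu\n", sizeof(__float128));
    return 0;
  }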
---
.../CodeGen/Generic/f128-math-lowering.ll | 610 ++++++++++++++++++
1 file changed, 610 insertions(+)
create mode 100644 llvm/test/CodeGen/Generic/f128-math-lowering.ll
diff --git a/llvm/test/CodeGen/Generic/f128-math-lowering.ll b/llvm/test/CodeGen/Generic/f128-math-lowering.ll
new file mode 100644
index 00000000000000..30efb8ef34918e
--- /dev/null
+++ b/llvm/test/CodeGen/Generic/f128-math-lowering.ll
@@ -0,0 +1,610 @@
+
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+;;
+; RUN: llc < %s -mtriple=aarch64-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-AARCH64
+; RUN: llc < %s -mtriple=riscv32-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-RISCV32
+; RUN: llc < %s -mtriple=s390x-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-S390X
+; RUN: llc < %s -mtriple=i686-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-X64
+;
+; Verify that fp128 intrinsics only lower to `long double` calls on platforms
+; where `f128` and `long double` have the same layout.
+;
+; We test on x86 and x64, which have 80-bit ld, as well as aarch64 (ld == f128),
+; riscv32 (ld == f64), and s390x (ld == f128 with different alignment from
+; x64/aarch64 f128).
+;
+; FIXME: these emit calls to long double functions but should emit f128 calls
+
+define fp128 @test_cbrtf128(fp128 %a) {
+; CHECK-LABEL: test_cbrtf128:
+; CHECK-AARCH64: b llvm.cbrt.f128
+; CHECK-RISCV32: call llvm.cbrt.f128 at plt
+; CHECK-S390X: brasl {{%.*}} llvm.cbrt.f128 at PLT
+; CHECK-X64: jmp llvm.cbrt.f128 at PLT # TAILCALL
+; CHECK-X86: calll llvm.cbrt.f128 at PLT
+start:
+ %0 = tail call fp128 @llvm.cbrt.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.cbrt.f128(fp128)
+
+
+define fp128 @test_ceilf128(fp128 %a) {
+; CHECK-LABEL: test_ceilf128:
+; CHECK-AARCH64: b ceill
+; CHECK-RISCV32: call ceill at plt
+; CHECK-S390X: brasl {{%.*}} ceill at PLT
+; CHECK-X64: jmp ceill at PLT
+; CHECK-X86: calll ceill
+start:
+ %0 = tail call fp128 @llvm.ceil.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.ceil.f128(fp128)
+
+
+define fp128 @test_copysignf128(fp128 %a, fp128 %b) {
+; No math library call here, so make sure the assembly does the correct thing.
+; This test is autogenerated
+; CHECK-LABEL: test_copysignf128:
+; CHECK-AARCH64-LABEL: test_copysignf128:
+; CHECK-AARCH64: // %bb.0: // %start
+; CHECK-AARCH64-NEXT: stp q0, q1, [sp, #-32]!
+; CHECK-AARCH64-NEXT: .cfi_def_cfa_offset 32
+; CHECK-AARCH64-NEXT: ldrb w8, [sp, #15]
+; CHECK-AARCH64-NEXT: ldrb w9, [sp, #31]
+; CHECK-AARCH64-NEXT: bfxil w9, w8, #0, #7
+; CHECK-AARCH64-NEXT: strb w9, [sp, #15]
+; CHECK-AARCH64-NEXT: ldr q0, [sp], #32
+; CHECK-AARCH64-NEXT: ret
+;
+; CHECK-RISCV32-LABEL: test_copysignf128:
+; CHECK-RISCV32: # %bb.0: # %start
+; CHECK-RISCV32-NEXT: lw a3, 0(a1)
+; CHECK-RISCV32-NEXT: lw a4, 4(a1)
+; CHECK-RISCV32-NEXT: lw a2, 12(a2)
+; CHECK-RISCV32-NEXT: lw a5, 12(a1)
+; CHECK-RISCV32-NEXT: lw a1, 8(a1)
+; CHECK-RISCV32-NEXT: lui a6, 524288
+; CHECK-RISCV32-NEXT: and a2, a2, a6
+; CHECK-RISCV32-NEXT: slli a5, a5, 1
+; CHECK-RISCV32-NEXT: srli a5, a5, 1
+; CHECK-RISCV32-NEXT: or a2, a5, a2
+; CHECK-RISCV32-NEXT: sw a1, 8(a0)
+; CHECK-RISCV32-NEXT: sw a4, 4(a0)
+; CHECK-RISCV32-NEXT: sw a3, 0(a0)
+; CHECK-RISCV32-NEXT: sw a2, 12(a0)
+; CHECK-RISCV32-NEXT: ret
+;
+; CHECK-S390X-LABEL: test_copysignf128:
+; CHECK-S390X: # %bb.0: # %start
+; CHECK-S390X-NEXT: ld %f0, 0(%r3)
+; CHECK-S390X-NEXT: ld %f2, 8(%r3)
+; CHECK-S390X-NEXT: ld %f1, 0(%r4)
+; CHECK-S390X-NEXT: ld %f3, 8(%r4)
+; CHECK-S390X-NEXT: cpsdr %f0, %f1, %f0
+; CHECK-S390X-NEXT: std %f0, 0(%r2)
+; CHECK-S390X-NEXT: std %f2, 8(%r2)
+; CHECK-S390X-NEXT: br %r14
+;
+; CHECK-X86-LABEL: test_copysignf128:
+; CHECK-X86: # %bb.0: # %start
+; CHECK-X86-NEXT: pushl %ebx
+; CHECK-X86-NEXT: .cfi_def_cfa_offset 8
+; CHECK-X86-NEXT: pushl %edi
+; CHECK-X86-NEXT: .cfi_def_cfa_offset 12
+; CHECK-X86-NEXT: pushl %esi
+; CHECK-X86-NEXT: .cfi_def_cfa_offset 16
+; CHECK-X86-NEXT: .cfi_offset %esi, -16
+; CHECK-X86-NEXT: .cfi_offset %edi, -12
+; CHECK-X86-NEXT: .cfi_offset %ebx, -8
+; CHECK-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK-X86-NEXT: movl $-2147483648, %edi # imm = 0x80000000
+; CHECK-X86-NEXT: andl {{[0-9]+}}(%esp), %edi
+; CHECK-X86-NEXT: movl $2147483647, %ebx # imm = 0x7FFFFFFF
+; CHECK-X86-NEXT: andl {{[0-9]+}}(%esp), %ebx
+; CHECK-X86-NEXT: orl %edi, %ebx
+; CHECK-X86-NEXT: movl %ebx, 12(%eax)
+; CHECK-X86-NEXT: movl %esi, 8(%eax)
+; CHECK-X86-NEXT: movl %edx, 4(%eax)
+; CHECK-X86-NEXT: movl %ecx, (%eax)
+; CHECK-X86-NEXT: popl %esi
+; CHECK-X86-NEXT: .cfi_def_cfa_offset 12
+; CHECK-X86-NEXT: popl %edi
+; CHECK-X86-NEXT: .cfi_def_cfa_offset 8
+; CHECK-X86-NEXT: popl %ebx
+; CHECK-X86-NEXT: .cfi_def_cfa_offset 4
+; CHECK-X86-NEXT: retl $4
+;
+; CHECK-X64-LABEL: test_copysignf128:
+; CHECK-X64: # %bb.0: # %start
+; CHECK-X64-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-X64-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-X64-NEXT: orps %xmm1, %xmm0
+; CHECK-X64-NEXT: retq
+start:
+ %0 = tail call fp128 @llvm.copysign.f128(fp128 %a, fp128 %b)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.copysign.f128(fp128, fp128)
+
+
+define fp128 @test_cosf128(fp128 %a) {
+; CHECK-LABEL: test_cosf128:
+; CHECK-AARCH64: b cosl
+; CHECK-RISCV32: call cosl at plt
+; CHECK-S390X: brasl {{%.*}} cosl at PLT
+; CHECK-X64: jmp cosl at PLT
+; CHECK-X86: calll cosl
+start:
+ %0 = tail call fp128 @llvm.cos.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.cos.f128(fp128)
+
+
+define fp128 @test_exp2f128(fp128 %a) {
+; CHECK-LABEL: test_exp2f128:
+; CHECK-AARCH64: b exp2l
+; CHECK-RISCV32: call exp2l at plt
+; CHECK-S390X: brasl {{%.*}} exp2l at PLT
+; CHECK-X64: jmp exp2l at PLT
+; CHECK-X86: calll exp2l
+start:
+ %0 = tail call fp128 @llvm.exp2.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.exp2.f128(fp128)
+
+
+define fp128 @test___exp2f128_finite(fp128 %a) {
+; CHECK-LABEL: test___exp2f128_finite:
+; CHECK-AARCH64: b llvm.__exp2f128_finite.f128
+; CHECK-RISCV32: call llvm.__exp2f128_finite.f128 at plt
+; CHECK-S390X: brasl {{%.*}} llvm.__exp2f128_finite.f128 at PLT
+; CHECK-X64: jmp llvm.__exp2f128_finite.f128 at PLT # TAILCALL
+; CHECK-X86: calll llvm.__exp2f128_finite.f128 at PLT
+start:
+ %0 = tail call fp128 @llvm.__exp2f128_finite.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.__exp2f128_finite.f128(fp128)
+
+
+define fp128 @test_expf128(fp128 %a) {
+; CHECK-LABEL: test_expf128:
+; CHECK-AARCH64: b expl
+; CHECK-RISCV32: call expl at plt
+; CHECK-S390X: brasl {{%.*}} expl at PLT
+; CHECK-X64: jmp expl at PLT
+; CHECK-X86: calll expl
+start:
+ %0 = tail call fp128 @llvm.exp.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.exp.f128(fp128)
+
+
+define fp128 @test___expf128_finite(fp128 %a) {
+; CHECK-LABEL: test___expf128_finite:
+; CHECK-AARCH64: b llvm.__expf128_finite.f128
+; CHECK-RISCV32: call llvm.__expf128_finite.f128 at plt
+; CHECK-S390X: brasl {{%.*}} llvm.__expf128_finite.f128 at PLT
+; CHECK-X64: jmp llvm.__expf128_finite.f128 at PLT # TAILCALL
+; CHECK-X86: calll llvm.__expf128_finite.f128 at PLT
+start:
+ %0 = tail call fp128 @llvm.__expf128_finite.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.__expf128_finite.f128(fp128)
+
+
+define fp128 @test_floorf128(fp128 %a) {
+; CHECK-LABEL: test_floorf128:
+; CHECK-AARCH64: b floorl
+; CHECK-RISCV32: call floorl at plt
+; CHECK-S390X: brasl {{%.*}} floorl at PLT
+; CHECK-X64: jmp floorl at PLT
+; CHECK-X86: calll floorl
+start:
+ %0 = tail call fp128 @llvm.floor.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.floor.f128(fp128)
+
+
+define fp128 @test_fmaf128(fp128 %a, fp128 %b, fp128 %c) {
+; CHECK-LABEL: test_fmaf128:
+; CHECK-AARCH64: b fmal
+; CHECK-RISCV32: call fmal at plt
+; CHECK-S390X: brasl {{%.*}} fmal at PLT
+; CHECK-X64: jmp fmal at PLT
+; CHECK-X86: calll fmal
+start:
+ %0 = tail call fp128 @llvm.fma.f128(fp128 %a, fp128 %b, fp128 %c)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.fma.f128(fp128, fp128, fp128)
+
+
+define fp128 @test_fmaxf128(fp128 %a, fp128 %b) {
+; CHECK-LABEL: test_fmaxf128:
+; CHECK-AARCH64: b llvm.fmax.f128
+; CHECK-RISCV32: call llvm.fmax.f128 at plt
+; CHECK-S390X: brasl {{%.*}} llvm.fmax.f128 at PLT
+; CHECK-X64: jmp llvm.fmax.f128 at PLT # TAILCALL
+; CHECK-X86: calll llvm.fmax.f128 at PLT
+start:
+ %0 = tail call fp128 @llvm.fmax.f128(fp128 %a, fp128 %b)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.fmax.f128(fp128, fp128)
+
+
+define fp128 @test_fminf128(fp128 %a, fp128 %b) {
+; CHECK-LABEL: test_fminf128:
+; CHECK-AARCH64: b llvm.fmin.f128
+; CHECK-RISCV32: call llvm.fmin.f128 at plt
+; CHECK-S390X: brasl {{%.*}} llvm.fmin.f128 at PLT
+; CHECK-X64: jmp llvm.fmin.f128 at PLT # TAILCALL
+; CHECK-X86: calll llvm.fmin.f128 at PLT
+start:
+ %0 = tail call fp128 @llvm.fmin.f128(fp128 %a, fp128 %b)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.fmin.f128(fp128, fp128)
+
+
+define fp128 @test_fmodf128(fp128 %a, fp128 %b) {
+; CHECK-LABEL: test_fmodf128:
+; CHECK-AARCH64: b llvm.fmod.f128
+; CHECK-RISCV32: call llvm.fmod.f128 at plt
+; CHECK-S390X: brasl {{%.*}} llvm.fmod.f128 at PLT
+; CHECK-X64: jmp llvm.fmod.f128 at PLT # TAILCALL
+; CHECK-X86: calll llvm.fmod.f128 at PLT
+start:
+ %0 = tail call fp128 @llvm.fmod.f128(fp128 %a, fp128 %b)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.fmod.f128(fp128, fp128)
+
+
+define { fp128, i32 } @test_frexpf128(fp128 %a) {
+; CHECK-LABEL: test_frexpf128:
+; CHECK-AARCH64: bl frexpl
+; CHECK-RISCV32: call frexpl at plt
+; CHECK-S390X: brasl {{%.*}} frexpl at PLT
+; CHECK-X64: callq frexpl at PLT
+; CHECK-X86: calll frexpl
+start:
+ %0 = tail call { fp128, i32 } @llvm.frexp.f128(fp128 %a)
+ ret { fp128, i32 } %0
+}
+
+declare { fp128, i32 } @llvm.frexp.f128(fp128)
+
+
+define fp128 @test_ldexpf128(fp128 %a, i32 %b) {
+; CHECK-LABEL: test_ldexpf128:
+; CHECK-AARCH64: b ldexpl
+; CHECK-RISCV32: call ldexpl at plt
+; CHECK-S390X: brasl {{%.*}} ldexpl at PLT
+; CHECK-X64: jmp ldexpl at PLT
+; CHECK-X86: calll ldexpl
+start:
+ %0 = tail call fp128 @llvm.ldexp.f128(fp128 %a, i32 %b)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.ldexp.f128(fp128, i32)
+
+
+define i64 @test_llrintf128(fp128 %a) {
+; CHECK-LABEL: test_llrintf128:
+; CHECK-AARCH64: b llrintl
+; CHECK-RISCV32: call llrintl at plt
+; CHECK-S390X: brasl {{%.*}} llrintl at PLT
+; CHECK-X64: jmp llrintl at PLT
+; CHECK-X86: calll llrintl
+start:
+ %0 = tail call i64 @llvm.llrint.f128(fp128 %a)
+ ret i64 %0
+}
+
+declare i64 @llvm.llrint.f128(fp128)
+
+
+define i64 @test_llroundf128(fp128 %a) {
+; CHECK-LABEL: test_llroundf128:
+; CHECK-AARCH64: b llroundl
+; CHECK-RISCV32: call llroundl at plt
+; CHECK-S390X: brasl {{%.*}} llroundl at PLT
+; CHECK-X64: jmp llroundl at PLT
+; CHECK-X86: calll llroundl
+start:
+ %0 = tail call i64 @llvm.llround.i64.f128(fp128 %a)
+ ret i64 %0
+}
+
+declare i64 @llvm.llround.i64.f128(fp128)
+
+
+define fp128 @test_log10f128(fp128 %a) {
+; CHECK-LABEL: test_log10f128:
+; CHECK-AARCH64: b log10l
+; CHECK-RISCV32: call log10l at plt
+; CHECK-S390X: brasl {{%.*}} log10l at PLT
+; CHECK-X64: jmp log10l at PLT
+; CHECK-X86: calll log10l
+start:
+ %0 = tail call fp128 @llvm.log10.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.log10.f128(fp128)
+
+
+define fp128 @test___log10f128_finite(fp128 %a) {
+; CHECK-LABEL: test___log10f128_finite:
+; CHECK-AARCH64: b llvm.__log10f128_finite.f128
+; CHECK-RISCV32: call llvm.__log10f128_finite.f128 at plt
+; CHECK-S390X: brasl {{%.*}} llvm.__log10f128_finite.f128 at PLT
+; CHECK-X64: jmp llvm.__log10f128_finite.f128 at PLT # TAILCALL
+; CHECK-X86: calll llvm.__log10f128_finite.f128 at PLT
+start:
+ %0 = tail call fp128 @llvm.__log10f128_finite.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.__log10f128_finite.f128(fp128)
+
+
+define fp128 @test_log2f128(fp128 %a) {
+; CHECK-LABEL: test_log2f128:
+; CHECK-AARCH64: b log2l
+; CHECK-RISCV32: call log2l at plt
+; CHECK-S390X: brasl {{%.*}} log2l at PLT
+; CHECK-X64: jmp log2l at PLT
+; CHECK-X86: calll log2l
+start:
+ %0 = tail call fp128 @llvm.log2.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.log2.f128(fp128)
+
+
+define fp128 @test___log2f128_finite(fp128 %a) {
+; CHECK-LABEL: test___log2f128_finite:
+; CHECK-AARCH64: b llvm.__log2f128_finite.f128
+; CHECK-RISCV32: call llvm.__log2f128_finite.f128 at plt
+; CHECK-S390X: brasl {{%.*}} llvm.__log2f128_finite.f128 at PLT
+; CHECK-X64: jmp llvm.__log2f128_finite.f128 at PLT # TAILCALL
+; CHECK-X86: calll llvm.__log2f128_finite.f128 at PLT
+start:
+ %0 = tail call fp128 @llvm.__log2f128_finite.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.__log2f128_finite.f128(fp128)
+
+
+define fp128 @test_logf128(fp128 %a) {
+; CHECK-LABEL: test_logf128:
+; CHECK-AARCH64: b logl
+; CHECK-RISCV32: call logl at plt
+; CHECK-S390X: brasl {{%.*}} logl at PLT
+; CHECK-X64: jmp logl at PLT
+; CHECK-X86: calll logl
+start:
+ %0 = tail call fp128 @llvm.log.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.log.f128(fp128)
+
+
+define fp128 @test___logf128_finite(fp128 %a) {
+; CHECK-LABEL: test___logf128_finite:
+; CHECK-AARCH64: b llvm.__logf128_finite.f128
+; CHECK-RISCV32: call llvm.__logf128_finite.f128 at plt
+; CHECK-S390X: brasl {{%.*}} llvm.__logf128_finite.f128 at PLT
+; CHECK-X64: jmp llvm.__logf128_finite.f128 at PLT # TAILCALL
+; CHECK-X86: calll llvm.__logf128_finite.f128 at PLT
+start:
+ %0 = tail call fp128 @llvm.__logf128_finite.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.__logf128_finite.f128(fp128)
+
+
+define i64 @test_lrintf128(fp128 %a) {
+; CHECK-LABEL: test_lrintf128:
+; CHECK-AARCH64: b lrintl
+; CHECK-RISCV32: call lrintl at plt
+; CHECK-S390X: brasl {{%.*}} lrintl at PLT
+; CHECK-X64: jmp lrintl at PLT
+; CHECK-X86: calll lrintl
+start:
+ %0 = tail call i64 @llvm.lrint.f128(fp128 %a)
+ ret i64 %0
+}
+
+declare i64 @llvm.lrint.f128(fp128)
+
+
+define i64 @test_lroundf128(fp128 %a) {
+; CHECK-LABEL: test_lroundf128:
+; CHECK-AARCH64: b lroundl
+; CHECK-RISCV32: call lroundl at plt
+; CHECK-S390X: brasl {{%.*}} lroundl at PLT
+; CHECK-X64: jmp lroundl at PLT
+; CHECK-X86: calll lroundl
+start:
+ %0 = tail call i64 @llvm.lround.i64.f128(fp128 %a)
+ ret i64 %0
+}
+
+declare i64 @llvm.lround.i64.f128(fp128)
+
+
+define fp128 @test_nearbyintf128(fp128 %a) {
+; CHECK-LABEL: test_nearbyintf128:
+; CHECK-AARCH64: b nearbyintl
+; CHECK-RISCV32: call nearbyintl at plt
+; CHECK-S390X: brasl {{%.*}} nearbyintl at PLT
+; CHECK-X64: jmp nearbyintl at PLT
+; CHECK-X86: calll nearbyintl
+start:
+ %0 = tail call fp128 @llvm.nearbyint.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.nearbyint.f128(fp128)
+
+
+define fp128 @test_powf128(fp128 %a, fp128 %b) {
+; CHECK-LABEL: test_powf128:
+; CHECK-AARCH64: b powl
+; CHECK-RISCV32: call powl at plt
+; CHECK-S390X: brasl {{%.*}} powl at PLT
+; CHECK-X64: jmp powl at PLT
+; CHECK-X86: calll powl
+start:
+ %0 = tail call fp128 @llvm.pow.f128(fp128 %a, fp128 %b)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.pow.f128(fp128, fp128)
+
+
+define fp128 @test___powf128_finite(fp128 %a, fp128 %b) {
+; CHECK-LABEL: test___powf128_finite:
+; CHECK-AARCH64: b llvm.__powf128_finite.f128
+; CHECK-RISCV32: call llvm.__powf128_finite.f128 at plt
+; CHECK-S390X: brasl {{%.*}} llvm.__powf128_finite.f128 at PLT
+; CHECK-X64: jmp llvm.__powf128_finite.f128 at PLT # TAILCALL
+; CHECK-X86: calll llvm.__powf128_finite.f128 at PLT
+start:
+ %0 = tail call fp128 @llvm.__powf128_finite.f128(fp128 %a, fp128 %b)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.__powf128_finite.f128(fp128, fp128)
+
+
+define fp128 @test_rintf128(fp128 %a) {
+; CHECK-LABEL: test_rintf128:
+; CHECK-AARCH64: b rintl
+; CHECK-RISCV32: call rintl at plt
+;
+; CHECK-S390X-LABEL: test_rintf128:
+; CHECK-S390X: # %bb.0: # %start
+; CHECK-S390X-NEXT: ld %f0, 0(%r3)
+; CHECK-S390X-NEXT: ld %f2, 8(%r3)
+; CHECK-S390X-NEXT: fixbr %f0, 0, %f0
+; CHECK-S390X-NEXT: std %f0, 0(%r2)
+; CHECK-S390X-NEXT: std %f2, 8(%r2)
+; CHECK-S390X-NEXT: br %r14
+;
+; CHECK-X64: jmp rintl at PLT
+; CHECK-X86: calll rintl
+start:
+ %0 = tail call fp128 @llvm.rint.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.rint.f128(fp128)
+
+
+define fp128 @test_roundevenf128(fp128 %a) {
+; CHECK-LABEL: test_roundevenf128:
+; CHECK-AARCH64: b roundevenl
+; CHECK-RISCV32: call roundevenl at plt
+; CHECK-S390X: brasl {{%.*}} roundevenl at PLT
+; CHECK-X64: jmp roundevenl at PLT
+; CHECK-X86: calll roundevenl
+start:
+ %0 = tail call fp128 @llvm.roundeven.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.roundeven.f128(fp128)
+
+
+define fp128 @test_roundf128(fp128 %a) {
+; CHECK-LABEL: test_roundf128:
+; CHECK-AARCH64: b roundl
+; CHECK-RISCV32: call roundl at plt
+; CHECK-S390X: brasl {{%.*}} roundl at PLT
+; CHECK-X64: jmp roundl at PLT
+; CHECK-X86: calll roundl
+start:
+ %0 = tail call fp128 @llvm.round.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.round.f128(fp128)
+
+
+define fp128 @test_sinf128(fp128 %a) {
+; CHECK-LABEL: test_sinf128:
+; CHECK-AARCH64: b sinl
+; CHECK-RISCV32: call sinl at plt
+; CHECK-S390X: brasl {{%.*}} sinl at PLT
+; CHECK-X64: jmp sinl at PLT
+; CHECK-X86: calll sinl
+start:
+ %0 = tail call fp128 @llvm.sin.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.sin.f128(fp128)
+
+
+define fp128 @test_sqrtf128(fp128 %a) {
+; CHECK-LABEL: test_sqrtf128:
+; CHECK-AARCH64: b sqrtl
+; CHECK-RISCV32: call sqrtl at plt
+; CHECK-S390X: sqxbr {{%.*}} {{%.*}}
+; CHECK-X64: jmp sqrtl at PLT
+; CHECK-X86: calll sqrtl
+start:
+ %0 = tail call fp128 @llvm.sqrt.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.sqrt.f128(fp128)
+
+
+define fp128 @test_truncf128(fp128 %a) {
+; CHECK-LABEL: test_truncf128:
+; CHECK-AARCH64: b truncl
+; CHECK-RISCV32: call truncl at plt
+; CHECK-S390X: brasl {{%.*}} truncl at PLT
+; CHECK-X64: jmp truncl at PLT
+; CHECK-X86: calll truncl
+start:
+ %0 = tail call fp128 @llvm.trunc.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.trunc.f128(fp128)
>From f9bd6dc4c52fca838a2b4bd46fc161b17d99eb92 Mon Sep 17 00:00:00 2001
From: Trevor Gross <tmgross at umich.edu>
Date: Thu, 28 Dec 2023 03:50:05 -0500
Subject: [PATCH 2/4] [llvm][clang] Move `long double` layout logic from clang
to LLVM
Information about the size, alignment, and float format of `long double` is
currently part of clang. Move this logic to LLVM so it can be used to control
the lowering of intrinsics to libcalls.
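As a rough illustration (a sketch, not code from this patch) of how a backend or
legalizer could consume the new `Triple::CLayouts` API added below; the sketch
also checks the float format, which the in-tree `isLongDoubleF128()` helper's
TODO leaves open:

  #include "llvm/ADT/APFloat.h"
  #include "llvm/TargetParser/Triple.h"

  // True when fp128 libcalls may reuse the `long double` ("*l") names on this
  // triple; otherwise the dedicated "*f128" names are required.
  bool canUseLongDoubleNamesForFP128(const llvm::Triple &T) {
    llvm::Triple::CLayouts L = T.getCLayouts();
    return L.LongDoubleWidth == 128 &&
           L.LongDoubleFormat == &llvm::APFloat::IEEEquad();
  }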
---
clang/include/clang/Basic/TargetInfo.h | 6 ++
clang/lib/Basic/TargetInfo.cpp | 10 +-
clang/lib/Basic/Targets/AArch64.cpp | 8 +-
clang/lib/Basic/Targets/ARC.h | 2 +-
clang/lib/Basic/Targets/AVR.h | 3 -
clang/lib/Basic/Targets/CSKY.h | 2 +-
clang/lib/Basic/Targets/LoongArch.h | 3 -
clang/lib/Basic/Targets/MSP430.h | 4 +-
clang/lib/Basic/Targets/Mips.h | 8 --
clang/lib/Basic/Targets/RISCV.h | 3 -
clang/lib/Basic/Targets/Sparc.h | 6 +-
clang/lib/Basic/Targets/SystemZ.h | 3 -
clang/lib/Basic/Targets/TCE.h | 3 -
clang/lib/Basic/Targets/VE.h | 3 -
clang/lib/Basic/Targets/WebAssembly.h | 2 -
clang/lib/Basic/Targets/X86.h | 11 +--
clang/lib/Basic/Targets/XCore.h | 2 +-
llvm/include/llvm/TargetParser/Triple.h | 18 ++++
llvm/lib/TargetParser/Triple.cpp | 118 ++++++++++++++++++++++++
19 files changed, 158 insertions(+), 57 deletions(-)
diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h
index ac3c324c6c29c4..f6e6d24acaed1b 100644
--- a/clang/include/clang/Basic/TargetInfo.h
+++ b/clang/include/clang/Basic/TargetInfo.h
@@ -1218,6 +1218,12 @@ class TargetInfo : public TransferrableTargetInfo,
return Triple;
}
+ /// Returns information about C layouts that come from LLVM. This is used
+ /// to control lowering to libcalls.
+ llvm::Triple::CLayouts getTripleLayouts() const {
+ return Triple.getCLayouts();
+ }
+
/// Returns the target ID if supported.
virtual std::optional<std::string> getTargetID() const {
return std::nullopt;
diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp
index 96b3ad9ba2f273..2cf4cacf490cf5 100644
--- a/clang/lib/Basic/TargetInfo.cpp
+++ b/clang/lib/Basic/TargetInfo.cpp
@@ -54,6 +54,10 @@ static const LangASMap FakeAddrSpaceMap = {
TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) {
// Set defaults. Defaults are set for a 32-bit RISC platform, like PPC or
// SPARC. These should be overridden by concrete targets as needed.
+
+ // Load some defaults from LLVM
+ llvm::Triple::CLayouts TripleLayouts = getTripleLayouts();
+
BigEndian = !T.isLittleEndian();
TLSSupported = true;
VLASupported = true;
@@ -113,8 +117,8 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) {
FloatAlign = 32;
DoubleWidth = 64;
DoubleAlign = 64;
- LongDoubleWidth = 64;
- LongDoubleAlign = 64;
+ LongDoubleWidth = TripleLayouts.LongDoubleWidth;
+ LongDoubleAlign = TripleLayouts.LongDoubleAlign;
Float128Align = 128;
Ibm128Align = 128;
LargeArrayMinWidth = 0;
@@ -144,7 +148,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) {
HalfFormat = &llvm::APFloat::IEEEhalf();
FloatFormat = &llvm::APFloat::IEEEsingle();
DoubleFormat = &llvm::APFloat::IEEEdouble();
- LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+ LongDoubleFormat = TripleLayouts.LongDoubleFormat;
Float128Format = &llvm::APFloat::IEEEquad();
Ibm128Format = &llvm::APFloat::PPCDoubleDouble();
MCountName = "mcount";
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index 2f8395cb8932f2..c3ed06ca550b61 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -156,8 +156,7 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
MaxAtomicInlineWidth = 128;
MaxAtomicPromoteWidth = 128;
- LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
- LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ SuitableAlign = LongDoubleAlign;
BFloat16Width = BFloat16Align = 16;
BFloat16Format = &llvm::APFloat::BFloat();
@@ -1452,8 +1451,6 @@ WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
IntWidth = IntAlign = 32;
LongWidth = LongAlign = 32;
DoubleAlign = LongLongAlign = 64;
- LongDoubleWidth = LongDoubleAlign = 64;
- LongDoubleFormat = &llvm::APFloat::IEEEdouble();
IntMaxType = SignedLongLong;
Int64Type = SignedLongLong;
SizeType = UnsignedLongLong;
@@ -1550,8 +1547,7 @@ DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
WCharType = SignedInt;
UseSignedCharForObjCBool = false;
- LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
- LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+ SuitableAlign = LongDoubleAlign;
UseZeroLengthBitfieldAlignment = false;
diff --git a/clang/lib/Basic/Targets/ARC.h b/clang/lib/Basic/Targets/ARC.h
index fcbfdd6eec5862..b9da57e6e16448 100644
--- a/clang/lib/Basic/Targets/ARC.h
+++ b/clang/lib/Basic/Targets/ARC.h
@@ -28,7 +28,7 @@ class LLVM_LIBRARY_VISIBILITY ARCTargetInfo : public TargetInfo {
NoAsmVariants = true;
LongLongAlign = 32;
SuitableAlign = 32;
- DoubleAlign = LongDoubleAlign = 32;
+ DoubleAlign = 32;
SizeType = UnsignedInt;
PtrDiffType = SignedInt;
IntPtrType = SignedInt;
diff --git a/clang/lib/Basic/Targets/AVR.h b/clang/lib/Basic/Targets/AVR.h
index 854a51d78c393b..38847b057991f3 100644
--- a/clang/lib/Basic/Targets/AVR.h
+++ b/clang/lib/Basic/Targets/AVR.h
@@ -44,9 +44,6 @@ class LLVM_LIBRARY_VISIBILITY AVRTargetInfo : public TargetInfo {
DoubleWidth = 32;
DoubleAlign = 8;
DoubleFormat = &llvm::APFloat::IEEEsingle();
- LongDoubleWidth = 32;
- LongDoubleAlign = 8;
- LongDoubleFormat = &llvm::APFloat::IEEEsingle();
SizeType = UnsignedInt;
PtrDiffType = SignedInt;
IntPtrType = SignedInt;
diff --git a/clang/lib/Basic/Targets/CSKY.h b/clang/lib/Basic/Targets/CSKY.h
index 11404e37db368a..c600c0fe02152c 100644
--- a/clang/lib/Basic/Targets/CSKY.h
+++ b/clang/lib/Basic/Targets/CSKY.h
@@ -43,7 +43,7 @@ class LLVM_LIBRARY_VISIBILITY CSKYTargetInfo : public TargetInfo {
NoAsmVariants = true;
LongLongAlign = 32;
SuitableAlign = 32;
- DoubleAlign = LongDoubleAlign = 32;
+ DoubleAlign = 32;
SizeType = UnsignedInt;
PtrDiffType = SignedInt;
IntPtrType = SignedInt;
diff --git a/clang/lib/Basic/Targets/LoongArch.h b/clang/lib/Basic/Targets/LoongArch.h
index 3313102492cb8d..894d7ce9fa7781 100644
--- a/clang/lib/Basic/Targets/LoongArch.h
+++ b/clang/lib/Basic/Targets/LoongArch.h
@@ -37,9 +37,6 @@ class LLVM_LIBRARY_VISIBILITY LoongArchTargetInfo : public TargetInfo {
HasFeatureF = false;
HasFeatureLSX = false;
HasFeatureLASX = false;
- LongDoubleWidth = 128;
- LongDoubleAlign = 128;
- LongDoubleFormat = &llvm::APFloat::IEEEquad();
MCountName = "_mcount";
SuitableAlign = 128;
WCharType = SignedInt;
diff --git a/clang/lib/Basic/Targets/MSP430.h b/clang/lib/Basic/Targets/MSP430.h
index 25639b8c1e0ad9..d692398a80aee8 100644
--- a/clang/lib/Basic/Targets/MSP430.h
+++ b/clang/lib/Basic/Targets/MSP430.h
@@ -35,8 +35,8 @@ class LLVM_LIBRARY_VISIBILITY MSP430TargetInfo : public TargetInfo {
LongAlign = LongLongAlign = 16;
FloatWidth = 32;
FloatAlign = 16;
- DoubleWidth = LongDoubleWidth = 64;
- DoubleAlign = LongDoubleAlign = 16;
+ DoubleWidth = 64;
+ DoubleAlign = 16;
PointerWidth = 16;
PointerAlign = 16;
SuitableAlign = 16;
diff --git a/clang/lib/Basic/Targets/Mips.h b/clang/lib/Basic/Targets/Mips.h
index f46b95abfd75c7..3387adfd51ddae 100644
--- a/clang/lib/Basic/Targets/Mips.h
+++ b/clang/lib/Basic/Targets/Mips.h
@@ -118,8 +118,6 @@ class LLVM_LIBRARY_VISIBILITY MipsTargetInfo : public TargetInfo {
void setO32ABITypes() {
Int64Type = SignedLongLong;
IntMaxType = Int64Type;
- LongDoubleFormat = &llvm::APFloat::IEEEdouble();
- LongDoubleWidth = LongDoubleAlign = 64;
LongWidth = LongAlign = 32;
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
PointerWidth = PointerAlign = 32;
@@ -129,12 +127,6 @@ class LLVM_LIBRARY_VISIBILITY MipsTargetInfo : public TargetInfo {
}
void setN32N64ABITypes() {
- LongDoubleWidth = LongDoubleAlign = 128;
- LongDoubleFormat = &llvm::APFloat::IEEEquad();
- if (getTriple().isOSFreeBSD()) {
- LongDoubleWidth = LongDoubleAlign = 64;
- LongDoubleFormat = &llvm::APFloat::IEEEdouble();
- }
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
SuitableAlign = 128;
}
diff --git a/clang/lib/Basic/Targets/RISCV.h b/clang/lib/Basic/Targets/RISCV.h
index f98c88cd45f831..53a35c1cb75b99 100644
--- a/clang/lib/Basic/Targets/RISCV.h
+++ b/clang/lib/Basic/Targets/RISCV.h
@@ -39,9 +39,6 @@ class RISCVTargetInfo : public TargetInfo {
BFloat16Width = 16;
BFloat16Align = 16;
BFloat16Format = &llvm::APFloat::BFloat();
- LongDoubleWidth = 128;
- LongDoubleAlign = 128;
- LongDoubleFormat = &llvm::APFloat::IEEEquad();
SuitableAlign = 128;
WCharType = SignedInt;
WIntType = UnsignedInt;
diff --git a/clang/lib/Basic/Targets/Sparc.h b/clang/lib/Basic/Targets/Sparc.h
index 214fef88e1dcd4..a226932421ac50 100644
--- a/clang/lib/Basic/Targets/Sparc.h
+++ b/clang/lib/Basic/Targets/Sparc.h
@@ -205,12 +205,8 @@ class LLVM_LIBRARY_VISIBILITY SparcV9TargetInfo : public SparcTargetInfo {
IntMaxType = SignedLong;
Int64Type = IntMaxType;
- // The SPARCv8 System V ABI has long double 128-bits in size, but 64-bit
- // aligned. The SPARCv9 SCD 2.4.1 says 16-byte aligned.
- LongDoubleWidth = 128;
- LongDoubleAlign = 128;
SuitableAlign = 128;
- LongDoubleFormat = &llvm::APFloat::IEEEquad();
+
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
}
diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index e4ec338880f210..e244fd0110ab67 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -40,9 +40,6 @@ class LLVM_LIBRARY_VISIBILITY SystemZTargetInfo : public TargetInfo {
LongWidth = LongLongWidth = LongAlign = LongLongAlign = 64;
Int128Align = 64;
PointerWidth = PointerAlign = 64;
- LongDoubleWidth = 128;
- LongDoubleAlign = 64;
- LongDoubleFormat = &llvm::APFloat::IEEEquad();
DefaultAlignForAttributeAligned = 64;
MinGlobalAlign = 16;
if (Triple.isOSzOS()) {
diff --git a/clang/lib/Basic/Targets/TCE.h b/clang/lib/Basic/Targets/TCE.h
index dcf684fe6dbc01..71ec11a7724f5d 100644
--- a/clang/lib/Basic/Targets/TCE.h
+++ b/clang/lib/Basic/Targets/TCE.h
@@ -76,11 +76,8 @@ class LLVM_LIBRARY_VISIBILITY TCETargetInfo : public TargetInfo {
FloatAlign = 32;
DoubleWidth = 32;
DoubleAlign = 32;
- LongDoubleWidth = 32;
- LongDoubleAlign = 32;
FloatFormat = &llvm::APFloat::IEEEsingle();
DoubleFormat = &llvm::APFloat::IEEEsingle();
- LongDoubleFormat = &llvm::APFloat::IEEEsingle();
resetDataLayout("E-p:32:32:32-i1:8:8-i8:8:32-"
"i16:16:32-i32:32:32-i64:32:32-"
"f32:32:32-f64:32:32-v64:32:32-"
diff --git a/clang/lib/Basic/Targets/VE.h b/clang/lib/Basic/Targets/VE.h
index ea9a092cad8090..09ca50b9dea0d1 100644
--- a/clang/lib/Basic/Targets/VE.h
+++ b/clang/lib/Basic/Targets/VE.h
@@ -27,9 +27,6 @@ class LLVM_LIBRARY_VISIBILITY VETargetInfo : public TargetInfo {
VETargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple) {
NoAsmVariants = true;
- LongDoubleWidth = 128;
- LongDoubleAlign = 128;
- LongDoubleFormat = &llvm::APFloat::IEEEquad();
DoubleAlign = LongLongAlign = 64;
SuitableAlign = 64;
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
diff --git a/clang/lib/Basic/Targets/WebAssembly.h b/clang/lib/Basic/Targets/WebAssembly.h
index 83b1711f9fdf6a..916692786722cd 100644
--- a/clang/lib/Basic/Targets/WebAssembly.h
+++ b/clang/lib/Basic/Targets/WebAssembly.h
@@ -76,8 +76,6 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyTargetInfo : public TargetInfo {
LargeArrayMinWidth = 128;
LargeArrayAlign = 128;
SigAtomicType = SignedLong;
- LongDoubleWidth = LongDoubleAlign = 128;
- LongDoubleFormat = &llvm::APFloat::IEEEquad();
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
// size_t being unsigned long for both wasm32 and wasm64 makes mangled names
// more consistent between the two.
diff --git a/clang/lib/Basic/Targets/X86.h b/clang/lib/Basic/Targets/X86.h
index 0ab1c10833db26..f944726587beda 100644
--- a/clang/lib/Basic/Targets/X86.h
+++ b/clang/lib/Basic/Targets/X86.h
@@ -185,7 +185,6 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
: TargetInfo(Triple) {
BFloat16Width = BFloat16Align = 16;
BFloat16Format = &llvm::APFloat::BFloat();
- LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
AddrSpaceMap = &X86AddrSpaceMap;
HasStrictFP = true;
@@ -437,8 +436,6 @@ class LLVM_LIBRARY_VISIBILITY X86_32TargetInfo : public X86TargetInfo {
X86_32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: X86TargetInfo(Triple, Opts) {
DoubleAlign = LongLongAlign = 32;
- LongDoubleWidth = 96;
- LongDoubleAlign = 32;
SuitableAlign = 128;
resetDataLayout(Triple.isOSBinFormatMachO()
? "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:"
@@ -540,8 +537,6 @@ class LLVM_LIBRARY_VISIBILITY DarwinI386TargetInfo
public:
DarwinI386TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: DarwinTargetInfo<X86_32TargetInfo>(Triple, Opts) {
- LongDoubleWidth = 128;
- LongDoubleAlign = 128;
SuitableAlign = 128;
MaxVectorAlign = 256;
// The watchOS simulator uses the builtin bool type for Objective-C.
@@ -987,8 +982,6 @@ class LLVM_LIBRARY_VISIBILITY OHOSX86_32TargetInfo
OHOSX86_32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OHOSTargetInfo<X86_32TargetInfo>(Triple, Opts) {
SuitableAlign = 32;
- LongDoubleWidth = 64;
- LongDoubleFormat = &llvm::APFloat::IEEEdouble();
}
};
@@ -997,9 +990,7 @@ class LLVM_LIBRARY_VISIBILITY OHOSX86_64TargetInfo
: public OHOSTargetInfo<X86_64TargetInfo> {
public:
OHOSX86_64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
- : OHOSTargetInfo<X86_64TargetInfo>(Triple, Opts) {
- LongDoubleFormat = &llvm::APFloat::IEEEquad();
- }
+ : OHOSTargetInfo<X86_64TargetInfo>(Triple, Opts) {}
};
} // namespace targets
} // namespace clang
diff --git a/clang/lib/Basic/Targets/XCore.h b/clang/lib/Basic/Targets/XCore.h
index a58d3e8acf4791..e96584d6c1db5a 100644
--- a/clang/lib/Basic/Targets/XCore.h
+++ b/clang/lib/Basic/Targets/XCore.h
@@ -29,7 +29,7 @@ class LLVM_LIBRARY_VISIBILITY XCoreTargetInfo : public TargetInfo {
NoAsmVariants = true;
LongLongAlign = 32;
SuitableAlign = 32;
- DoubleAlign = LongDoubleAlign = 32;
+ DoubleAlign = 32;
SizeType = UnsignedInt;
PtrDiffType = SignedInt;
IntPtrType = SignedInt;
diff --git a/llvm/include/llvm/TargetParser/Triple.h b/llvm/include/llvm/TargetParser/Triple.h
index 47904621c0967f..8bc9f03b789f84 100644
--- a/llvm/include/llvm/TargetParser/Triple.h
+++ b/llvm/include/llvm/TargetParser/Triple.h
@@ -9,6 +9,7 @@
#ifndef LLVM_TARGETPARSER_TRIPLE_H
#define LLVM_TARGETPARSER_TRIPLE_H
+#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/VersionTuple.h"
@@ -1155,6 +1156,23 @@ class Triple {
/// Returns a canonicalized OS version number for the specified OS.
static VersionTuple getCanonicalVersionForOS(OSType OSKind,
const VersionTuple &Version);
+
+ /// Layouts for C types that are relevant to libc calls generated by LLVM
+ struct CLayouts {
+ unsigned char LongDoubleWidth;
+ unsigned char LongDoubleAlign;
+ const fltSemantics *LongDoubleFormat;
+ };
+
+ /// Provide default layouts relevant to C. Frontends may override these
+ /// values.
+ CLayouts getCLayouts() const;
+
+ /// Return true if `long double` and `__float128` have the same layout.
+ bool isLongDoubleF128() const {
+ // TODO: do we also need to check alignment?
+ return getCLayouts().LongDoubleWidth == 128;
+ }
};
} // End llvm namespace
diff --git a/llvm/lib/TargetParser/Triple.cpp b/llvm/lib/TargetParser/Triple.cpp
index e93502187b5492..a2f3cb8db39e03 100644
--- a/llvm/lib/TargetParser/Triple.cpp
+++ b/llvm/lib/TargetParser/Triple.cpp
@@ -1897,6 +1897,124 @@ VersionTuple Triple::getCanonicalVersionForOS(OSType OSKind,
}
}
+Triple::CLayouts Triple::getCLayouts() const {
+ Triple::CLayouts Layouts;
+
+ // Default to a 32-bit RISC platform
+ Layouts.LongDoubleWidth = 64;
+ Layouts.LongDoubleAlign = 64;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+
+ enum ArchType arch = getArch();
+
+ if (arch == aarch64 || arch == aarch64_be || arch == aarch64_32) {
+ Layouts.LongDoubleWidth = 128;
+ Layouts.LongDoubleAlign = 128;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+
+ // TODO: verify this logic matches when WindowsARM64TargetInfo /
+ // DarwinAArch64TargetInfo is called
+ if (isOSWindows()) {
+ Layouts.LongDoubleWidth = Layouts.LongDoubleAlign = 64;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+ } else if (isMacOSX()) {
+ // TODO: should this just be isMacOSX or check specifically for darwin?
+ Layouts.LongDoubleWidth = Layouts.LongDoubleAlign = 64;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+ }
+ } else if (arch == avr) {
+ Layouts.LongDoubleWidth = 32;
+ Layouts.LongDoubleAlign = 8;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEsingle();
+ } else if (arch == arc) {
+ Layouts.LongDoubleAlign = 32;
+ } else if (arch == arm) {
+ // TODO: port the logic
+ } else if (arch == csky) {
+ Layouts.LongDoubleAlign = 32;
+ } else if (arch == loongarch32 || arch == loongarch64) {
+ Layouts.LongDoubleWidth = Layouts.LongDoubleAlign = 128;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ } else if (arch == mips || arch == mipsel || arch == mips64 ||
+ arch == mips64el) {
+ if (isMIPS32()) {
+ // o32
+ Layouts.LongDoubleWidth = Layouts.LongDoubleAlign = 64;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+ } else {
+ // n32 & n64
+ Layouts.LongDoubleWidth = Layouts.LongDoubleAlign = 128;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ if (isOSFreeBSD()) {
+ Layouts.LongDoubleWidth = Layouts.LongDoubleAlign = 64;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+ }
+ }
+ } else if (arch == msp430) {
+ Layouts.LongDoubleWidth = 64;
+ Layouts.LongDoubleAlign = 16;
+ } else if (arch == ppc || arch == ppcle || arch == ppc64 || arch == ppc64le) {
+ // TODO: figure out how to get features
+ } else if (arch == riscv32 || arch == riscv64) {
+ Layouts.LongDoubleWidth = 128;
+ Layouts.LongDoubleAlign = 128;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ } else if (arch == sparcv9) {
+ // The SPARCv8 System V ABI has long double 128-bits in size, but 64-bit
+ // aligned. The SPARCv9 SCD 2.4.1 says 16-byte aligned.
+ Layouts.LongDoubleWidth = 128;
+ Layouts.LongDoubleAlign = 128;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ } else if (arch == systemz) {
+ Layouts.LongDoubleWidth = 128;
+ Layouts.LongDoubleAlign = 64;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ } else if (arch == tce || arch == tcele) {
+ Layouts.LongDoubleWidth = 32;
+ Layouts.LongDoubleAlign = 32;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEsingle();
+ } else if (arch == ve) {
+ Layouts.LongDoubleWidth = 128;
+ Layouts.LongDoubleAlign = 128;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ } else if (arch == wasm32 || arch == wasm64) {
+ Layouts.LongDoubleWidth = Layouts.LongDoubleAlign = 128;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ } else if (arch == x86) {
+ Layouts.LongDoubleWidth = 96;
+ Layouts.LongDoubleAlign = 32;
+ Layouts.LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
+ if (isMacOSX()) {
+ Layouts.LongDoubleWidth = 128;
+ Layouts.LongDoubleAlign = 128;
+ } else if (isOSWindows()) {
+ // TODO: how to get difference between WindowsX86_32TargetInfo,
+ // MicrosoftX86_32TargetInfo, CygwinX86_64TargetInfo, and
+ // MinGWX86_32TargetInfo
+ Layouts.LongDoubleWidth = Layouts.LongDoubleAlign = 64;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+ } else if (isOSIAMCU()) {
+ Layouts.LongDoubleWidth = 64;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+ } else if (isOHOSFamily()) {
+ Layouts.LongDoubleWidth = 64;
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+ }
+ } else if (arch == x86_64) {
+ Layouts.LongDoubleWidth = 128;
+ Layouts.LongDoubleAlign = 128;
+ Layouts.LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
+
+ if (isOHOSFamily()) {
+ Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ }
+ } else if (arch == xcore) {
+ Layouts.LongDoubleAlign = 32;
+ }
+
+ return Layouts;
+}
+
// HLSL triple environment orders are relied on in the front end
static_assert(Triple::Vertex - Triple::Pixel == 1,
"incorrect HLSL stage order");
>From ae4f2c4412a785c2691faddf93a34e6b289f75a7 Mon Sep 17 00:00:00 2001
From: Trevor Gross <tmgross at umich.edu>
Date: Thu, 28 Dec 2023 04:01:22 -0500
Subject: [PATCH 3/4] [IR] Change `fp128` lowering to use `f128` functions by
default
Switch from emitting `long double` functions to emitting `f128`-specific functions.
Fixes https://github.com/llvm/llvm-project/issues/44744.
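The net effect is easiest to see by expanding the table itself. Below is a sketch
(not part of the patch) that prints the F128 rows the same way the code
generator's libcall setup expands them; it assumes LLVM's include directory is on
the include path:

  #include <cstdio>
  #include <cstring>

  int main() {
    // Each F128 entry now resolves to the dedicated *f128 symbol (for example
    // "SQRT_F128 -> sqrtf128") instead of aliasing the long double name.
  #define HANDLE_LIBCALL(code, name)                                           \
    do {                                                                       \
      const char *N = name;                                                    \
      if (N && std::strstr(#code, "_F128"))                                    \
        std::printf("%-20s -> %s\n", #code, N);                                \
    } while (0);
  #include "llvm/IR/RuntimeLibcalls.def"
    return 0;
  }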
---
llvm/include/llvm/IR/RuntimeLibcalls.def | 68 +--
llvm/test/CodeGen/X86/f128-arith.ll | 515 +++++++++++++++++++++++
2 files changed, 549 insertions(+), 34 deletions(-)
create mode 100644 llvm/test/CodeGen/X86/f128-arith.ll
diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.def b/llvm/include/llvm/IR/RuntimeLibcalls.def
index 19dea60bebf9be..7fce540612856a 100644
--- a/llvm/include/llvm/IR/RuntimeLibcalls.def
+++ b/llvm/include/llvm/IR/RuntimeLibcalls.def
@@ -110,12 +110,12 @@ HANDLE_LIBCALL(DIV_PPCF128, "__gcc_qdiv")
HANDLE_LIBCALL(REM_F32, "fmodf")
HANDLE_LIBCALL(REM_F64, "fmod")
HANDLE_LIBCALL(REM_F80, "fmodl")
-HANDLE_LIBCALL(REM_F128, "fmodl")
+HANDLE_LIBCALL(REM_F128, "fmodf128")
HANDLE_LIBCALL(REM_PPCF128, "fmodl")
HANDLE_LIBCALL(FMA_F32, "fmaf")
HANDLE_LIBCALL(FMA_F64, "fma")
HANDLE_LIBCALL(FMA_F80, "fmal")
-HANDLE_LIBCALL(FMA_F128, "fmal")
+HANDLE_LIBCALL(FMA_F128, "fmaf128")
HANDLE_LIBCALL(FMA_PPCF128, "fmal")
HANDLE_LIBCALL(POWI_F32, "__powisf2")
HANDLE_LIBCALL(POWI_F64, "__powidf2")
@@ -125,62 +125,62 @@ HANDLE_LIBCALL(POWI_PPCF128, "__powitf2")
HANDLE_LIBCALL(SQRT_F32, "sqrtf")
HANDLE_LIBCALL(SQRT_F64, "sqrt")
HANDLE_LIBCALL(SQRT_F80, "sqrtl")
-HANDLE_LIBCALL(SQRT_F128, "sqrtl")
+HANDLE_LIBCALL(SQRT_F128, "sqrtf128")
HANDLE_LIBCALL(SQRT_PPCF128, "sqrtl")
HANDLE_LIBCALL(CBRT_F32, "cbrtf")
HANDLE_LIBCALL(CBRT_F64, "cbrt")
HANDLE_LIBCALL(CBRT_F80, "cbrtl")
-HANDLE_LIBCALL(CBRT_F128, "cbrtl")
+HANDLE_LIBCALL(CBRT_F128, "cbrtf128")
HANDLE_LIBCALL(CBRT_PPCF128, "cbrtl")
HANDLE_LIBCALL(LOG_F32, "logf")
HANDLE_LIBCALL(LOG_F64, "log")
HANDLE_LIBCALL(LOG_F80, "logl")
-HANDLE_LIBCALL(LOG_F128, "logl")
+HANDLE_LIBCALL(LOG_F128, "logf128")
HANDLE_LIBCALL(LOG_PPCF128, "logl")
HANDLE_LIBCALL(LOG_FINITE_F32, "__logf_finite")
HANDLE_LIBCALL(LOG_FINITE_F64, "__log_finite")
HANDLE_LIBCALL(LOG_FINITE_F80, "__logl_finite")
-HANDLE_LIBCALL(LOG_FINITE_F128, "__logl_finite")
+HANDLE_LIBCALL(LOG_FINITE_F128, "__logf128_finite")
HANDLE_LIBCALL(LOG_FINITE_PPCF128, "__logl_finite")
HANDLE_LIBCALL(LOG2_F32, "log2f")
HANDLE_LIBCALL(LOG2_F64, "log2")
HANDLE_LIBCALL(LOG2_F80, "log2l")
-HANDLE_LIBCALL(LOG2_F128, "log2l")
+HANDLE_LIBCALL(LOG2_F128, "log2f128")
HANDLE_LIBCALL(LOG2_PPCF128, "log2l")
HANDLE_LIBCALL(LOG2_FINITE_F32, "__log2f_finite")
HANDLE_LIBCALL(LOG2_FINITE_F64, "__log2_finite")
HANDLE_LIBCALL(LOG2_FINITE_F80, "__log2l_finite")
-HANDLE_LIBCALL(LOG2_FINITE_F128, "__log2l_finite")
+HANDLE_LIBCALL(LOG2_FINITE_F128, "__log2f128_finite")
HANDLE_LIBCALL(LOG2_FINITE_PPCF128, "__log2l_finite")
HANDLE_LIBCALL(LOG10_F32, "log10f")
HANDLE_LIBCALL(LOG10_F64, "log10")
HANDLE_LIBCALL(LOG10_F80, "log10l")
-HANDLE_LIBCALL(LOG10_F128, "log10l")
+HANDLE_LIBCALL(LOG10_F128, "log10f128")
HANDLE_LIBCALL(LOG10_PPCF128, "log10l")
HANDLE_LIBCALL(LOG10_FINITE_F32, "__log10f_finite")
HANDLE_LIBCALL(LOG10_FINITE_F64, "__log10_finite")
HANDLE_LIBCALL(LOG10_FINITE_F80, "__log10l_finite")
-HANDLE_LIBCALL(LOG10_FINITE_F128, "__log10l_finite")
+HANDLE_LIBCALL(LOG10_FINITE_F128, "__log10f128_finite")
HANDLE_LIBCALL(LOG10_FINITE_PPCF128, "__log10l_finite")
HANDLE_LIBCALL(EXP_F32, "expf")
HANDLE_LIBCALL(EXP_F64, "exp")
HANDLE_LIBCALL(EXP_F80, "expl")
-HANDLE_LIBCALL(EXP_F128, "expl")
+HANDLE_LIBCALL(EXP_F128, "expf128")
HANDLE_LIBCALL(EXP_PPCF128, "expl")
HANDLE_LIBCALL(EXP_FINITE_F32, "__expf_finite")
HANDLE_LIBCALL(EXP_FINITE_F64, "__exp_finite")
HANDLE_LIBCALL(EXP_FINITE_F80, "__expl_finite")
-HANDLE_LIBCALL(EXP_FINITE_F128, "__expl_finite")
+HANDLE_LIBCALL(EXP_FINITE_F128, "__expf128_finite")
HANDLE_LIBCALL(EXP_FINITE_PPCF128, "__expl_finite")
HANDLE_LIBCALL(EXP2_F32, "exp2f")
HANDLE_LIBCALL(EXP2_F64, "exp2")
HANDLE_LIBCALL(EXP2_F80, "exp2l")
-HANDLE_LIBCALL(EXP2_F128, "exp2l")
+HANDLE_LIBCALL(EXP2_F128, "exp2f128")
HANDLE_LIBCALL(EXP2_PPCF128, "exp2l")
HANDLE_LIBCALL(EXP2_FINITE_F32, "__exp2f_finite")
HANDLE_LIBCALL(EXP2_FINITE_F64, "__exp2_finite")
HANDLE_LIBCALL(EXP2_FINITE_F80, "__exp2l_finite")
-HANDLE_LIBCALL(EXP2_FINITE_F128, "__exp2l_finite")
+HANDLE_LIBCALL(EXP2_FINITE_F128, "__exp2f128_finite")
HANDLE_LIBCALL(EXP2_FINITE_PPCF128, "__exp2l_finite")
HANDLE_LIBCALL(EXP10_F32, "exp10f")
HANDLE_LIBCALL(EXP10_F64, "exp10")
@@ -190,12 +190,12 @@ HANDLE_LIBCALL(EXP10_PPCF128, "exp10l")
HANDLE_LIBCALL(SIN_F32, "sinf")
HANDLE_LIBCALL(SIN_F64, "sin")
HANDLE_LIBCALL(SIN_F80, "sinl")
-HANDLE_LIBCALL(SIN_F128, "sinl")
+HANDLE_LIBCALL(SIN_F128, "sinf128")
HANDLE_LIBCALL(SIN_PPCF128, "sinl")
HANDLE_LIBCALL(COS_F32, "cosf")
HANDLE_LIBCALL(COS_F64, "cos")
HANDLE_LIBCALL(COS_F80, "cosl")
-HANDLE_LIBCALL(COS_F128, "cosl")
+HANDLE_LIBCALL(COS_F128, "cosf128")
HANDLE_LIBCALL(COS_PPCF128, "cosl")
HANDLE_LIBCALL(SINCOS_F32, nullptr)
HANDLE_LIBCALL(SINCOS_F64, nullptr)
@@ -207,92 +207,92 @@ HANDLE_LIBCALL(SINCOS_STRET_F64, nullptr)
HANDLE_LIBCALL(POW_F32, "powf")
HANDLE_LIBCALL(POW_F64, "pow")
HANDLE_LIBCALL(POW_F80, "powl")
-HANDLE_LIBCALL(POW_F128, "powl")
+HANDLE_LIBCALL(POW_F128, "powf128")
HANDLE_LIBCALL(POW_PPCF128, "powl")
HANDLE_LIBCALL(POW_FINITE_F32, "__powf_finite")
HANDLE_LIBCALL(POW_FINITE_F64, "__pow_finite")
HANDLE_LIBCALL(POW_FINITE_F80, "__powl_finite")
-HANDLE_LIBCALL(POW_FINITE_F128, "__powl_finite")
+HANDLE_LIBCALL(POW_FINITE_F128, "__powf128_finite")
HANDLE_LIBCALL(POW_FINITE_PPCF128, "__powl_finite")
HANDLE_LIBCALL(CEIL_F32, "ceilf")
HANDLE_LIBCALL(CEIL_F64, "ceil")
HANDLE_LIBCALL(CEIL_F80, "ceill")
-HANDLE_LIBCALL(CEIL_F128, "ceill")
+HANDLE_LIBCALL(CEIL_F128, "ceilf128")
HANDLE_LIBCALL(CEIL_PPCF128, "ceill")
HANDLE_LIBCALL(TRUNC_F32, "truncf")
HANDLE_LIBCALL(TRUNC_F64, "trunc")
HANDLE_LIBCALL(TRUNC_F80, "truncl")
-HANDLE_LIBCALL(TRUNC_F128, "truncl")
+HANDLE_LIBCALL(TRUNC_F128, "truncf128")
HANDLE_LIBCALL(TRUNC_PPCF128, "truncl")
HANDLE_LIBCALL(RINT_F32, "rintf")
HANDLE_LIBCALL(RINT_F64, "rint")
HANDLE_LIBCALL(RINT_F80, "rintl")
-HANDLE_LIBCALL(RINT_F128, "rintl")
+HANDLE_LIBCALL(RINT_F128, "rintf128")
HANDLE_LIBCALL(RINT_PPCF128, "rintl")
HANDLE_LIBCALL(NEARBYINT_F32, "nearbyintf")
HANDLE_LIBCALL(NEARBYINT_F64, "nearbyint")
HANDLE_LIBCALL(NEARBYINT_F80, "nearbyintl")
-HANDLE_LIBCALL(NEARBYINT_F128, "nearbyintl")
+HANDLE_LIBCALL(NEARBYINT_F128, "nearbyintf128")
HANDLE_LIBCALL(NEARBYINT_PPCF128, "nearbyintl")
HANDLE_LIBCALL(ROUND_F32, "roundf")
HANDLE_LIBCALL(ROUND_F64, "round")
HANDLE_LIBCALL(ROUND_F80, "roundl")
-HANDLE_LIBCALL(ROUND_F128, "roundl")
+HANDLE_LIBCALL(ROUND_F128, "roundf128")
HANDLE_LIBCALL(ROUND_PPCF128, "roundl")
HANDLE_LIBCALL(ROUNDEVEN_F32, "roundevenf")
HANDLE_LIBCALL(ROUNDEVEN_F64, "roundeven")
HANDLE_LIBCALL(ROUNDEVEN_F80, "roundevenl")
-HANDLE_LIBCALL(ROUNDEVEN_F128, "roundevenl")
+HANDLE_LIBCALL(ROUNDEVEN_F128, "roundevenf128")
HANDLE_LIBCALL(ROUNDEVEN_PPCF128, "roundevenl")
HANDLE_LIBCALL(FLOOR_F32, "floorf")
HANDLE_LIBCALL(FLOOR_F64, "floor")
HANDLE_LIBCALL(FLOOR_F80, "floorl")
-HANDLE_LIBCALL(FLOOR_F128, "floorl")
+HANDLE_LIBCALL(FLOOR_F128, "floorf128")
HANDLE_LIBCALL(FLOOR_PPCF128, "floorl")
HANDLE_LIBCALL(COPYSIGN_F32, "copysignf")
HANDLE_LIBCALL(COPYSIGN_F64, "copysign")
HANDLE_LIBCALL(COPYSIGN_F80, "copysignl")
-HANDLE_LIBCALL(COPYSIGN_F128, "copysignl")
+HANDLE_LIBCALL(COPYSIGN_F128, "copysignf128")
HANDLE_LIBCALL(COPYSIGN_PPCF128, "copysignl")
HANDLE_LIBCALL(FMIN_F32, "fminf")
HANDLE_LIBCALL(FMIN_F64, "fmin")
HANDLE_LIBCALL(FMIN_F80, "fminl")
-HANDLE_LIBCALL(FMIN_F128, "fminl")
+HANDLE_LIBCALL(FMIN_F128, "fminf128")
HANDLE_LIBCALL(FMIN_PPCF128, "fminl")
HANDLE_LIBCALL(FMAX_F32, "fmaxf")
HANDLE_LIBCALL(FMAX_F64, "fmax")
HANDLE_LIBCALL(FMAX_F80, "fmaxl")
-HANDLE_LIBCALL(FMAX_F128, "fmaxl")
+HANDLE_LIBCALL(FMAX_F128, "fmaxf128")
HANDLE_LIBCALL(FMAX_PPCF128, "fmaxl")
HANDLE_LIBCALL(LROUND_F32, "lroundf")
HANDLE_LIBCALL(LROUND_F64, "lround")
HANDLE_LIBCALL(LROUND_F80, "lroundl")
-HANDLE_LIBCALL(LROUND_F128, "lroundl")
+HANDLE_LIBCALL(LROUND_F128, "lroundf128")
HANDLE_LIBCALL(LROUND_PPCF128, "lroundl")
HANDLE_LIBCALL(LLROUND_F32, "llroundf")
HANDLE_LIBCALL(LLROUND_F64, "llround")
HANDLE_LIBCALL(LLROUND_F80, "llroundl")
-HANDLE_LIBCALL(LLROUND_F128, "llroundl")
+HANDLE_LIBCALL(LLROUND_F128, "llroundf128")
HANDLE_LIBCALL(LLROUND_PPCF128, "llroundl")
HANDLE_LIBCALL(LRINT_F32, "lrintf")
HANDLE_LIBCALL(LRINT_F64, "lrint")
HANDLE_LIBCALL(LRINT_F80, "lrintl")
-HANDLE_LIBCALL(LRINT_F128, "lrintl")
+HANDLE_LIBCALL(LRINT_F128, "lrintf128")
HANDLE_LIBCALL(LRINT_PPCF128, "lrintl")
HANDLE_LIBCALL(LLRINT_F32, "llrintf")
HANDLE_LIBCALL(LLRINT_F64, "llrint")
HANDLE_LIBCALL(LLRINT_F80, "llrintl")
-HANDLE_LIBCALL(LLRINT_F128, "llrintl")
+HANDLE_LIBCALL(LLRINT_F128, "llrintf128")
HANDLE_LIBCALL(LLRINT_PPCF128, "llrintl")
HANDLE_LIBCALL(LDEXP_F32, "ldexpf")
HANDLE_LIBCALL(LDEXP_F64, "ldexp")
HANDLE_LIBCALL(LDEXP_F80, "ldexpl")
-HANDLE_LIBCALL(LDEXP_F128, "ldexpl")
+HANDLE_LIBCALL(LDEXP_F128, "ldexpf128")
HANDLE_LIBCALL(LDEXP_PPCF128, "ldexpl")
HANDLE_LIBCALL(FREXP_F32, "frexpf")
HANDLE_LIBCALL(FREXP_F64, "frexp")
HANDLE_LIBCALL(FREXP_F80, "frexpl")
-HANDLE_LIBCALL(FREXP_F128, "frexpl")
+HANDLE_LIBCALL(FREXP_F128, "frexpf128")
HANDLE_LIBCALL(FREXP_PPCF128, "frexpl")
// Floating point environment
diff --git a/llvm/test/CodeGen/X86/f128-arith.ll b/llvm/test/CodeGen/X86/f128-arith.ll
new file mode 100644
index 00000000000000..a65d9039cb69b2
--- /dev/null
+++ b/llvm/test/CodeGen/X86/f128-arith.ll
@@ -0,0 +1,515 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=i686-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-64
+;
+; Test lowering of fp128 intrinsics
+
+define fp128 @test_cbrtf128(fp128 %a) {
+; CHECK-32-LABEL: test_cbrtf128:
+; CHECK-32: calll llvm.cbrt.f128 at PLT
+;
+; CHECK-64-LABEL: test_cbrtf128:
+; CHECK-64: jmp llvm.cbrt.f128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.cbrt.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.cbrt.f128(fp128)
+
+
+define fp128 @test_ceilf128(fp128 %a) {
+; CHECK-32-LABEL: test_ceilf128:
+; CHECK-32: calll ceilf128
+;
+; CHECK-64-LABEL: test_ceilf128:
+; CHECK-64: jmp ceilf128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.ceil.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.ceil.f128(fp128)
+
+
+define fp128 @test_copysignf128(fp128 %a, fp128 %b) {
+; CHECK-32-LABEL: test_copysignf128:
+; CHECK-32: # %bb.0: # %start
+; CHECK-32-NEXT: pushl %ebx
+; CHECK-32-NEXT: .cfi_def_cfa_offset 8
+; CHECK-32-NEXT: pushl %edi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 12
+; CHECK-32-NEXT: pushl %esi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 16
+; CHECK-32-NEXT: .cfi_offset %esi, -16
+; CHECK-32-NEXT: .cfi_offset %edi, -12
+; CHECK-32-NEXT: .cfi_offset %ebx, -8
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK-32-NEXT: movl $-2147483648, %edi # imm = 0x80000000
+; CHECK-32-NEXT: andl {{[0-9]+}}(%esp), %edi
+; CHECK-32-NEXT: movl $2147483647, %ebx # imm = 0x7FFFFFFF
+; CHECK-32-NEXT: andl {{[0-9]+}}(%esp), %ebx
+; CHECK-32-NEXT: orl %edi, %ebx
+; CHECK-32-NEXT: movl %ebx, 12(%eax)
+; CHECK-32-NEXT: movl %esi, 8(%eax)
+; CHECK-32-NEXT: movl %edx, 4(%eax)
+; CHECK-32-NEXT: movl %ecx, (%eax)
+; CHECK-32-NEXT: popl %esi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 12
+; CHECK-32-NEXT: popl %edi
+; CHECK-32-NEXT: .cfi_def_cfa_offset 8
+; CHECK-32-NEXT: popl %ebx
+; CHECK-32-NEXT: .cfi_def_cfa_offset 4
+; CHECK-32-NEXT: retl $4
+;
+; CHECK-64-LABEL: test_copysignf128:
+; CHECK-64: # %bb.0: # %start
+; CHECK-64-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-64-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-64-NEXT: orps %xmm1, %xmm0
+; CHECK-64-NEXT: retq
+; FIXME: calling long double rather than f128 function
+; FIXME: calling long double rather than f128 function
+start:
+ %0 = tail call fp128 @llvm.copysign.f128(fp128 %a, fp128 %b)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.copysign.f128(fp128, fp128)
+
+
+define fp128 @test_cosf128(fp128 %a) {
+; CHECK-32-LABEL: test_cosf128:
+; CHECK-32: calll cosf128
+;
+; CHECK-64-LABEL: test_cosf128:
+; CHECK-64: jmp cosf128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.cos.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.cos.f128(fp128)
+
+
+define fp128 @test_exp2f128(fp128 %a) {
+; CHECK-32-LABEL: test_exp2f128:
+; CHECK-32: calll exp2f128
+;
+; CHECK-64-LABEL: test_exp2f128:
+; CHECK-64: jmp exp2f128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.exp2.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.exp2.f128(fp128)
+
+
+define fp128 @test___exp2f128_finite(fp128 %a) {
+; CHECK-32-LABEL: test___exp2f128_finite:
+; CHECK-32: calll llvm.__exp2f128_finite.f128 at PLT
+;
+; CHECK-64-LABEL: test___exp2f128_finite:
+; CHECK-64: jmp llvm.__exp2f128_finite.f128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.__exp2f128_finite.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.__exp2f128_finite.f128(fp128)
+
+
+define fp128 @test_expf128(fp128 %a) {
+; CHECK-32-LABEL: test_expf128:
+; CHECK-32: calll expf128
+;
+; CHECK-64-LABEL: test_expf128:
+; CHECK-64: jmp expf128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.exp.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.exp.f128(fp128)
+
+
+define fp128 @test___expf128_finite(fp128 %a) {
+; CHECK-32-LABEL: test___expf128_finite:
+; CHECK-32: calll llvm.__expf128_finite.f128 at PLT
+;
+; CHECK-64-LABEL: test___expf128_finite:
+; CHECK-64: jmp llvm.__expf128_finite.f128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.__expf128_finite.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.__expf128_finite.f128(fp128)
+
+
+define fp128 @test_floorf128(fp128 %a) {
+; CHECK-32-LABEL: test_floorf128:
+; CHECK-32: calll floorf128
+;
+; CHECK-64-LABEL: test_floorf128:
+; CHECK-64: jmp floorf128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.floor.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.floor.f128(fp128)
+
+
+define fp128 @test_fmaf128(fp128 %a, fp128 %b, fp128 %c) {
+; CHECK-32-LABEL: test_fmaf128:
+; CHECK-32: calll fmaf128
+;
+; CHECK-64-LABEL: test_fmaf128:
+; CHECK-64: jmp fmaf128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.fma.f128(fp128 %a, fp128 %b, fp128 %c)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.fma.f128(fp128, fp128, fp128)
+
+
+define fp128 @test_fmaxf128(fp128 %a, fp128 %b) {
+; CHECK-32-LABEL: test_fmaxf128:
+; CHECK-32: calll llvm.fmax.f128 at PLT
+;
+; CHECK-64-LABEL: test_fmaxf128:
+; CHECK-64: jmp llvm.fmax.f128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.fmax.f128(fp128 %a, fp128 %b)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.fmax.f128(fp128, fp128)
+
+
+define fp128 @test_fminf128(fp128 %a, fp128 %b) {
+; CHECK-32-LABEL: test_fminf128:
+; CHECK-32: calll llvm.fmin.f128 at PLT
+;
+; CHECK-64-LABEL: test_fminf128:
+; CHECK-64: jmp llvm.fmin.f128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.fmin.f128(fp128 %a, fp128 %b)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.fmin.f128(fp128, fp128)
+
+
+define fp128 @test_fmodf128(fp128 %a, fp128 %b) {
+; CHECK-32-LABEL: test_fmodf128:
+; CHECK-32: calll llvm.fmod.f128 at PLT
+;
+; CHECK-64-LABEL: test_fmodf128:
+; CHECK-64: jmp llvm.fmod.f128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.fmod.f128(fp128 %a, fp128 %b)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.fmod.f128(fp128, fp128)
+
+
+define { fp128, i32 } @test_frexpf128(fp128 %a) {
+; CHECK-32-LABEL: test_frexpf128:
+; CHECK-32: calll frexpf128
+;
+; CHECK-64-LABEL: test_frexpf128:
+; CHECK-64: callq frexpf128 at PLT
+start:
+ %0 = tail call { fp128, i32 } @llvm.frexp.f128(fp128 %a)
+ ret { fp128, i32 } %0
+}
+
+declare { fp128, i32 } @llvm.frexp.f128(fp128)
+
+
+define fp128 @test_ldexpf128(fp128 %a, i32 %b) {
+; CHECK-32-LABEL: test_ldexpf128:
+; CHECK-32: calll ldexpf128
+;
+; CHECK-64-LABEL: test_ldexpf128:
+; CHECK-64: jmp ldexpf128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.ldexp.f128(fp128 %a, i32 %b)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.ldexp.f128(fp128, i32)
+
+
+define i64 @test_llrintf128(fp128 %a) {
+; CHECK-32-LABEL: test_llrintf128:
+; CHECK-32: calll llrintf128
+;
+; CHECK-64-LABEL: test_llrintf128:
+; CHECK-64: jmp llrintf128 at PLT # TAILCALL
+start:
+ %0 = tail call i64 @llvm.llrint.f128(fp128 %a)
+ ret i64 %0
+}
+
+declare i64 @llvm.llrint.f128(fp128)
+
+
+define i64 @test_llroundf128(fp128 %a) {
+; CHECK-32-LABEL: test_llroundf128:
+; CHECK-32: calll llroundf128
+;
+; CHECK-64-LABEL: test_llroundf128:
+; CHECK-64: jmp llroundf128 at PLT # TAILCALL
+start:
+ %0 = tail call i64 @llvm.llround.i64.f128(fp128 %a)
+ ret i64 %0
+}
+
+declare i64 @llvm.llround.i64.f128(fp128)
+
+
+define fp128 @test_log10f128(fp128 %a) {
+; CHECK-32-LABEL: test_log10f128:
+; CHECK-32: calll log10f128
+;
+; CHECK-64-LABEL: test_log10f128:
+; CHECK-64: jmp log10f128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.log10.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.log10.f128(fp128)
+
+
+define fp128 @test___log10f128_finite(fp128 %a) {
+; CHECK-32-LABEL: test___log10f128_finite:
+; CHECK-32: calll llvm.__log10f128_finite.f128 at PLT
+;
+; CHECK-64-LABEL: test___log10f128_finite:
+; CHECK-64: jmp llvm.__log10f128_finite.f128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.__log10f128_finite.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.__log10f128_finite.f128(fp128)
+
+
+define fp128 @test_log2f128(fp128 %a) {
+; CHECK-32-LABEL: test_log2f128:
+; CHECK-32: calll log2f128
+;
+; CHECK-64-LABEL: test_log2f128:
+; CHECK-64: jmp log2f128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.log2.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.log2.f128(fp128)
+
+
+define fp128 @test___log2f128_finite(fp128 %a) {
+; CHECK-32-LABEL: test___log2f128_finite:
+; CHECK-32: calll llvm.__log2f128_finite.f128 at PLT
+;
+; CHECK-64-LABEL: test___log2f128_finite:
+; CHECK-64: jmp llvm.__log2f128_finite.f128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.__log2f128_finite.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.__log2f128_finite.f128(fp128)
+
+
+define fp128 @test_logf128(fp128 %a) {
+; CHECK-32-LABEL: test_logf128:
+; CHECK-32: calll logf128
+;
+; CHECK-64-LABEL: test_logf128:
+; CHECK-64: jmp logf128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.log.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.log.f128(fp128)
+
+
+define fp128 @test___logf128_finite(fp128 %a) {
+; CHECK-32-LABEL: test___logf128_finite:
+; CHECK-32: calll llvm.__logf128_finite.f128 at PLT
+;
+; CHECK-64-LABEL: test___logf128_finite:
+; CHECK-64: jmp llvm.__logf128_finite.f128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.__logf128_finite.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.__logf128_finite.f128(fp128)
+
+
+define i64 @test_lrintf128(fp128 %a) {
+; CHECK-32-LABEL: test_lrintf128:
+; CHECK-32: calll lrintf128
+;
+; CHECK-64-LABEL: test_lrintf128:
+; CHECK-64: jmp lrintf128 at PLT # TAILCALL
+start:
+ %0 = tail call i64 @llvm.lrint.i64.f128(fp128 %a)
+ ret i64 %0
+}
+
+declare i64 @llvm.lrint.i64.f128(fp128)
+
+
+define i64 @test_lroundf128(fp128 %a) {
+; CHECK-32-LABEL: test_lroundf128:
+; CHECK-32: calll lroundf128
+;
+; CHECK-64-LABEL: test_lroundf128:
+; CHECK-64: jmp lroundf128 at PLT # TAILCALL
+start:
+ %0 = tail call i64 @llvm.lround.i64.f128(fp128 %a)
+ ret i64 %0
+}
+
+declare i64 @llvm.lround.i64.f128(fp128)
+
+
+define fp128 @test_nearbyintf128(fp128 %a) {
+; CHECK-32-LABEL: test_nearbyintf128:
+; CHECK-32: calll nearbyintf128
+;
+; CHECK-64-LABEL: test_nearbyintf128:
+; CHECK-64: jmp nearbyintf128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.nearbyint.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.nearbyint.f128(fp128)
+
+
+define fp128 @test_powf128(fp128 %a, fp128 %b) {
+; CHECK-32-LABEL: test_powf128:
+; CHECK-32: calll powf128
+;
+; CHECK-64-LABEL: test_powf128:
+; CHECK-64: jmp powf128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.pow.f128(fp128 %a, fp128 %b)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.pow.f128(fp128, fp128)
+
+
+define fp128 @test___powf128_finite(fp128 %a, fp128 %b) {
+; CHECK-32-LABEL: test___powf128_finite:
+; CHECK-32: calll llvm.__powf128_finite.f128 at PLT
+;
+; CHECK-64-LABEL: test___powf128_finite:
+; CHECK-64: jmp llvm.__powf128_finite.f128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.__powf128_finite.f128(fp128 %a, fp128 %b)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.__powf128_finite.f128(fp128, fp128)
+
+
+define fp128 @test_rintf128(fp128 %a) {
+; CHECK-32-LABEL: test_rintf128:
+; CHECK-32: calll rintf128
+;
+; CHECK-64-LABEL: test_rintf128:
+; CHECK-64: jmp rintf128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.rint.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.rint.f128(fp128)
+
+
+define fp128 @test_roundevenf128(fp128 %a) {
+; CHECK-32-LABEL: test_roundevenf128:
+; CHECK-32: calll roundevenf128
+;
+; CHECK-64-LABEL: test_roundevenf128:
+; CHECK-64: jmp roundevenf128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.roundeven.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.roundeven.f128(fp128)
+
+
+define fp128 @test_roundf128(fp128 %a) {
+; CHECK-32-LABEL: test_roundf128:
+; CHECK-32: calll roundf128
+;
+; CHECK-64-LABEL: test_roundf128:
+; CHECK-64: jmp roundf128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.round.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.round.f128(fp128)
+
+
+define fp128 @test_sinf128(fp128 %a) {
+; CHECK-32-LABEL: test_sinf128:
+; CHECK-32: calll sinf128
+;
+; CHECK-64-LABEL: test_sinf128:
+; CHECK-64: jmp sinf128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.sin.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.sin.f128(fp128)
+
+
+define fp128 @test_sqrtf128(fp128 %a) {
+; CHECK-32-LABEL: test_sqrtf128:
+; CHECK-32: calll sqrtf128
+;
+; CHECK-64-LABEL: test_sqrtf128:
+; CHECK-64: jmp sqrtf128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.sqrt.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.sqrt.f128(fp128)
+
+
+define fp128 @test_truncf128(fp128 %a) {
+; CHECK-32-LABEL: test_truncf128:
+; CHECK-32: calll truncf128
+;
+; CHECK-64-LABEL: test_truncf128:
+; CHECK-64: jmp truncf128 at PLT # TAILCALL
+start:
+ %0 = tail call fp128 @llvm.trunc.f128(fp128 %a)
+ ret fp128 %0
+}
+
+declare fp128 @llvm.trunc.f128(fp128)
From 88da550f9cf6e6fd2b95395eae0c6bd29c439d31 Mon Sep 17 00:00:00 2001
From: Trevor Gross <tmgross at umich.edu>
Date: Fri, 29 Dec 2023 03:45:24 -0500
Subject: [PATCH 4/4] [IR] Use long double functions for `fp128` intrinsics
where possible
Functions such as `sinf128` are not always provided by libc. On targets where
`long double` has the IEEE binary128 (`f128`) layout, lower these intrinsics to
the equivalent `long double` functions such as `sinl` instead.
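As a rough sketch (illustration only, not part of this patch; the @demo_sin
name is hypothetical), on a target such as aarch64 where `long double` is IEEE
binary128, an fp128 intrinsic call like the following is expected to lower to
the `long double` libm entry point `sinl` rather than to `sinf128`:

define fp128 @demo_sin(fp128 %a) {
  ; on an ld == f128 target this should become a (tail) call to sinl
  %r = tail call fp128 @llvm.sin.f128(fp128 %a)
  ret fp128 %r
}

declare fp128 @llvm.sin.f128(fp128)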
---
llvm/lib/CodeGen/TargetLoweringBase.cpp | 38 +++++++++++++++++++++++++
1 file changed, 38 insertions(+)
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 2648c16bcd8d90..d59affcde907bb 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -221,6 +221,44 @@ void TargetLoweringBase::InitLibcalls(const Triple &TT) {
setLibcallName(RTLIB::FREXP_F128, nullptr);
setLibcallName(RTLIB::FREXP_PPCF128, nullptr);
}
+
+ if (TT.isLongDoubleF128()) {
+ // Prefer the more widely available long double functions for fp128.
+ setLibcallName(RTLIB::REM_F128, "fmodl");
+ setLibcallName(RTLIB::FMA_F128, "fmal");
+ setLibcallName(RTLIB::SQRT_F128, "sqrtl");
+ setLibcallName(RTLIB::CBRT_F128, "cbrtl");
+ setLibcallName(RTLIB::LOG_F128, "logl");
+ setLibcallName(RTLIB::LOG_FINITE_F128, "__logl_finite");
+ setLibcallName(RTLIB::LOG2_F128, "log2l");
+ setLibcallName(RTLIB::LOG2_FINITE_F128, "__log2l_finite");
+ setLibcallName(RTLIB::LOG10_F128, "log10l");
+ setLibcallName(RTLIB::LOG10_FINITE_F128, "__log10l_finite");
+ setLibcallName(RTLIB::EXP_F128, "expl");
+ setLibcallName(RTLIB::EXP_FINITE_F128, "__expl_finite");
+ setLibcallName(RTLIB::EXP2_F128, "exp2l");
+ setLibcallName(RTLIB::EXP2_FINITE_F128, "__exp2l_finite");
+ setLibcallName(RTLIB::SIN_F128, "sinl");
+ setLibcallName(RTLIB::COS_F128, "cosl");
+ setLibcallName(RTLIB::POW_F128, "powl");
+ setLibcallName(RTLIB::POW_FINITE_F128, "__powl_finite");
+ setLibcallName(RTLIB::CEIL_F128, "ceill");
+ setLibcallName(RTLIB::TRUNC_F128, "truncl");
+ setLibcallName(RTLIB::RINT_F128, "rintl");
+ setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintl");
+ setLibcallName(RTLIB::ROUND_F128, "roundl");
+ setLibcallName(RTLIB::ROUNDEVEN_F128, "roundevenl");
+ setLibcallName(RTLIB::FLOOR_F128, "floorl");
+ setLibcallName(RTLIB::COPYSIGN_F128, "copysignl");
+ setLibcallName(RTLIB::FMIN_F128, "fminl");
+ setLibcallName(RTLIB::FMAX_F128, "fmaxl");
+ setLibcallName(RTLIB::LROUND_F128, "lroundl");
+ setLibcallName(RTLIB::LLROUND_F128, "llroundl");
+ setLibcallName(RTLIB::LRINT_F128, "lrintl");
+ setLibcallName(RTLIB::LLRINT_F128, "llrintl");
+ setLibcallName(RTLIB::LDEXP_F128, "ldexpl");
+ setLibcallName(RTLIB::FREXP_F128, "frexpl");
+ }
}
/// GetFPLibCall - Helper to return the right libcall for the given floating