[llvm] [clang] [WIP] Correct lowering of `fp128` intrinsics (PR #76558)

Trevor Gross via cfe-commits cfe-commits at lists.llvm.org
Sat Jan 13 01:35:59 PST 2024


https://github.com/tgross35 updated https://github.com/llvm/llvm-project/pull/76558

>From 90a465d0a7e9744a4a8043152016e500927a0d95 Mon Sep 17 00:00:00 2001
From: Trevor Gross <tmgross at umich.edu>
Date: Fri, 11 Aug 2023 22:16:01 -0400
Subject: [PATCH 1/4] [IR] Add a baseline test for `f128` intrinsic lowering

`f128` intrinsic functions currently lower to `long double` libc calls, which
is incorrect on targets where `long double` is not IEEE binary128. Add a test
showing the current behavior.
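
For reference, a minimal sketch of the underlying mismatch on x86-64 Linux
(assumes GCC/glibc with libquadmath; not part of this patch): `long double` is
the 80-bit x87 format there, so routing an `fp128` intrinsic through the `*l`
functions silently rounds through a 64-bit mantissa.

  // Hypothetical demo; build with: g++ demo.cpp -lquadmath
  #include <cfloat>
  #include <cstdio>
  #include <quadmath.h>

  int main() {
    std::printf("long double mantissa bits: %d\n", LDBL_MANT_DIG);   // 64 (x87)
    std::printf("__float128 mantissa bits:  %d\n", FLT128_MANT_DIG); // 113
    // sqrtq keeps full binary128 precision; sqrtl would round the argument
    // and result through the 80-bit x87 format first.
    char buf[64];
    quadmath_snprintf(buf, sizeof(buf), "%.33Qg", sqrtq((__float128)2));
    std::printf("sqrtq(2) = %s\n", buf);
  }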
---
 .../CodeGen/Generic/f128-math-lowering.ll     | 610 ++++++++++++++++++
 1 file changed, 610 insertions(+)
 create mode 100644 llvm/test/CodeGen/Generic/f128-math-lowering.ll

diff --git a/llvm/test/CodeGen/Generic/f128-math-lowering.ll b/llvm/test/CodeGen/Generic/f128-math-lowering.ll
new file mode 100644
index 00000000000000..8a70786d97fe67
--- /dev/null
+++ b/llvm/test/CodeGen/Generic/f128-math-lowering.ll
@@ -0,0 +1,610 @@
+
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+;
+; RUN: llc < %s -mtriple=aarch64-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-AARCH64
+; RUN: llc < %s -mtriple=riscv32-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-RISCV32
+; RUN: llc < %s -mtriple=s390x-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-S390X
+; RUN: llc < %s -mtriple=i686-unknown-unknown   -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-X64
+;
+; Verify that fp128 intrinsics only lower to `long double` calls on platforms
+; where `f128` and `long double` have the same layout.
+;
+; We test on x86 and x64, which have 80-bit long double, as well as aarch64
+; and riscv32 (ld == f128), and s390x (ld == f128 with different alignment
+; from x64/aarch64 f128).
+;
+; FIXME: these emit calls to long double functions but should emit f128 calls
+
+define fp128 @test_cbrtf128(fp128 %a) {
+; CHECK-LABEL:      test_cbrtf128:
+; CHECK-AARCH64:    b llvm.cbrt.f128
+; CHECK-RISCV32:    call llvm.cbrt.f128 at plt
+; CHECK-S390X:      brasl {{%.*}} llvm.cbrt.f128 at PLT
+; CHECK-X64:        jmp llvm.cbrt.f128 at PLT # TAILCALL
+; CHECK-X86:        calll llvm.cbrt.f128 at PLT
+start:
+  %0 = tail call fp128 @llvm.cbrt.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.cbrt.f128(fp128)
+
+
+define fp128 @test_ceilf128(fp128 %a) {
+; CHECK-LABEL:      test_ceilf128:
+; CHECK-AARCH64:    b ceill
+; CHECK-RISCV32:    call ceill at plt
+; CHECK-S390X:      brasl {{%.*}} ceill at PLT
+; CHECK-X64:        jmp ceill at PLT
+; CHECK-X86:        calll ceill
+start:
+  %0 = tail call fp128 @llvm.ceil.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.ceil.f128(fp128)
+
+
+define fp128 @test_copysignf128(fp128 %a, fp128 %b) {
+; There is no libm call for copysign, so check the full autogenerated
+; assembly instead.
+; CHECK-LABEL:        test_copysignf128:
+; CHECK-AARCH64-LABEL: test_copysignf128:
+; CHECK-AARCH64:       // %bb.0: // %start
+; CHECK-AARCH64-NEXT:    stp q0, q1, [sp, #-32]!
+; CHECK-AARCH64-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-AARCH64-NEXT:    ldrb w8, [sp, #15]
+; CHECK-AARCH64-NEXT:    ldrb w9, [sp, #31]
+; CHECK-AARCH64-NEXT:    bfxil w9, w8, #0, #7
+; CHECK-AARCH64-NEXT:    strb w9, [sp, #15]
+; CHECK-AARCH64-NEXT:    ldr q0, [sp], #32
+; CHECK-AARCH64-NEXT:    ret
+;
+; CHECK-RISCV32-LABEL: test_copysignf128:
+; CHECK-RISCV32:       # %bb.0: # %start
+; CHECK-RISCV32-NEXT:    lw a3, 0(a1)
+; CHECK-RISCV32-NEXT:    lw a4, 4(a1)
+; CHECK-RISCV32-NEXT:    lw a2, 12(a2)
+; CHECK-RISCV32-NEXT:    lw a5, 12(a1)
+; CHECK-RISCV32-NEXT:    lw a1, 8(a1)
+; CHECK-RISCV32-NEXT:    lui a6, 524288
+; CHECK-RISCV32-NEXT:    and a2, a2, a6
+; CHECK-RISCV32-NEXT:    slli a5, a5, 1
+; CHECK-RISCV32-NEXT:    srli a5, a5, 1
+; CHECK-RISCV32-NEXT:    or a2, a5, a2
+; CHECK-RISCV32-NEXT:    sw a1, 8(a0)
+; CHECK-RISCV32-NEXT:    sw a4, 4(a0)
+; CHECK-RISCV32-NEXT:    sw a3, 0(a0)
+; CHECK-RISCV32-NEXT:    sw a2, 12(a0)
+; CHECK-RISCV32-NEXT:    ret
+;
+; CHECK-S390X-LABEL: test_copysignf128:
+; CHECK-S390X:       # %bb.0: # %start
+; CHECK-S390X-NEXT:    ld %f0, 0(%r3)
+; CHECK-S390X-NEXT:    ld %f2, 8(%r3)
+; CHECK-S390X-NEXT:    ld %f1, 0(%r4)
+; CHECK-S390X-NEXT:    ld %f3, 8(%r4)
+; CHECK-S390X-NEXT:    cpsdr %f0, %f1, %f0
+; CHECK-S390X-NEXT:    std %f0, 0(%r2)
+; CHECK-S390X-NEXT:    std %f2, 8(%r2)
+; CHECK-S390X-NEXT:    br %r14
+;
+; CHECK-X86-LABEL: test_copysignf128:
+; CHECK-X86:       # %bb.0: # %start
+; CHECK-X86-NEXT:    pushl %ebx
+; CHECK-X86-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-X86-NEXT:    pushl %edi
+; CHECK-X86-NEXT:    .cfi_def_cfa_offset 12
+; CHECK-X86-NEXT:    pushl %esi
+; CHECK-X86-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-X86-NEXT:    .cfi_offset %esi, -16
+; CHECK-X86-NEXT:    .cfi_offset %edi, -12
+; CHECK-X86-NEXT:    .cfi_offset %ebx, -8
+; CHECK-X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK-X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CHECK-X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; CHECK-X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; CHECK-X86-NEXT:    movl $-2147483648, %edi # imm = 0x80000000
+; CHECK-X86-NEXT:    andl {{[0-9]+}}(%esp), %edi
+; CHECK-X86-NEXT:    movl $2147483647, %ebx # imm = 0x7FFFFFFF
+; CHECK-X86-NEXT:    andl {{[0-9]+}}(%esp), %ebx
+; CHECK-X86-NEXT:    orl %edi, %ebx
+; CHECK-X86-NEXT:    movl %ebx, 12(%eax)
+; CHECK-X86-NEXT:    movl %esi, 8(%eax)
+; CHECK-X86-NEXT:    movl %edx, 4(%eax)
+; CHECK-X86-NEXT:    movl %ecx, (%eax)
+; CHECK-X86-NEXT:    popl %esi
+; CHECK-X86-NEXT:    .cfi_def_cfa_offset 12
+; CHECK-X86-NEXT:    popl %edi
+; CHECK-X86-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-X86-NEXT:    popl %ebx
+; CHECK-X86-NEXT:    .cfi_def_cfa_offset 4
+; CHECK-X86-NEXT:    retl $4
+;
+; CHECK-X64-LABEL: test_copysignf128:
+; CHECK-X64:       # %bb.0: # %start
+; CHECK-X64-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; CHECK-X64-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-X64-NEXT:    orps %xmm1, %xmm0
+; CHECK-X64-NEXT:    retq
+start:
+  %0 = tail call fp128 @llvm.copysign.f128(fp128 %a, fp128 %b)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.copysign.f128(fp128, fp128)
+
+
+define fp128 @test_cosf128(fp128 %a) {
+; CHECK-LABEL:      test_cosf128:
+; CHECK-AARCH64:    b cosl
+; CHECK-RISCV32:    call cosl at plt
+; CHECK-S390X:      brasl {{%.*}} cosl at PLT
+; CHECK-X64:        jmp cosl at PLT
+; CHECK-X86:        calll cosl
+start:
+  %0 = tail call fp128 @llvm.cos.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.cos.f128(fp128)
+
+
+define fp128 @test_exp2f128(fp128 %a) {
+; CHECK-LABEL:      test_exp2f128:
+; CHECK-AARCH64:    b exp2l
+; CHECK-RISCV32:    call exp2l at plt
+; CHECK-S390X:      brasl {{%.*}} exp2l at PLT
+; CHECK-X64:        jmp exp2l at PLT
+; CHECK-X86:        calll exp2l
+start:
+  %0 = tail call fp128 @llvm.exp2.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.exp2.f128(fp128)
+
+
+define fp128 @test___exp2f128_finite(fp128 %a) {
+; CHECK-LABEL:      test___exp2f128_finite:
+; CHECK-AARCH64:    b llvm.__exp2f128_finite.f128
+; CHECK-RISCV32:    call llvm.__exp2f128_finite.f128 at plt
+; CHECK-S390X:      brasl {{%.*}} llvm.__exp2f128_finite.f128 at PLT
+; CHECK-X64:        jmp llvm.__exp2f128_finite.f128 at PLT # TAILCALL
+; CHECK-X86:        calll llvm.__exp2f128_finite.f128 at PLT
+start:
+  %0 = tail call fp128 @llvm.__exp2f128_finite.f128(fp128  %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.__exp2f128_finite.f128(fp128)
+
+
+define fp128 @test_expf128(fp128 %a) {
+; CHECK-LABEL:      test_expf128:
+; CHECK-AARCH64:    b expl
+; CHECK-RISCV32:    call expl at plt
+; CHECK-S390X:      brasl {{%.*}} expl at PLT
+; CHECK-X64:        jmp expl at PLT
+; CHECK-X86:        calll expl
+start:
+  %0 = tail call fp128 @llvm.exp.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.exp.f128(fp128)
+
+
+define fp128 @test___expf128_finite(fp128 %a) {
+; CHECK-LABEL:      test___expf128_finite:
+; CHECK-AARCH64:    b llvm.__expf128_finite.f128
+; CHECK-RISCV32:    call llvm.__expf128_finite.f128 at plt
+; CHECK-S390X:      brasl {{%.*}} llvm.__expf128_finite.f128 at PLT
+; CHECK-X64:        jmp llvm.__expf128_finite.f128 at PLT # TAILCALL
+; CHECK-X86:        calll llvm.__expf128_finite.f128 at PLT
+start:
+  %0 = tail call fp128 @llvm.__expf128_finite.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.__expf128_finite.f128(fp128)
+
+
+define fp128 @test_floorf128(fp128 %a) {
+; CHECK-LABEL:      test_floorf128:
+; CHECK-AARCH64:    b floorl
+; CHECK-RISCV32:    call floorl at plt
+; CHECK-S390X:      brasl {{%.*}} floorl at PLT
+; CHECK-X64:        jmp floorl at PLT
+; CHECK-X86:        calll floorl
+start:
+  %0 = tail call fp128 @llvm.floor.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.floor.f128(fp128)
+
+
+define fp128 @test_fmaf128(fp128 %a, fp128 %b, fp128 %c) {
+; CHECK-LABEL:      test_fmaf128:
+; CHECK-AARCH64:    b fmal
+; CHECK-RISCV32:    call fmal at plt
+; CHECK-S390X:      brasl {{%.*}} fmal at PLT
+; CHECK-X64:        jmp fmal at PLT
+; CHECK-X86:        calll fmal
+start:
+  %0 = tail call fp128 @llvm.fma.f128(fp128 %a, fp128 %b, fp128 %c)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.fma.f128(fp128, fp128, fp128)
+
+
+define fp128 @test_fmaxf128(fp128 %a, fp128 %b) {
+; CHECK-LABEL:      test_fmaxf128:
+; CHECK-AARCH64:    b llvm.fmax.f128
+; CHECK-RISCV32:    call llvm.fmax.f128 at plt
+; CHECK-S390X:      brasl {{%.*}} llvm.fmax.f128 at PLT
+; CHECK-X64:        jmp llvm.fmax.f128 at PLT # TAILCALL
+; CHECK-X86:        calll llvm.fmax.f128 at PLT
+start:
+  %0 = tail call fp128 @llvm.fmax.f128(fp128 %a, fp128 %b)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.fmax.f128(fp128, fp128)
+
+
+define fp128 @test_fminf128(fp128 %a, fp128 %b) {
+; CHECK-LABEL:      test_fminf128:
+; CHECK-AARCH64:    b llvm.fmin.f128
+; CHECK-RISCV32:    call llvm.fmin.f128 at plt
+; CHECK-S390X:      brasl {{%.*}} llvm.fmin.f128 at PLT
+; CHECK-X64:        jmp llvm.fmin.f128 at PLT # TAILCALL
+; CHECK-X86:        calll llvm.fmin.f128 at PLT
+start:
+  %0 = tail call fp128 @llvm.fmin.f128(fp128 %a, fp128 %b)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.fmin.f128(fp128, fp128)
+
+
+define fp128 @test_fmodf128(fp128 %a, fp128 %b) {
+; CHECK-LABEL:      test_fmodf128:
+; CHECK-AARCH64:    b llvm.fmod.f128
+; CHECK-RISCV32:    call llvm.fmod.f128 at plt
+; CHECK-S390X:      brasl {{%.*}} llvm.fmod.f128 at PLT
+; CHECK-X64:        jmp llvm.fmod.f128 at PLT # TAILCALL
+; CHECK-X86:        calll llvm.fmod.f128 at PLT
+start:
+  %0 = tail call fp128 @llvm.fmod.f128(fp128 %a, fp128 %b)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.fmod.f128(fp128, fp128)
+
+
+define { fp128, i32 } @test_frexpf128(fp128 %a) {
+; CHECK-LABEL:      test_frexpf128:
+; CHECK-AARCH64:    bl frexpl
+; CHECK-RISCV32:    call frexpl at plt
+; CHECK-S390X:      brasl {{%.*}} frexpl at PLT
+; CHECK-X64:        callq frexpl at PLT
+; CHECK-X86:        calll frexpl
+start:
+  %0 = tail call { fp128, i32 } @llvm.frexp.f128(fp128 %a)
+  ret { fp128, i32 } %0
+}
+
+declare { fp128, i32 } @llvm.frexp.f128(fp128)
+
+
+define fp128 @test_ldexpf128(fp128 %a, i32 %b) {
+; CHECK-LABEL:      test_ldexpf128:
+; CHECK-AARCH64:    b ldexpl
+; CHECK-RISCV32:    call ldexpl at plt
+; CHECK-S390X:      brasl {{%.*}} ldexpl at PLT
+; CHECK-X64:        jmp ldexpl at PLT
+; CHECK-X86:        calll ldexpl
+start:
+  %0 = tail call fp128 @llvm.ldexp.f128(fp128 %a, i32 %b)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.ldexp.f128(fp128, i32)
+
+
+define i64 @test_llrintf128(fp128 %a) {
+; CHECK-LABEL:      test_llrintf128:
+; CHECK-AARCH64:    b llrintl
+; CHECK-RISCV32:    call llrintl at plt
+; CHECK-S390X:      brasl {{%.*}} llrintl at PLT
+; CHECK-X64:        jmp llrintl at PLT
+; CHECK-X86:        calll llrintl
+start:
+  %0 = tail call i64 @llvm.llrint.f128(fp128 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.llrint.f128(fp128)
+
+
+define i64 @test_llroundf128(fp128 %a) {
+; CHECK-LABEL:      test_llroundf128:
+; CHECK-AARCH64:    b llroundl
+; CHECK-RISCV32:    call llroundl at plt
+; CHECK-S390X:      brasl {{%.*}} llroundl at PLT
+; CHECK-X64:        jmp llroundl at PLT
+; CHECK-X86:        calll llroundl
+start:
+  %0 = tail call i64 @llvm.llround.i64.f128(fp128 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.llround.i64.f128(fp128)
+
+
+define fp128 @test_log10f128(fp128 %a) {
+; CHECK-LABEL:      test_log10f128:
+; CHECK-AARCH64:    b log10l
+; CHECK-RISCV32:    call log10l at plt
+; CHECK-S390X:      brasl {{%.*}} log10l at PLT
+; CHECK-X64:        jmp log10l at PLT
+; CHECK-X86:        calll log10l
+start:
+  %0 = tail call fp128 @llvm.log10.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.log10.f128(fp128)
+
+
+define fp128 @test___log10f128_finite(fp128 %a) {
+; CHECK-LABEL:      test___log10f128_finite:
+; CHECK-AARCH64:    b llvm.__log10f128_finite.f128
+; CHECK-RISCV32:    call llvm.__log10f128_finite.f128 at plt
+; CHECK-S390X:      brasl {{%.*}} llvm.__log10f128_finite.f128 at PLT
+; CHECK-X64:        jmp llvm.__log10f128_finite.f128 at PLT # TAILCALL
+; CHECK-X86:        calll llvm.__log10f128_finite.f128 at PLT
+start:
+  %0 = tail call fp128 @llvm.__log10f128_finite.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.__log10f128_finite.f128(fp128)
+
+
+define fp128 @test_log2f128(fp128 %a) {
+; CHECK-LABEL:      test_log2f128:
+; CHECK-AARCH64:    b log2l
+; CHECK-RISCV32:    call log2l at plt
+; CHECK-S390X:      brasl {{%.*}} log2l at PLT
+; CHECK-X64:        jmp log2l at PLT
+; CHECK-X86:        calll log2l
+start:
+  %0 = tail call fp128 @llvm.log2.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.log2.f128(fp128)
+
+
+define fp128 @test___log2f128_finite(fp128 %a) {
+; CHECK-LABEL:      test___log2f128_finite:
+; CHECK-AARCH64:    b llvm.__log2f128_finite.f128
+; CHECK-RISCV32:    call llvm.__log2f128_finite.f128 at plt
+; CHECK-S390X:      brasl {{%.*}} llvm.__log2f128_finite.f128 at PLT
+; CHECK-X64:        jmp llvm.__log2f128_finite.f128 at PLT # TAILCALL
+; CHECK-X86:        calll llvm.__log2f128_finite.f128 at PLT
+start:
+  %0 = tail call fp128 @llvm.__log2f128_finite.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.__log2f128_finite.f128(fp128)
+
+
+define fp128 @test_logf128(fp128 %a) {
+; CHECK-LABEL:      test_logf128:
+; CHECK-AARCH64:    b logl
+; CHECK-RISCV32:    call logl at plt
+; CHECK-S390X:      brasl {{%.*}} logl at PLT
+; CHECK-X64:        jmp logl at PLT
+; CHECK-X86:        calll logl
+start:
+  %0 = tail call fp128 @llvm.log.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.log.f128(fp128)
+
+
+define fp128 @test___logf128_finite(fp128 %a) {
+; CHECK-LABEL:      test___logf128_finite:
+; CHECK-AARCH64:    b llvm.__logf128_finite.f128
+; CHECK-RISCV32:    call llvm.__logf128_finite.f128 at plt
+; CHECK-S390X:      brasl {{%.*}} llvm.__logf128_finite.f128 at PLT
+; CHECK-X64:        jmp llvm.__logf128_finite.f128 at PLT # TAILCALL
+; CHECK-X86:        calll llvm.__logf128_finite.f128 at PLT
+start:
+  %0 = tail call fp128 @llvm.__logf128_finite.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.__logf128_finite.f128(fp128)
+
+
+define i64 @test_lrintf128(fp128 %a) {
+; CHECK-LABEL:      test_lrintf128:
+; CHECK-AARCH64:    b lrintl
+; CHECK-RISCV32:    call lrintl at plt
+; CHECK-S390X:      brasl {{%.*}} lrintl at PLT
+; CHECK-X64:        jmp lrintl at PLT
+; CHECK-X86:        calll lrintl
+start:
+  %0 = tail call i64 @llvm.lrint.f128(fp128 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.lrint.f128(fp128)
+
+
+define i64 @test_lroundf128(fp128 %a) {
+; CHECK-LABEL:      test_lroundf128:
+; CHECK-AARCH64:    b lroundl
+; CHECK-RISCV32:    call lroundl at plt
+; CHECK-S390X:      brasl {{%.*}} lroundl at PLT
+; CHECK-X64:        jmp lroundl at PLT
+; CHECK-X86:        calll lroundl
+start:
+  %0 = tail call i64 @llvm.lround.i64.f128(fp128 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.lround.i64.f128(fp128)
+
+
+define fp128 @test_nearbyintf128(fp128 %a) {
+; CHECK-LABEL:      test_nearbyintf128:
+; CHECK-AARCH64:    b nearbyintl
+; CHECK-RISCV32:    call nearbyintl at plt
+; CHECK-S390X:      brasl {{%.*}} nearbyintl at PLT
+; CHECK-X64:        jmp nearbyintl at PLT
+; CHECK-X86:        calll nearbyintl
+start:
+  %0 = tail call fp128 @llvm.nearbyint.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.nearbyint.f128(fp128)
+
+
+define fp128 @test_powf128(fp128 %a, fp128 %b) {
+; CHECK-LABEL:      test_powf128:
+; CHECK-AARCH64:    b powl
+; CHECK-RISCV32:    call powl at plt
+; CHECK-S390X:      brasl {{%.*}} powl at PLT
+; CHECK-X64:        jmp powl at PLT
+; CHECK-X86:        calll powl
+start:
+  %0 = tail call fp128 @llvm.pow.f128(fp128 %a, fp128 %b)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.pow.f128(fp128, fp128)
+
+
+define fp128 @test___powf128_finite(fp128 %a, fp128 %b) {
+; CHECK-LABEL:      test___powf128_finite:
+; CHECK-AARCH64:    b llvm.__powf128_finite.f128
+; CHECK-RISCV32:    call llvm.__powf128_finite.f128 at plt
+; CHECK-S390X:      brasl {{%.*}} llvm.__powf128_finite.f128 at PLT
+; CHECK-X64:        jmp llvm.__powf128_finite.f128 at PLT # TAILCALL
+; CHECK-X86:        calll llvm.__powf128_finite.f128 at PLT
+start:
+  %0 = tail call fp128 @llvm.__powf128_finite.f128(fp128 %a, fp128 %b)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.__powf128_finite.f128(fp128, fp128)
+
+
+define fp128 @test_rintf128(fp128 %a) {
+; CHECK-LABEL:      test_rintf128:
+; CHECK-AARCH64:    b rintl
+; CHECK-RISCV32:    call rintl at plt
+;
+; CHECK-S390X-LABEL: test_rintf128:
+; CHECK-S390X:       # %bb.0: # %start
+; CHECK-S390X-NEXT:    ld %f0, 0(%r3)
+; CHECK-S390X-NEXT:    ld %f2, 8(%r3)
+; CHECK-S390X-NEXT:    fixbr %f0, 0, %f0
+; CHECK-S390X-NEXT:    std %f0, 0(%r2)
+; CHECK-S390X-NEXT:    std %f2, 8(%r2)
+; CHECK-S390X-NEXT:    br %r14
+;
+; CHECK-X64:        jmp rintl at PLT
+; CHECK-X86:        calll rintl
+start:
+  %0 = tail call fp128 @llvm.rint.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.rint.f128(fp128)
+
+
+define fp128 @test_roundevenf128(fp128 %a) {
+; CHECK-LABEL:      test_roundevenf128:
+; CHECK-AARCH64:    b roundevenl
+; CHECK-RISCV32:    call roundevenl at plt
+; CHECK-S390X:      brasl {{%.*}} roundevenl at PLT
+; CHECK-X64:        jmp roundevenl at PLT
+; CHECK-X86:        calll roundevenl
+start:
+  %0 = tail call fp128 @llvm.roundeven.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.roundeven.f128(fp128)
+
+
+define fp128 @test_roundf128(fp128 %a) {
+; CHECK-LABEL:      test_roundf128:
+; CHECK-AARCH64:    b roundl
+; CHECK-RISCV32:    call roundl at plt
+; CHECK-S390X:      brasl {{%.*}} roundl at PLT
+; CHECK-X64:        jmp roundl at PLT
+; CHECK-X86:        calll roundl
+start:
+  %0 = tail call fp128 @llvm.round.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.round.f128(fp128)
+
+
+define fp128 @test_sinf128(fp128 %a) {
+; CHECK-LABEL:      test_sinf128:
+; CHECK-AARCH64:    b sinl
+; CHECK-RISCV32:    call sinl at plt
+; CHECK-S390X:      brasl {{%.*}} sinl at PLT
+; CHECK-X64:        jmp sinl at PLT
+; CHECK-X86:        calll sinl
+start:
+  %0 = tail call fp128 @llvm.sin.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.sin.f128(fp128)
+
+
+define fp128 @test_sqrtf128(fp128 %a) {
+; CHECK-LABEL:      test_sqrtf128:
+; CHECK-AARCH64:    b sqrtl
+; CHECK-RISCV32:    call sqrtl at plt
+; CHECK-S390X:      sqxbr {{%.*}} {{%.*}}
+; CHECK-X64:        jmp sqrtl at PLT
+; CHECK-X86:        calll sqrtl
+start:
+  %0 = tail call fp128 @llvm.sqrt.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.sqrt.f128(fp128)
+
+
+define fp128 @test_truncf128(fp128 %a) {
+; CHECK-LABEL:      test_truncf128:
+; CHECK-AARCH64:    b truncl
+; CHECK-RISCV32:    call truncl at plt
+; CHECK-S390X:      brasl {{%.*}} truncl at PLT
+; CHECK-X64:        jmp truncl at PLT
+; CHECK-X86:        calll truncl
+start:
+  %0 = tail call fp128 @llvm.trunc.f128(fp128 %a)
+  ret fp128 %0
+}
+
+declare fp128 @llvm.trunc.f128(fp128)

>From 648329caff6a15ee4fb061e8ff68f5a9fb0c0519 Mon Sep 17 00:00:00 2001
From: Trevor Gross <tmgross at umich.edu>
Date: Thu, 28 Dec 2023 03:50:05 -0500
Subject: [PATCH 2/4] [llvm][clang] duplicate `long double` layout logic from
 clang to LLVM

Information about the size and alignment of `long double` currently lives only
in Clang's TargetInfo. Duplicate this logic in LLVM's Triple so it can be used
to control the lowering of intrinsics.

Additionally, add an assertion to make sure Clang and LLVM agree.
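
For illustration, a sketch of how backend code might consume the new hook
(uses the names added in this patch, but is not itself part of the diff):

  #include "llvm/ADT/APFloat.h"
  #include "llvm/TargetParser/Triple.h"

  // Illustrative helper: decide whether fp128 libcalls may reuse the `*l`
  // (long double) libm entry points for a given triple.
  static bool canReuseLongDoubleLibcalls(const llvm::Triple &TT) {
    llvm::Triple::CLayouts L = TT.getCLayouts();
    // The fltSemantics objects are singletons, so pointer identity suffices.
    return L.LongDoubleWidth == 128 &&
           L.LongDoubleFormat == &llvm::APFloat::IEEEquad();
  }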
---
 clang/include/clang/Basic/TargetInfo.h  |   4 +
 clang/lib/Basic/TargetInfo.cpp          |  12 ++
 clang/lib/Basic/Targets.cpp             |  13 ++-
 llvm/include/llvm/TargetParser/Triple.h |  18 +++
 llvm/lib/TargetParser/Triple.cpp        | 142 ++++++++++++++++++++++++
 5 files changed, 187 insertions(+), 2 deletions(-)

diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h
index 3eb23ebdacf0ed..ad8a2202e32231 100644
--- a/clang/include/clang/Basic/TargetInfo.h
+++ b/clang/include/clang/Basic/TargetInfo.h
@@ -1218,6 +1218,10 @@ class TargetInfo : public TransferrableTargetInfo,
     return Triple;
   }
 
+  /// Assert that layouts for C types computed by Clang are the same as layouts
+  /// stored in the LLVM target.
+  void validateCLayouts() const;
+
   /// Returns the target ID if supported.
   virtual std::optional<std::string> getTargetID() const {
     return std::nullopt;
diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp
index 96b3ad9ba2f273..1fb1c171b389d7 100644
--- a/clang/lib/Basic/TargetInfo.cpp
+++ b/clang/lib/Basic/TargetInfo.cpp
@@ -925,6 +925,18 @@ bool TargetInfo::validateInputConstraint(
   return true;
 }
 
+void TargetInfo::validateCLayouts() const {
+  llvm::Triple::CLayouts TripleLayouts = Triple.getCLayouts();
+  if (LLVM_UNLIKELY(LongDoubleWidth != TripleLayouts.LongDoubleWidth ||
+                    LongDoubleAlign != TripleLayouts.LongDoubleAlign)) {
+    fprintf(stderr, "'long double' width: got %d but expected %d\n",
+            LongDoubleWidth, TripleLayouts.LongDoubleWidth);
+    fprintf(stderr, "'long double' align: got %d but expected %d\n",
+            LongDoubleAlign, TripleLayouts.LongDoubleAlign);
+    llvm_unreachable("Clang & LLVM layout mismatch");
+  }
+}
+
 void TargetInfo::CheckFixedPointBits() const {
   // Check that the number of fractional and integral bits (and maybe sign) can
   // fit into the bits given for a fixed point type.
diff --git a/clang/lib/Basic/Targets.cpp b/clang/lib/Basic/Targets.cpp
index ea002bb464fcc5..2790ecc404cebd 100644
--- a/clang/lib/Basic/Targets.cpp
+++ b/clang/lib/Basic/Targets.cpp
@@ -109,8 +109,8 @@ void addCygMingDefines(const LangOptions &Opts, MacroBuilder &Builder) {
 // Driver code
 //===----------------------------------------------------------------------===//
 
-std::unique_ptr<TargetInfo> AllocateTarget(const llvm::Triple &Triple,
-                                           const TargetOptions &Opts) {
+std::unique_ptr<TargetInfo> AllocateTargetImpl(const llvm::Triple &Triple,
+                                               const TargetOptions &Opts) {
   llvm::Triple::OSType os = Triple.getOS();
 
   switch (Triple.getArch()) {
@@ -749,6 +749,15 @@ std::unique_ptr<TargetInfo> AllocateTarget(const llvm::Triple &Triple,
     }
   }
 }
+std::unique_ptr<TargetInfo> AllocateTarget(const llvm::Triple &Triple,
+                                           const TargetOptions &Opts) {
+
+  std::unique_ptr<TargetInfo> target = AllocateTargetImpl(Triple, Opts);
+  if (target != nullptr) {
+    target->validateCLayouts();
+  }
+  return target;
+}
 } // namespace targets
 } // namespace clang
 
diff --git a/llvm/include/llvm/TargetParser/Triple.h b/llvm/include/llvm/TargetParser/Triple.h
index 95014a546f7245..80426558ab3e5e 100644
--- a/llvm/include/llvm/TargetParser/Triple.h
+++ b/llvm/include/llvm/TargetParser/Triple.h
@@ -9,6 +9,7 @@
 #ifndef LLVM_TARGETPARSER_TRIPLE_H
 #define LLVM_TARGETPARSER_TRIPLE_H
 
+#include "llvm/ADT/APFloat.h"
 #include "llvm/ADT/Twine.h"
 #include "llvm/Support/VersionTuple.h"
 
@@ -1161,6 +1162,23 @@ class Triple {
   /// Returns a canonicalized OS version number for the specified OS.
   static VersionTuple getCanonicalVersionForOS(OSType OSKind,
                                                const VersionTuple &Version);
+
+  /// Layouts for C types that are relevant to libc calls generated by LLVM
+  struct CLayouts {
+    unsigned char LongDoubleWidth;
+    unsigned char LongDoubleAlign;
+    const fltSemantics *LongDoubleFormat;
+  };
+
+  /// Provide default layouts relevant to C. Frontends may override these
+  /// values.
+  CLayouts getCLayouts() const;
+
+  /// Return true if `long double` and `__float128` have the same layout.
+  bool isLongDoubleF128() const {
+    // TODO: do we also need to check alignment?
+    return getCLayouts().LongDoubleFormat == &APFloat::IEEEquad();
+  }
 };
 
 } // End llvm namespace
diff --git a/llvm/lib/TargetParser/Triple.cpp b/llvm/lib/TargetParser/Triple.cpp
index b9971c25af71f3..319d4d74101bbd 100644
--- a/llvm/lib/TargetParser/Triple.cpp
+++ b/llvm/lib/TargetParser/Triple.cpp
@@ -1900,6 +1900,148 @@ VersionTuple Triple::getCanonicalVersionForOS(OSType OSKind,
   }
 }
 
+Triple::CLayouts Triple::getCLayouts() const {
+  Triple::CLayouts Layouts;
+
+  // Default to a 32-bit RISC platform
+  Layouts.LongDoubleWidth = 64;
+  Layouts.LongDoubleAlign = 64;
+  Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+
+  enum ArchType arch = getArch();
+
+  if (arch == aarch64 || arch == aarch64_be || arch == aarch64_32) {
+    Layouts.LongDoubleWidth = 128;
+    Layouts.LongDoubleAlign = 128;
+    Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+
+    // TODO: verify this logic matches when WindowsARM64TargetInfo /
+    // DarwinAArch64TargetInfo is called
+    if (isOSWindows()) {
+      Layouts.LongDoubleWidth = Layouts.LongDoubleAlign = 64;
+      Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+    } else if (isMacOSX()) {
+      // TODO: should this just be isMacOSX or check specifically for darwin?
+      Layouts.LongDoubleWidth = Layouts.LongDoubleAlign = 64;
+      Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+    }
+  } else if (arch == avr) {
+    Layouts.LongDoubleWidth = 32;
+    Layouts.LongDoubleAlign = 8;
+    Layouts.LongDoubleFormat = &llvm::APFloat::IEEEsingle();
+  } else if (arch == arc) {
+    Layouts.LongDoubleAlign = 32;
+  } else if (arch == arm) {
+    // TODO: port the logic
+  } else if (arch == csky) {
+    Layouts.LongDoubleAlign = 32;
+  } else if (arch == loongarch32 || arch == loongarch64) {
+    Layouts.LongDoubleWidth = Layouts.LongDoubleAlign = 128;
+    Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+  } else if (arch == mips || arch == mipsel || arch == mips64 ||
+             arch == mips64el) {
+    if (isMIPS32()) {
+      // o32
+      Layouts.LongDoubleWidth = Layouts.LongDoubleAlign = 64;
+      Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+    } else {
+      // n32 & n64
+      Layouts.LongDoubleWidth = Layouts.LongDoubleAlign = 128;
+      Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+      if (isOSFreeBSD()) {
+        Layouts.LongDoubleWidth = Layouts.LongDoubleAlign = 64;
+        Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+      }
+    }
+  } else if (arch == msp430) {
+    Layouts.LongDoubleWidth = 64;
+    Layouts.LongDoubleAlign = 16;
+  } else if (arch == ppc || arch == ppcle || arch == ppc64 || arch == ppc64le) {
+    // TODO: figure out how to get features
+
+    if (getOS() == AIX) {
+      Layouts.LongDoubleWidth = 64;
+      Layouts.LongDoubleAlign = 32;
+      Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+    } else if (isOSFreeBSD() || isOSNetBSD() || isOSOpenBSD() || isMusl()) {
+      Layouts.LongDoubleWidth = 64;
+      Layouts.LongDoubleAlign = 64;
+      Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+    } else {
+      Layouts.LongDoubleWidth = 128;
+      Layouts.LongDoubleAlign = 128;
+      Layouts.LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble();
+    }
+  } else if (arch == riscv32 || arch == riscv64) {
+    Layouts.LongDoubleWidth = 128;
+    Layouts.LongDoubleAlign = 128;
+    Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+  } else if (arch == sparcv9) {
+    // The SPARCv8 System V ABI has long double 128-bits in size, but 64-bit
+    // aligned. The SPARCv9 SCD 2.4.1 says 16-byte aligned.
+    Layouts.LongDoubleWidth = 128;
+    Layouts.LongDoubleAlign = 128;
+    Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+  } else if (arch == systemz) {
+    Layouts.LongDoubleWidth = 128;
+    Layouts.LongDoubleAlign = 64;
+    Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+  } else if (arch == tce || arch == tcele) {
+    Layouts.LongDoubleWidth = 32;
+    Layouts.LongDoubleAlign = 32;
+    Layouts.LongDoubleFormat = &llvm::APFloat::IEEEsingle();
+  } else if (arch == ve) {
+    Layouts.LongDoubleWidth = 128;
+    Layouts.LongDoubleAlign = 128;
+    Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+  } else if (arch == wasm32 || arch == wasm64) {
+    Layouts.LongDoubleWidth = Layouts.LongDoubleAlign = 128;
+    Layouts.LongDoubleFormat = &llvm::APFloat::IEEEquad();
+  } else if (arch == x86 || arch == x86_64) {
+    if (arch == x86_64) {
+      Layouts.LongDoubleWidth = 128;
+      Layouts.LongDoubleAlign = 128;
+    }
+    if (isOSDarwin()) {
+      Layouts.LongDoubleWidth = 128;
+      Layouts.LongDoubleAlign = 128;
+      Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+    } else if (isAndroid()) {
+      Layouts.LongDoubleWidth = 64;
+      Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+    } else if (isOSLinux()) {
+      Layouts.LongDoubleWidth = 96;
+      Layouts.LongDoubleAlign = 32;
+      Layouts.LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
+    } else if (isOSWindows()) {
+      if (isWindowsCygwinEnvironment()) {
+        Layouts.LongDoubleWidth = 64;
+        Layouts.LongDoubleAlign = 64;
+      } else if (isWindowsGNUEnvironment()) {
+        // Mingw64 rounds long double size and alignment up to 16 bytes, but
+        // sticks with x86 FP ops. Weird.
+        Layouts.LongDoubleWidth = 128;
+        Layouts.LongDoubleAlign = 128;
+        Layouts.LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
+      } else {
+        Layouts.LongDoubleWidth = 64;
+        Layouts.LongDoubleAlign = 64;
+        Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+      }
+    } else if (isOSIAMCU()) {
+      Layouts.LongDoubleWidth = 64;
+      Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+    } else if (isOHOSFamily()) {
+      Layouts.LongDoubleWidth = 64;
+      Layouts.LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+    }
+  } else if (arch == xcore) {
+    Layouts.LongDoubleAlign = 32;
+  }
+
+  return Layouts;
+}
+
 // HLSL triple environment orders are relied on in the front end
 static_assert(Triple::Vertex - Triple::Pixel == 1,
               "incorrect HLSL stage order");

>From 81806737fa62bb3086c503ebd04e734e37263113 Mon Sep 17 00:00:00 2001
From: Trevor Gross <tmgross at umich.edu>
Date: Thu, 28 Dec 2023 04:01:22 -0500
Subject: [PATCH 3/4] [IR] Change `fp128` lowering to use `f128` functions by
 default

Switch the default `fp128` libcall names from the `long double` functions
(e.g. `fmodl`, `sinl`) to the `f128`-specific functions (`fmodf128`, `sinf128`).

Fixes https://github.com/llvm/llvm-project/issues/44744.
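
For illustration, after this change a backend querying the runtime library
names gets the `*f128` entry points by default (sketch only; assumes a
TargetLoweringBase object `TLI` is in scope):

  const char *Rem = TLI.getLibcallName(RTLIB::REM_F128); // now "fmodf128"
  const char *Sin = TLI.getLibcallName(RTLIB::SIN_F128); // now "sinf128"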
---
 llvm/include/llvm/IR/RuntimeLibcalls.def      |  68 +++---
 .../CodeGen/Generic/f128-math-lowering.ll     | 226 +++++++++---------
 2 files changed, 147 insertions(+), 147 deletions(-)

diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.def b/llvm/include/llvm/IR/RuntimeLibcalls.def
index 19dea60bebf9be..7fce540612856a 100644
--- a/llvm/include/llvm/IR/RuntimeLibcalls.def
+++ b/llvm/include/llvm/IR/RuntimeLibcalls.def
@@ -110,12 +110,12 @@ HANDLE_LIBCALL(DIV_PPCF128, "__gcc_qdiv")
 HANDLE_LIBCALL(REM_F32, "fmodf")
 HANDLE_LIBCALL(REM_F64, "fmod")
 HANDLE_LIBCALL(REM_F80, "fmodl")
-HANDLE_LIBCALL(REM_F128, "fmodl")
+HANDLE_LIBCALL(REM_F128, "fmodf128")
 HANDLE_LIBCALL(REM_PPCF128, "fmodl")
 HANDLE_LIBCALL(FMA_F32, "fmaf")
 HANDLE_LIBCALL(FMA_F64, "fma")
 HANDLE_LIBCALL(FMA_F80, "fmal")
-HANDLE_LIBCALL(FMA_F128, "fmal")
+HANDLE_LIBCALL(FMA_F128, "fmaf128")
 HANDLE_LIBCALL(FMA_PPCF128, "fmal")
 HANDLE_LIBCALL(POWI_F32, "__powisf2")
 HANDLE_LIBCALL(POWI_F64, "__powidf2")
@@ -125,62 +125,62 @@ HANDLE_LIBCALL(POWI_PPCF128, "__powitf2")
 HANDLE_LIBCALL(SQRT_F32, "sqrtf")
 HANDLE_LIBCALL(SQRT_F64, "sqrt")
 HANDLE_LIBCALL(SQRT_F80, "sqrtl")
-HANDLE_LIBCALL(SQRT_F128, "sqrtl")
+HANDLE_LIBCALL(SQRT_F128, "sqrtf128")
 HANDLE_LIBCALL(SQRT_PPCF128, "sqrtl")
 HANDLE_LIBCALL(CBRT_F32, "cbrtf")
 HANDLE_LIBCALL(CBRT_F64, "cbrt")
 HANDLE_LIBCALL(CBRT_F80, "cbrtl")
-HANDLE_LIBCALL(CBRT_F128, "cbrtl")
+HANDLE_LIBCALL(CBRT_F128, "cbrtf128")
 HANDLE_LIBCALL(CBRT_PPCF128, "cbrtl")
 HANDLE_LIBCALL(LOG_F32, "logf")
 HANDLE_LIBCALL(LOG_F64, "log")
 HANDLE_LIBCALL(LOG_F80, "logl")
-HANDLE_LIBCALL(LOG_F128, "logl")
+HANDLE_LIBCALL(LOG_F128, "logf128")
 HANDLE_LIBCALL(LOG_PPCF128, "logl")
 HANDLE_LIBCALL(LOG_FINITE_F32, "__logf_finite")
 HANDLE_LIBCALL(LOG_FINITE_F64, "__log_finite")
 HANDLE_LIBCALL(LOG_FINITE_F80, "__logl_finite")
-HANDLE_LIBCALL(LOG_FINITE_F128, "__logl_finite")
+HANDLE_LIBCALL(LOG_FINITE_F128, "__logf128_finite")
 HANDLE_LIBCALL(LOG_FINITE_PPCF128, "__logl_finite")
 HANDLE_LIBCALL(LOG2_F32, "log2f")
 HANDLE_LIBCALL(LOG2_F64, "log2")
 HANDLE_LIBCALL(LOG2_F80, "log2l")
-HANDLE_LIBCALL(LOG2_F128, "log2l")
+HANDLE_LIBCALL(LOG2_F128, "log2f128")
 HANDLE_LIBCALL(LOG2_PPCF128, "log2l")
 HANDLE_LIBCALL(LOG2_FINITE_F32, "__log2f_finite")
 HANDLE_LIBCALL(LOG2_FINITE_F64, "__log2_finite")
 HANDLE_LIBCALL(LOG2_FINITE_F80, "__log2l_finite")
-HANDLE_LIBCALL(LOG2_FINITE_F128, "__log2l_finite")
+HANDLE_LIBCALL(LOG2_FINITE_F128, "__log2f128_finite")
 HANDLE_LIBCALL(LOG2_FINITE_PPCF128, "__log2l_finite")
 HANDLE_LIBCALL(LOG10_F32, "log10f")
 HANDLE_LIBCALL(LOG10_F64, "log10")
 HANDLE_LIBCALL(LOG10_F80, "log10l")
-HANDLE_LIBCALL(LOG10_F128, "log10l")
+HANDLE_LIBCALL(LOG10_F128, "log10f128")
 HANDLE_LIBCALL(LOG10_PPCF128, "log10l")
 HANDLE_LIBCALL(LOG10_FINITE_F32, "__log10f_finite")
 HANDLE_LIBCALL(LOG10_FINITE_F64, "__log10_finite")
 HANDLE_LIBCALL(LOG10_FINITE_F80, "__log10l_finite")
-HANDLE_LIBCALL(LOG10_FINITE_F128, "__log10l_finite")
+HANDLE_LIBCALL(LOG10_FINITE_F128, "__log10f128_finite")
 HANDLE_LIBCALL(LOG10_FINITE_PPCF128, "__log10l_finite")
 HANDLE_LIBCALL(EXP_F32, "expf")
 HANDLE_LIBCALL(EXP_F64, "exp")
 HANDLE_LIBCALL(EXP_F80, "expl")
-HANDLE_LIBCALL(EXP_F128, "expl")
+HANDLE_LIBCALL(EXP_F128, "expf128")
 HANDLE_LIBCALL(EXP_PPCF128, "expl")
 HANDLE_LIBCALL(EXP_FINITE_F32, "__expf_finite")
 HANDLE_LIBCALL(EXP_FINITE_F64, "__exp_finite")
 HANDLE_LIBCALL(EXP_FINITE_F80, "__expl_finite")
-HANDLE_LIBCALL(EXP_FINITE_F128, "__expl_finite")
+HANDLE_LIBCALL(EXP_FINITE_F128, "__expf128_finite")
 HANDLE_LIBCALL(EXP_FINITE_PPCF128, "__expl_finite")
 HANDLE_LIBCALL(EXP2_F32, "exp2f")
 HANDLE_LIBCALL(EXP2_F64, "exp2")
 HANDLE_LIBCALL(EXP2_F80, "exp2l")
-HANDLE_LIBCALL(EXP2_F128, "exp2l")
+HANDLE_LIBCALL(EXP2_F128, "exp2f128")
 HANDLE_LIBCALL(EXP2_PPCF128, "exp2l")
 HANDLE_LIBCALL(EXP2_FINITE_F32, "__exp2f_finite")
 HANDLE_LIBCALL(EXP2_FINITE_F64, "__exp2_finite")
 HANDLE_LIBCALL(EXP2_FINITE_F80, "__exp2l_finite")
-HANDLE_LIBCALL(EXP2_FINITE_F128, "__exp2l_finite")
+HANDLE_LIBCALL(EXP2_FINITE_F128, "__exp2f128_finite")
 HANDLE_LIBCALL(EXP2_FINITE_PPCF128, "__exp2l_finite")
 HANDLE_LIBCALL(EXP10_F32, "exp10f")
 HANDLE_LIBCALL(EXP10_F64, "exp10")
@@ -190,12 +190,12 @@ HANDLE_LIBCALL(EXP10_PPCF128, "exp10l")
 HANDLE_LIBCALL(SIN_F32, "sinf")
 HANDLE_LIBCALL(SIN_F64, "sin")
 HANDLE_LIBCALL(SIN_F80, "sinl")
-HANDLE_LIBCALL(SIN_F128, "sinl")
+HANDLE_LIBCALL(SIN_F128, "sinf128")
 HANDLE_LIBCALL(SIN_PPCF128, "sinl")
 HANDLE_LIBCALL(COS_F32, "cosf")
 HANDLE_LIBCALL(COS_F64, "cos")
 HANDLE_LIBCALL(COS_F80, "cosl")
-HANDLE_LIBCALL(COS_F128, "cosl")
+HANDLE_LIBCALL(COS_F128, "cosf128")
 HANDLE_LIBCALL(COS_PPCF128, "cosl")
 HANDLE_LIBCALL(SINCOS_F32, nullptr)
 HANDLE_LIBCALL(SINCOS_F64, nullptr)
@@ -207,92 +207,92 @@ HANDLE_LIBCALL(SINCOS_STRET_F64, nullptr)
 HANDLE_LIBCALL(POW_F32, "powf")
 HANDLE_LIBCALL(POW_F64, "pow")
 HANDLE_LIBCALL(POW_F80, "powl")
-HANDLE_LIBCALL(POW_F128, "powl")
+HANDLE_LIBCALL(POW_F128, "powf128")
 HANDLE_LIBCALL(POW_PPCF128, "powl")
 HANDLE_LIBCALL(POW_FINITE_F32, "__powf_finite")
 HANDLE_LIBCALL(POW_FINITE_F64, "__pow_finite")
 HANDLE_LIBCALL(POW_FINITE_F80, "__powl_finite")
-HANDLE_LIBCALL(POW_FINITE_F128, "__powl_finite")
+HANDLE_LIBCALL(POW_FINITE_F128, "__powf128_finite")
 HANDLE_LIBCALL(POW_FINITE_PPCF128, "__powl_finite")
 HANDLE_LIBCALL(CEIL_F32, "ceilf")
 HANDLE_LIBCALL(CEIL_F64, "ceil")
 HANDLE_LIBCALL(CEIL_F80, "ceill")
-HANDLE_LIBCALL(CEIL_F128, "ceill")
+HANDLE_LIBCALL(CEIL_F128, "ceilf128")
 HANDLE_LIBCALL(CEIL_PPCF128, "ceill")
 HANDLE_LIBCALL(TRUNC_F32, "truncf")
 HANDLE_LIBCALL(TRUNC_F64, "trunc")
 HANDLE_LIBCALL(TRUNC_F80, "truncl")
-HANDLE_LIBCALL(TRUNC_F128, "truncl")
+HANDLE_LIBCALL(TRUNC_F128, "truncf128")
 HANDLE_LIBCALL(TRUNC_PPCF128, "truncl")
 HANDLE_LIBCALL(RINT_F32, "rintf")
 HANDLE_LIBCALL(RINT_F64, "rint")
 HANDLE_LIBCALL(RINT_F80, "rintl")
-HANDLE_LIBCALL(RINT_F128, "rintl")
+HANDLE_LIBCALL(RINT_F128, "rintf128")
 HANDLE_LIBCALL(RINT_PPCF128, "rintl")
 HANDLE_LIBCALL(NEARBYINT_F32, "nearbyintf")
 HANDLE_LIBCALL(NEARBYINT_F64, "nearbyint")
 HANDLE_LIBCALL(NEARBYINT_F80, "nearbyintl")
-HANDLE_LIBCALL(NEARBYINT_F128, "nearbyintl")
+HANDLE_LIBCALL(NEARBYINT_F128, "nearbyintf128")
 HANDLE_LIBCALL(NEARBYINT_PPCF128, "nearbyintl")
 HANDLE_LIBCALL(ROUND_F32, "roundf")
 HANDLE_LIBCALL(ROUND_F64, "round")
 HANDLE_LIBCALL(ROUND_F80, "roundl")
-HANDLE_LIBCALL(ROUND_F128, "roundl")
+HANDLE_LIBCALL(ROUND_F128, "roundf128")
 HANDLE_LIBCALL(ROUND_PPCF128, "roundl")
 HANDLE_LIBCALL(ROUNDEVEN_F32, "roundevenf")
 HANDLE_LIBCALL(ROUNDEVEN_F64, "roundeven")
 HANDLE_LIBCALL(ROUNDEVEN_F80, "roundevenl")
-HANDLE_LIBCALL(ROUNDEVEN_F128, "roundevenl")
+HANDLE_LIBCALL(ROUNDEVEN_F128, "roundevenf128")
 HANDLE_LIBCALL(ROUNDEVEN_PPCF128, "roundevenl")
 HANDLE_LIBCALL(FLOOR_F32, "floorf")
 HANDLE_LIBCALL(FLOOR_F64, "floor")
 HANDLE_LIBCALL(FLOOR_F80, "floorl")
-HANDLE_LIBCALL(FLOOR_F128, "floorl")
+HANDLE_LIBCALL(FLOOR_F128, "floorf128")
 HANDLE_LIBCALL(FLOOR_PPCF128, "floorl")
 HANDLE_LIBCALL(COPYSIGN_F32, "copysignf")
 HANDLE_LIBCALL(COPYSIGN_F64, "copysign")
 HANDLE_LIBCALL(COPYSIGN_F80, "copysignl")
-HANDLE_LIBCALL(COPYSIGN_F128, "copysignl")
+HANDLE_LIBCALL(COPYSIGN_F128, "copysignf128")
 HANDLE_LIBCALL(COPYSIGN_PPCF128, "copysignl")
 HANDLE_LIBCALL(FMIN_F32, "fminf")
 HANDLE_LIBCALL(FMIN_F64, "fmin")
 HANDLE_LIBCALL(FMIN_F80, "fminl")
-HANDLE_LIBCALL(FMIN_F128, "fminl")
+HANDLE_LIBCALL(FMIN_F128, "fminf128")
 HANDLE_LIBCALL(FMIN_PPCF128, "fminl")
 HANDLE_LIBCALL(FMAX_F32, "fmaxf")
 HANDLE_LIBCALL(FMAX_F64, "fmax")
 HANDLE_LIBCALL(FMAX_F80, "fmaxl")
-HANDLE_LIBCALL(FMAX_F128, "fmaxl")
+HANDLE_LIBCALL(FMAX_F128, "fmaxf128")
 HANDLE_LIBCALL(FMAX_PPCF128, "fmaxl")
 HANDLE_LIBCALL(LROUND_F32, "lroundf")
 HANDLE_LIBCALL(LROUND_F64, "lround")
 HANDLE_LIBCALL(LROUND_F80, "lroundl")
-HANDLE_LIBCALL(LROUND_F128, "lroundl")
+HANDLE_LIBCALL(LROUND_F128, "lroundf128")
 HANDLE_LIBCALL(LROUND_PPCF128, "lroundl")
 HANDLE_LIBCALL(LLROUND_F32, "llroundf")
 HANDLE_LIBCALL(LLROUND_F64, "llround")
 HANDLE_LIBCALL(LLROUND_F80, "llroundl")
-HANDLE_LIBCALL(LLROUND_F128, "llroundl")
+HANDLE_LIBCALL(LLROUND_F128, "llroundf128")
 HANDLE_LIBCALL(LLROUND_PPCF128, "llroundl")
 HANDLE_LIBCALL(LRINT_F32, "lrintf")
 HANDLE_LIBCALL(LRINT_F64, "lrint")
 HANDLE_LIBCALL(LRINT_F80, "lrintl")
-HANDLE_LIBCALL(LRINT_F128, "lrintl")
+HANDLE_LIBCALL(LRINT_F128, "lrintf128")
 HANDLE_LIBCALL(LRINT_PPCF128, "lrintl")
 HANDLE_LIBCALL(LLRINT_F32, "llrintf")
 HANDLE_LIBCALL(LLRINT_F64, "llrint")
 HANDLE_LIBCALL(LLRINT_F80, "llrintl")
-HANDLE_LIBCALL(LLRINT_F128, "llrintl")
+HANDLE_LIBCALL(LLRINT_F128, "llrintf128")
 HANDLE_LIBCALL(LLRINT_PPCF128, "llrintl")
 HANDLE_LIBCALL(LDEXP_F32, "ldexpf")
 HANDLE_LIBCALL(LDEXP_F64, "ldexp")
 HANDLE_LIBCALL(LDEXP_F80, "ldexpl")
-HANDLE_LIBCALL(LDEXP_F128, "ldexpl")
+HANDLE_LIBCALL(LDEXP_F128, "ldexpf128")
 HANDLE_LIBCALL(LDEXP_PPCF128, "ldexpl")
 HANDLE_LIBCALL(FREXP_F32, "frexpf")
 HANDLE_LIBCALL(FREXP_F64, "frexp")
 HANDLE_LIBCALL(FREXP_F80, "frexpl")
-HANDLE_LIBCALL(FREXP_F128, "frexpl")
+HANDLE_LIBCALL(FREXP_F128, "frexpf128")
 HANDLE_LIBCALL(FREXP_PPCF128, "frexpl")
 
 // Floating point environment
diff --git a/llvm/test/CodeGen/Generic/f128-math-lowering.ll b/llvm/test/CodeGen/Generic/f128-math-lowering.ll
index 8a70786d97fe67..530abb34cbdee9 100644
--- a/llvm/test/CodeGen/Generic/f128-math-lowering.ll
+++ b/llvm/test/CodeGen/Generic/f128-math-lowering.ll
@@ -33,11 +33,11 @@ declare fp128 @llvm.cbrt.f128(fp128)
 
 define fp128 @test_ceilf128(fp128 %a) {
 ; CHECK-LABEL:      test_ceilf128:
-; CHECK-AARCH64:    b ceill
-; CHECK-RISCV32:    call ceill at plt
-; CHECK-S390X:      brasl {{%.*}} ceill at PLT
-; CHECK-X64:        jmp ceill at PLT
-; CHECK-X86:        calll ceill
+; CHECK-AARCH64:    b ceilf128
+; CHECK-RISCV32:    call ceilf128 at plt
+; CHECK-S390X:      brasl {{%.*}} ceilf128 at PLT
+; CHECK-X64:        jmp ceilf128 at PLT
+; CHECK-X86:        calll ceilf128
 start:
   %0 = tail call fp128 @llvm.ceil.f128(fp128 %a)
   ret fp128 %0
@@ -138,11 +138,11 @@ declare fp128 @llvm.copysign.f128(fp128, fp128)
 
 define fp128 @test_cosf128(fp128 %a) {
 ; CHECK-LABEL:      test_cosf128:
-; CHECK-AARCH64:    b cosl
-; CHECK-RISCV32:    call cosl at plt
-; CHECK-S390X:      brasl {{%.*}} cosl at PLT
-; CHECK-X64:        jmp cosl at PLT
-; CHECK-X86:        calll cosl
+; CHECK-AARCH64:    b cosf128
+; CHECK-RISCV32:    call cosf128 at plt
+; CHECK-S390X:      brasl {{%.*}} cosf128 at PLT
+; CHECK-X64:        jmp cosf128 at PLT
+; CHECK-X86:        calll cosf128
 start:
   %0 = tail call fp128 @llvm.cos.f128(fp128 %a)
   ret fp128 %0
@@ -153,11 +153,11 @@ declare fp128 @llvm.cos.f128(fp128)
 
 define fp128 @test_exp2f128(fp128 %a) {
 ; CHECK-LABEL:      test_exp2f128:
-; CHECK-AARCH64:    b exp2l
-; CHECK-RISCV32:    call exp2l at plt
-; CHECK-S390X:      brasl {{%.*}} exp2l at PLT
-; CHECK-X64:        jmp exp2l at PLT
-; CHECK-X86:        calll exp2l
+; CHECK-AARCH64:    b exp2f128
+; CHECK-RISCV32:    call exp2f128 at plt
+; CHECK-S390X:      brasl {{%.*}} exp2f128 at PLT
+; CHECK-X64:        jmp exp2f128 at PLT
+; CHECK-X86:        calll exp2f128
 start:
   %0 = tail call fp128 @llvm.exp2.f128(fp128 %a)
   ret fp128 %0
@@ -183,11 +183,11 @@ declare fp128 @llvm.__exp2f128_finite.f128(fp128)
 
 define fp128 @test_expf128(fp128 %a) {
 ; CHECK-LABEL:      test_expf128:
-; CHECK-AARCH64:    b expl
-; CHECK-RISCV32:    call expl at plt
-; CHECK-S390X:      brasl {{%.*}} expl at PLT
-; CHECK-X64:        jmp expl at PLT
-; CHECK-X86:        calll expl
+; CHECK-AARCH64:    b expf128
+; CHECK-RISCV32:    call expf128 at plt
+; CHECK-S390X:      brasl {{%.*}} expf128 at PLT
+; CHECK-X64:        jmp expf128 at PLT
+; CHECK-X86:        calll expf128
 start:
   %0 = tail call fp128 @llvm.exp.f128(fp128 %a)
   ret fp128 %0
@@ -213,11 +213,11 @@ declare fp128 @llvm.__expf128_finite.f128(fp128)
 
 define fp128 @test_floorf128(fp128 %a) {
 ; CHECK-LABEL:      test_floorf128:
-; CHECK-AARCH64:    b floorl
-; CHECK-RISCV32:    call floorl at plt
-; CHECK-S390X:      brasl {{%.*}} floorl at PLT
-; CHECK-X64:        jmp floorl at PLT
-; CHECK-X86:        calll floorl
+; CHECK-AARCH64:    b floorf128
+; CHECK-RISCV32:    call floorf128 at plt
+; CHECK-S390X:      brasl {{%.*}} floorf128 at PLT
+; CHECK-X64:        jmp floorf128 at PLT
+; CHECK-X86:        calll floorf128
 start:
   %0 = tail call fp128 @llvm.floor.f128(fp128 %a)
   ret fp128 %0
@@ -228,11 +228,11 @@ declare fp128 @llvm.floor.f128(fp128)
 
 define fp128 @test_fmaf128(fp128 %a, fp128 %b, fp128 %c) {
 ; CHECK-LABEL:      test_fmaf128:
-; CHECK-AARCH64:    b fmal
-; CHECK-RISCV32:    call fmal at plt
-; CHECK-S390X:      brasl {{%.*}} fmal at PLT
-; CHECK-X64:        jmp fmal at PLT
-; CHECK-X86:        calll fmal
+; CHECK-AARCH64:    b fmaf128
+; CHECK-RISCV32:    call fmaf128 at plt
+; CHECK-S390X:      brasl {{%.*}} fmaf128 at PLT
+; CHECK-X64:        jmp fmaf128 at PLT
+; CHECK-X86:        calll fmaf128
 start:
   %0 = tail call fp128 @llvm.fma.f128(fp128 %a, fp128 %b, fp128 %c)
   ret fp128 %0
@@ -288,11 +288,11 @@ declare fp128 @llvm.fmod.f128(fp128, fp128)
 
 define { fp128, i32 } @test_frexpf128(fp128 %a) {
 ; CHECK-LABEL:      test_frexpf128:
-; CHECK-AARCH64:    bl frexpl
-; CHECK-RISCV32:    call frexpl at plt
-; CHECK-S390X:      brasl {{%.*}} frexpl at PLT
-; CHECK-X64:        callq frexpl at PLT
-; CHECK-X86:        calll frexpl
+; CHECK-AARCH64:    bl frexpf128
+; CHECK-RISCV32:    call frexpf128 at plt
+; CHECK-S390X:      brasl {{%.*}} frexpf128 at PLT
+; CHECK-X64:        callq frexpf128 at PLT
+; CHECK-X86:        calll frexpf128
 start:
   %0 = tail call { fp128, i32 } @llvm.frexp.f128(fp128 %a)
   ret { fp128, i32 } %0
@@ -303,11 +303,11 @@ declare { fp128, i32 } @llvm.frexp.f128(fp128)
 
 define fp128 @test_ldexpf128(fp128 %a, i32 %b) {
 ; CHECK-LABEL:      test_ldexpf128:
-; CHECK-AARCH64:    b ldexpl
-; CHECK-RISCV32:    call ldexpl at plt
-; CHECK-S390X:      brasl {{%.*}} ldexpl at PLT
-; CHECK-X64:        jmp ldexpl at PLT
-; CHECK-X86:        calll ldexpl
+; CHECK-AARCH64:    b ldexpf128
+; CHECK-RISCV32:    call ldexpf128 at plt
+; CHECK-S390X:      brasl {{%.*}} ldexpf128 at PLT
+; CHECK-X64:        jmp ldexpf128 at PLT
+; CHECK-X86:        calll ldexpf128
 start:
   %0 = tail call fp128 @llvm.ldexp.f128(fp128 %a, i32 %b)
   ret fp128 %0
@@ -318,11 +318,11 @@ declare fp128 @llvm.ldexp.f128(fp128, i32)
 
 define i64 @test_llrintf128(fp128 %a) {
 ; CHECK-LABEL:      test_llrintf128:
-; CHECK-AARCH64:    b llrintl
-; CHECK-RISCV32:    call llrintl at plt
-; CHECK-S390X:      brasl {{%.*}} llrintl at PLT
-; CHECK-X64:        jmp llrintl at PLT
-; CHECK-X86:        calll llrintl
+; CHECK-AARCH64:    b llrintf128
+; CHECK-RISCV32:    call llrintf128 at plt
+; CHECK-S390X:      brasl {{%.*}} llrintf128 at PLT
+; CHECK-X64:        jmp llrintf128 at PLT
+; CHECK-X86:        calll llrintf128
 start:
   %0 = tail call i64 @llvm.llrint.f128(fp128 %a)
   ret i64 %0
@@ -333,11 +333,11 @@ declare i64 @llvm.llrint.f128(fp128)
 
 define i64 @test_llroundf128(fp128 %a) {
 ; CHECK-LABEL:      test_llroundf128:
-; CHECK-AARCH64:    b llroundl
-; CHECK-RISCV32:    call llroundl at plt
-; CHECK-S390X:      brasl {{%.*}} llroundl at PLT
-; CHECK-X64:        jmp llroundl at PLT
-; CHECK-X86:        calll llroundl
+; CHECK-AARCH64:    b llroundf128
+; CHECK-RISCV32:    call llroundf128 at plt
+; CHECK-S390X:      brasl {{%.*}} llroundf128 at PLT
+; CHECK-X64:        jmp llroundf128 at PLT
+; CHECK-X86:        calll llroundf128
 start:
   %0 = tail call i64 @llvm.llround.i64.f128(fp128 %a)
   ret i64 %0
@@ -348,11 +348,11 @@ declare i64 @llvm.llround.i64.f128(fp128)
 
 define fp128 @test_log10f128(fp128 %a) {
 ; CHECK-LABEL:      test_log10f128:
-; CHECK-AARCH64:    b log10l
-; CHECK-RISCV32:    call log10l at plt
-; CHECK-S390X:      brasl {{%.*}} log10l at PLT
-; CHECK-X64:        jmp log10l at PLT
-; CHECK-X86:        calll log10l
+; CHECK-AARCH64:    b log10f128
+; CHECK-RISCV32:    call log10f128 at plt
+; CHECK-S390X:      brasl {{%.*}} log10f128 at PLT
+; CHECK-X64:        jmp log10f128 at PLT
+; CHECK-X86:        calll log10f128
 start:
   %0 = tail call fp128 @llvm.log10.f128(fp128 %a)
   ret fp128 %0
@@ -378,11 +378,11 @@ declare fp128 @llvm.__log10f128_finite.f128(fp128)
 
 define fp128 @test_log2f128(fp128 %a) {
 ; CHECK-LABEL:      test_log2f128:
-; CHECK-AARCH64:    b log2l
-; CHECK-RISCV32:    call log2l at plt
-; CHECK-S390X:      brasl {{%.*}} log2l at PLT
-; CHECK-X64:        jmp log2l at PLT
-; CHECK-X86:        calll log2l
+; CHECK-AARCH64:    b log2f128
+; CHECK-RISCV32:    call log2f128 at plt
+; CHECK-S390X:      brasl {{%.*}} log2f128 at PLT
+; CHECK-X64:        jmp log2f128 at PLT
+; CHECK-X86:        calll log2f128
 start:
   %0 = tail call fp128 @llvm.log2.f128(fp128 %a)
   ret fp128 %0
@@ -408,11 +408,11 @@ declare fp128 @llvm.__log2f128_finite.f128(fp128)
 
 define fp128 @test_logf128(fp128 %a) {
 ; CHECK-LABEL:      test_logf128:
-; CHECK-AARCH64:    b logl
-; CHECK-RISCV32:    call logl at plt
-; CHECK-S390X:      brasl {{%.*}} logl at PLT
-; CHECK-X64:        jmp logl at PLT
-; CHECK-X86:        calll logl
+; CHECK-AARCH64:    b logf128
+; CHECK-RISCV32:    call logf128 at plt
+; CHECK-S390X:      brasl {{%.*}} logf128 at PLT
+; CHECK-X64:        jmp logf128 at PLT
+; CHECK-X86:        calll logf128
 start:
   %0 = tail call fp128 @llvm.log.f128(fp128 %a)
   ret fp128 %0
@@ -438,11 +438,11 @@ declare fp128 @llvm.__logf128_finite.f128(fp128)
 
 define i64 @test_lrintf128(fp128 %a) {
 ; CHECK-LABEL:      test_lrintf128:
-; CHECK-AARCH64:    b lrintl
-; CHECK-RISCV32:    call lrintl at plt
-; CHECK-S390X:      brasl {{%.*}} lrintl at PLT
-; CHECK-X64:        jmp lrintl at PLT
-; CHECK-X86:        calll lrintl
+; CHECK-AARCH64:    b lrintf128
+; CHECK-RISCV32:    call lrintf128 at plt
+; CHECK-S390X:      brasl {{%.*}} lrintf128 at PLT
+; CHECK-X64:        jmp lrintf128 at PLT
+; CHECK-X86:        calll lrintf128
 start:
   %0 = tail call i64 @llvm.lrint.f128(fp128 %a)
   ret i64 %0
@@ -453,11 +453,11 @@ declare i64 @llvm.lrint.f128(fp128)
 
 define i64 @test_lroundf128(fp128 %a) {
 ; CHECK-LABEL:      test_lroundf128:
-; CHECK-AARCH64:    b lroundl
-; CHECK-RISCV32:    call lroundl at plt
-; CHECK-S390X:      brasl {{%.*}} lroundl at PLT
-; CHECK-X64:        jmp lroundl at PLT
-; CHECK-X86:        calll lroundl
+; CHECK-AARCH64:    b lroundf128
+; CHECK-RISCV32:    call lroundf128 at plt
+; CHECK-S390X:      brasl {{%.*}} lroundf128 at PLT
+; CHECK-X64:        jmp lroundf128 at PLT
+; CHECK-X86:        calll lroundf128
 start:
   %0 = tail call i64 @llvm.lround.i64.f128(fp128 %a)
   ret i64 %0
@@ -468,11 +468,11 @@ declare i64 @llvm.lround.i64.f128(fp128)
 
 define fp128 @test_nearbyintf128(fp128 %a) {
 ; CHECK-LABEL:      test_nearbyintf128:
-; CHECK-AARCH64:    b nearbyintl
-; CHECK-RISCV32:    call nearbyintl at plt
-; CHECK-S390X:      brasl {{%.*}} nearbyintl at PLT
-; CHECK-X64:        jmp nearbyintl at PLT
-; CHECK-X86:        calll nearbyintl
+; CHECK-AARCH64:    b nearbyintf128
+; CHECK-RISCV32:    call nearbyintf128 at plt
+; CHECK-S390X:      brasl {{%.*}} nearbyintf128 at PLT
+; CHECK-X64:        jmp nearbyintf128 at PLT
+; CHECK-X86:        calll nearbyintf128
 start:
   %0 = tail call fp128 @llvm.nearbyint.f128(fp128 %a)
   ret fp128 %0
@@ -483,11 +483,11 @@ declare fp128 @llvm.nearbyint.f128(fp128)
 
 define fp128 @test_powf128(fp128 %a, fp128 %b) {
 ; CHECK-LABEL:      test_powf128:
-; CHECK-AARCH64:    b powl
-; CHECK-RISCV32:    call powl at plt
-; CHECK-S390X:      brasl {{%.*}} powl at PLT
-; CHECK-X64:        jmp powl at PLT
-; CHECK-X86:        calll powl
+; CHECK-AARCH64:    b powf128
+; CHECK-RISCV32:    call powf128 at plt
+; CHECK-S390X:      brasl {{%.*}} powf128 at PLT
+; CHECK-X64:        jmp powf128 at PLT
+; CHECK-X86:        calll powf128
 start:
   %0 = tail call fp128 @llvm.pow.f128(fp128 %a, fp128 %b)
   ret fp128 %0
@@ -513,8 +513,8 @@ declare fp128 @llvm.__powf128_finite.f128(fp128, fp128)
 
 define fp128 @test_rintf128(fp128 %a) {
 ; CHECK-LABEL:      test_rintf128:
-; CHECK-AARCH64:    b rintl
-; CHECK-RISCV32:    call rintl at plt
+; CHECK-AARCH64:    b rintf128
+; CHECK-RISCV32:    call rintf128 at plt
 ;
 ; CHECK-S390X-LABEL: test_rintf128:
 ; CHECK-S390X:       # %bb.0: # %start
@@ -525,8 +525,8 @@ define fp128 @test_rintf128(fp128 %a) {
 ; CHECK-S390X-NEXT:    std %f2, 8(%r2)
 ; CHECK-S390X-NEXT:    br %r14
 ;
-; CHECK-X64:        jmp rintl at PLT
-; CHECK-X86:        calll rintl
+; CHECK-X64:        jmp rintf128 at PLT
+; CHECK-X86:        calll rintf128
 start:
   %0 = tail call fp128 @llvm.rint.f128(fp128 %a)
   ret fp128 %0
@@ -537,11 +537,11 @@ declare fp128 @llvm.rint.f128(fp128)
 
 define fp128 @test_roundevenf128(fp128 %a) {
 ; CHECK-LABEL:      test_roundevenf128:
-; CHECK-AARCH64:    b roundevenl
-; CHECK-RISCV32:    call roundevenl at plt
-; CHECK-S390X:      brasl {{%.*}} roundevenl at PLT
-; CHECK-X64:        jmp roundevenl at PLT
-; CHECK-X86:        calll roundevenl
+; CHECK-AARCH64:    b roundevenf128
+; CHECK-RISCV32:    call roundevenf128 at plt
+; CHECK-S390X:      brasl {{%.*}} roundevenf128 at PLT
+; CHECK-X64:        jmp roundevenf128 at PLT
+; CHECK-X86:        calll roundevenf128
 start:
   %0 = tail call fp128 @llvm.roundeven.f128(fp128 %a)
   ret fp128 %0
@@ -552,11 +552,11 @@ declare fp128 @llvm.roundeven.f128(fp128)
 
 define fp128 @test_roundf128(fp128 %a) {
 ; CHECK-LABEL:      test_roundf128:
-; CHECK-AARCH64:    b roundl
-; CHECK-RISCV32:    call roundl at plt
-; CHECK-S390X:      brasl {{%.*}} roundl at PLT
-; CHECK-X64:        jmp roundl at PLT
-; CHECK-X86:        calll roundl
+; CHECK-AARCH64:    b roundf128
+; CHECK-RISCV32:    call roundf128 at plt
+; CHECK-S390X:      brasl {{%.*}} roundf128 at PLT
+; CHECK-X64:        jmp roundf128 at PLT
+; CHECK-X86:        calll roundf128
 start:
   %0 = tail call fp128 @llvm.round.f128(fp128 %a)
   ret fp128 %0
@@ -567,11 +567,11 @@ declare fp128 @llvm.round.f128(fp128)
 
 define fp128 @test_sinf128(fp128 %a) {
 ; CHECK-LABEL:      test_sinf128:
-; CHECK-AARCH64:    b sinl
-; CHECK-RISCV32:    call sinl at plt
-; CHECK-S390X:      brasl {{%.*}} sinl at PLT
-; CHECK-X64:        jmp sinl at PLT
-; CHECK-X86:        calll sinl
+; CHECK-AARCH64:    b sinf128
+; CHECK-RISCV32:    call sinf128 at plt
+; CHECK-S390X:      brasl {{%.*}} sinf128 at PLT
+; CHECK-X64:        jmp sinf128 at PLT
+; CHECK-X86:        calll sinf128
 start:
   %0 = tail call fp128 @llvm.sin.f128(fp128 %a)
   ret fp128 %0
@@ -582,11 +582,11 @@ declare fp128 @llvm.sin.f128(fp128)
 
 define fp128 @test_sqrtf128(fp128 %a) {
 ; CHECK-LABEL:      test_sqrtf128:
-; CHECK-AARCH64:    b sqrtl
-; CHECK-RISCV32:    call sqrtl at plt
+; CHECK-AARCH64:    b sqrtf128
+; CHECK-RISCV32:    call sqrtf128 at plt
 ; CHECK-S390X:      sqxbr {{%.*}} {{%.*}}
-; CHECK-X64:        jmp sqrtl at PLT
-; CHECK-X86:        calll sqrtl
+; CHECK-X64:        jmp sqrtf128 at PLT
+; CHECK-X86:        calll sqrtf128
 start:
   %0 = tail call fp128 @llvm.sqrt.f128(fp128 %a)
   ret fp128 %0
@@ -597,11 +597,11 @@ declare fp128 @llvm.sqrt.f128(fp128)
 
 define fp128 @test_truncf128(fp128 %a) {
 ; CHECK-LABEL:      test_truncf128:
-; CHECK-AARCH64:    b truncl
-; CHECK-RISCV32:    call truncl at plt
-; CHECK-S390X:      brasl {{%.*}} truncl at PLT
-; CHECK-X64:        jmp truncl at PLT
-; CHECK-X86:        calll truncl
+; CHECK-AARCH64:    b truncf128
+; CHECK-RISCV32:    call truncf128 at plt
+; CHECK-S390X:      brasl {{%.*}} truncf128 at PLT
+; CHECK-X64:        jmp truncf128 at PLT
+; CHECK-X86:        calll truncf128
 start:
   %0 = tail call fp128 @llvm.trunc.f128(fp128 %a)
   ret fp128 %0

>From c00254c7826ae88676e858f7ee64d52cd9149bdd Mon Sep 17 00:00:00 2001
From: Trevor Gross <tmgross at umich.edu>
Date: Fri, 29 Dec 2023 03:45:24 -0500
Subject: [PATCH 4/4] [IR] Use long double functions for `fp128` intrinsics
 where possible

Functions such as `sinf128` are not always available. For targets where `long
double` has the same layout as `f128`, use the more common `long double`
functions such as `sinl` instead.
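
A rough sketch of the resulting behavior, based on the updated test
expectations below (illustrative; the actual change is in the diff):

  // In TargetLoweringBase::InitLibcalls: when long double is binary128,
  // the F128 entries fall back to the long double names.
  if (TT.isLongDoubleF128())
    setLibcallName(RTLIB::SIN_F128, "sinl"); // likewise for the other F128 calls
  // Net effect per target (from the updated tests):
  //   x86_64 / i686            -> sinf128 (80-bit long double)
  //   aarch64, riscv32, s390x  -> sinl    (binary128 long double)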
---
 llvm/lib/CodeGen/TargetLoweringBase.cpp       |  38 +++++
 .../CodeGen/Generic/f128-math-lowering.ll     | 136 +++++++++---------
 2 files changed, 105 insertions(+), 69 deletions(-)

diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 7dffd8d4f2b28e..0f534fce08c545 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -221,6 +221,44 @@ void TargetLoweringBase::InitLibcalls(const Triple &TT) {
     setLibcallName(RTLIB::FREXP_F128, nullptr);
     setLibcallName(RTLIB::FREXP_PPCF128, nullptr);
   }
+
+  if (TT.isLongDoubleF128()) {
+    // Prefer the more widely available long double functions for fp128.
+    setLibcallName(RTLIB::REM_F128, "fmodl");
+    setLibcallName(RTLIB::FMA_F128, "fmal");
+    setLibcallName(RTLIB::SQRT_F128, "sqrtl");
+    setLibcallName(RTLIB::CBRT_F128, "cbrtl");
+    setLibcallName(RTLIB::LOG_F128, "logl");
+    setLibcallName(RTLIB::LOG_FINITE_F128, "__logl_finite");
+    setLibcallName(RTLIB::LOG2_F128, "log2l");
+    setLibcallName(RTLIB::LOG2_FINITE_F128, "__log2l_finite");
+    setLibcallName(RTLIB::LOG10_F128, "log10l");
+    setLibcallName(RTLIB::LOG10_FINITE_F128, "__log10l_finite");
+    setLibcallName(RTLIB::EXP_F128, "expl");
+    setLibcallName(RTLIB::EXP_FINITE_F128, "__expl_finite");
+    setLibcallName(RTLIB::EXP2_F128, "exp2l");
+    setLibcallName(RTLIB::EXP2_FINITE_F128, "__exp2l_finite");
+    setLibcallName(RTLIB::SIN_F128, "sinl");
+    setLibcallName(RTLIB::COS_F128, "cosl");
+    setLibcallName(RTLIB::POW_F128, "powl");
+    setLibcallName(RTLIB::POW_FINITE_F128, "__powl_finite");
+    setLibcallName(RTLIB::CEIL_F128, "ceill");
+    setLibcallName(RTLIB::TRUNC_F128, "truncl");
+    setLibcallName(RTLIB::RINT_F128, "rintl");
+    setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintl");
+    setLibcallName(RTLIB::ROUND_F128, "roundl");
+    setLibcallName(RTLIB::ROUNDEVEN_F128, "roundevenl");
+    setLibcallName(RTLIB::FLOOR_F128, "floorl");
+    setLibcallName(RTLIB::COPYSIGN_F128, "copysignl");
+    setLibcallName(RTLIB::FMIN_F128, "fminl");
+    setLibcallName(RTLIB::FMAX_F128, "fmaxl");
+    setLibcallName(RTLIB::LROUND_F128, "lroundl");
+    setLibcallName(RTLIB::LLROUND_F128, "llroundl");
+    setLibcallName(RTLIB::LRINT_F128, "lrintl");
+    setLibcallName(RTLIB::LLRINT_F128, "llrintl");
+    setLibcallName(RTLIB::LDEXP_F128, "ldexpl");
+    setLibcallName(RTLIB::FREXP_F128, "frexpl");
+  }
 }
 
 /// GetFPLibCall - Helper to return the right libcall for the given floating
diff --git a/llvm/test/CodeGen/Generic/f128-math-lowering.ll b/llvm/test/CodeGen/Generic/f128-math-lowering.ll
index 530abb34cbdee9..4eff82850c59ab 100644
--- a/llvm/test/CodeGen/Generic/f128-math-lowering.ll
+++ b/llvm/test/CodeGen/Generic/f128-math-lowering.ll
@@ -13,8 +13,6 @@
 ; We test on x86 and x64 which have 80-bit ld, as well as aarch64 (ld == f128),
 ; riscv32 (ld == f64), and s380x (ld == f128 with different alignment from
 ; x64/aarch64 f128).
-;
-; FIXME: these emit calls to long double functions but should emit f128 calls
 
 define fp128 @test_cbrtf128(fp128 %a) {
 ; CHECK-LABEL:      test_cbrtf128:
@@ -33,9 +31,9 @@ declare fp128 @llvm.cbrt.f128(fp128)
 
 define fp128 @test_ceilf128(fp128 %a) {
 ; CHECK-LABEL:      test_ceilf128:
-; CHECK-AARCH64:    b ceilf128
-; CHECK-RISCV32:    call ceilf128 at plt
-; CHECK-S390X:      brasl {{%.*}} ceilf128 at PLT
+; CHECK-AARCH64:    b ceill
+; CHECK-RISCV32:    call ceill at plt
+; CHECK-S390X:      brasl {{%.*}} ceill at PLT
 ; CHECK-X64:        jmp ceilf128 at PLT
 ; CHECK-X86:        calll ceilf128
 start:
@@ -138,9 +136,9 @@ declare fp128 @llvm.copysign.f128(fp128, fp128)
 
 define fp128 @test_cosf128(fp128 %a) {
 ; CHECK-LABEL:      test_cosf128:
-; CHECK-AARCH64:    b cosf128
-; CHECK-RISCV32:    call cosf128 at plt
-; CHECK-S390X:      brasl {{%.*}} cosf128 at PLT
+; CHECK-AARCH64:    b cosl
+; CHECK-RISCV32:    call cosl at plt
+; CHECK-S390X:      brasl {{%.*}} cosl at PLT
 ; CHECK-X64:        jmp cosf128 at PLT
 ; CHECK-X86:        calll cosf128
 start:
@@ -153,9 +151,9 @@ declare fp128 @llvm.cos.f128(fp128)
 
 define fp128 @test_exp2f128(fp128 %a) {
 ; CHECK-LABEL:      test_exp2f128:
-; CHECK-AARCH64:    b exp2f128
-; CHECK-RISCV32:    call exp2f128 at plt
-; CHECK-S390X:      brasl {{%.*}} exp2f128 at PLT
+; CHECK-AARCH64:    b exp2l
+; CHECK-RISCV32:    call exp2l at plt
+; CHECK-S390X:      brasl {{%.*}} exp2l at PLT
 ; CHECK-X64:        jmp exp2f128 at PLT
 ; CHECK-X86:        calll exp2f128
 start:
@@ -183,9 +181,9 @@ declare fp128 @llvm.__exp2f128_finite.f128(fp128)
 
 define fp128 @test_expf128(fp128 %a) {
 ; CHECK-LABEL:      test_expf128:
-; CHECK-AARCH64:    b expf128
-; CHECK-RISCV32:    call expf128 at plt
-; CHECK-S390X:      brasl {{%.*}} expf128 at PLT
+; CHECK-AARCH64:    b expl
+; CHECK-RISCV32:    call expl at plt
+; CHECK-S390X:      brasl {{%.*}} expl at PLT
 ; CHECK-X64:        jmp expf128 at PLT
 ; CHECK-X86:        calll expf128
 start:
@@ -213,9 +211,9 @@ declare fp128 @llvm.__expf128_finite.f128(fp128)
 
 define fp128 @test_floorf128(fp128 %a) {
 ; CHECK-LABEL:      test_floorf128:
-; CHECK-AARCH64:    b floorf128
-; CHECK-RISCV32:    call floorf128 at plt
-; CHECK-S390X:      brasl {{%.*}} floorf128 at PLT
+; CHECK-AARCH64:    b floorl
+; CHECK-RISCV32:    call floorl at plt
+; CHECK-S390X:      brasl {{%.*}} floorl at PLT
 ; CHECK-X64:        jmp floorf128 at PLT
 ; CHECK-X86:        calll floorf128
 start:
@@ -228,9 +226,9 @@ declare fp128 @llvm.floor.f128(fp128)
 
 define fp128 @test_fmaf128(fp128 %a, fp128 %b, fp128 %c) {
 ; CHECK-LABEL:      test_fmaf128:
-; CHECK-AARCH64:    b fmaf128
-; CHECK-RISCV32:    call fmaf128 at plt
-; CHECK-S390X:      brasl {{%.*}} fmaf128 at PLT
+; CHECK-AARCH64:    b fmal
+; CHECK-RISCV32:    call fmal at plt
+; CHECK-S390X:      brasl {{%.*}} fmal at PLT
 ; CHECK-X64:        jmp fmaf128 at PLT
 ; CHECK-X86:        calll fmaf128
 start:
@@ -288,9 +286,9 @@ declare fp128 @llvm.fmod.f128(fp128, fp128)
 
 define { fp128, i32 } @test_frexpf128(fp128 %a) {
 ; CHECK-LABEL:      test_frexpf128:
-; CHECK-AARCH64:    bl frexpf128
-; CHECK-RISCV32:    call frexpf128 at plt
-; CHECK-S390X:      brasl {{%.*}} frexpf128 at PLT
+; CHECK-AARCH64:    bl frexpl
+; CHECK-RISCV32:    call frexpl at plt
+; CHECK-S390X:      brasl {{%.*}} frexpl at PLT
 ; CHECK-X64:        callq frexpf128 at PLT
 ; CHECK-X86:        calll frexpf128
 start:
@@ -303,9 +301,9 @@ declare { fp128, i32 } @llvm.frexp.f128(fp128)
 
 define fp128 @test_ldexpf128(fp128 %a, i32 %b) {
 ; CHECK-LABEL:      test_ldexpf128:
-; CHECK-AARCH64:    b ldexpf128
-; CHECK-RISCV32:    call ldexpf128 at plt
-; CHECK-S390X:      brasl {{%.*}} ldexpf128 at PLT
+; CHECK-AARCH64:    b ldexpl
+; CHECK-RISCV32:    call ldexpl at plt
+; CHECK-S390X:      brasl {{%.*}} ldexpl at PLT
 ; CHECK-X64:        jmp ldexpf128 at PLT
 ; CHECK-X86:        calll ldexpf128
 start:
@@ -318,9 +316,9 @@ declare fp128 @llvm.ldexp.f128(fp128, i32)
 
 define i64 @test_llrintf128(fp128 %a) {
 ; CHECK-LABEL:      test_llrintf128:
-; CHECK-AARCH64:    b llrintf128
-; CHECK-RISCV32:    call llrintf128 at plt
-; CHECK-S390X:      brasl {{%.*}} llrintf128 at PLT
+; CHECK-AARCH64:    b llrintl
+; CHECK-RISCV32:    call llrintl at plt
+; CHECK-S390X:      brasl {{%.*}} llrintl at PLT
 ; CHECK-X64:        jmp llrintf128 at PLT
 ; CHECK-X86:        calll llrintf128
 start:
@@ -333,9 +331,9 @@ declare i64 @llvm.llrint.f128(fp128)
 
 define i64 @test_llroundf128(fp128 %a) {
 ; CHECK-LABEL:      test_llroundf128:
-; CHECK-AARCH64:    b llroundf128
-; CHECK-RISCV32:    call llroundf128 at plt
-; CHECK-S390X:      brasl {{%.*}} llroundf128 at PLT
+; CHECK-AARCH64:    b llroundl
+; CHECK-RISCV32:    call llroundl at plt
+; CHECK-S390X:      brasl {{%.*}} llroundl at PLT
 ; CHECK-X64:        jmp llroundf128 at PLT
 ; CHECK-X86:        calll llroundf128
 start:
@@ -348,9 +346,9 @@ declare i64 @llvm.llround.i64.f128(fp128)
 
 define fp128 @test_log10f128(fp128 %a) {
 ; CHECK-LABEL:      test_log10f128:
-; CHECK-AARCH64:    b log10f128
-; CHECK-RISCV32:    call log10f128 at plt
-; CHECK-S390X:      brasl {{%.*}} log10f128 at PLT
+; CHECK-AARCH64:    b log10l
+; CHECK-RISCV32:    call log10l at plt
+; CHECK-S390X:      brasl {{%.*}} log10l at PLT
 ; CHECK-X64:        jmp log10f128 at PLT
 ; CHECK-X86:        calll log10f128
 start:
@@ -378,9 +376,9 @@ declare fp128 @llvm.__log10f128_finite.f128(fp128)
 
 define fp128 @test_log2f128(fp128 %a) {
 ; CHECK-LABEL:      test_log2f128:
-; CHECK-AARCH64:    b log2f128
-; CHECK-RISCV32:    call log2f128 at plt
-; CHECK-S390X:      brasl {{%.*}} log2f128 at PLT
+; CHECK-AARCH64:    b log2l
+; CHECK-RISCV32:    call log2l at plt
+; CHECK-S390X:      brasl {{%.*}} log2l at PLT
 ; CHECK-X64:        jmp log2f128 at PLT
 ; CHECK-X86:        calll log2f128
 start:
@@ -408,9 +406,9 @@ declare fp128 @llvm.__log2f128_finite.f128(fp128)
 
 define fp128 @test_logf128(fp128 %a) {
 ; CHECK-LABEL:      test_logf128:
-; CHECK-AARCH64:    b logf128
-; CHECK-RISCV32:    call logf128 at plt
-; CHECK-S390X:      brasl {{%.*}} logf128 at PLT
+; CHECK-AARCH64:    b logl
+; CHECK-RISCV32:    call logl at plt
+; CHECK-S390X:      brasl {{%.*}} logl at PLT
 ; CHECK-X64:        jmp logf128 at PLT
 ; CHECK-X86:        calll logf128
 start:
@@ -438,9 +436,9 @@ declare fp128 @llvm.__logf128_finite.f128(fp128)
 
 define i64 @test_lrintf128(fp128 %a) {
 ; CHECK-LABEL:      test_lrintf128:
-; CHECK-AARCH64:    b lrintf128
-; CHECK-RISCV32:    call lrintf128 at plt
-; CHECK-S390X:      brasl {{%.*}} lrintf128 at PLT
+; CHECK-AARCH64:    b lrintl
+; CHECK-RISCV32:    call lrintl at plt
+; CHECK-S390X:      brasl {{%.*}} lrintl at PLT
 ; CHECK-X64:        jmp lrintf128 at PLT
 ; CHECK-X86:        calll lrintf128
 start:
@@ -453,9 +451,9 @@ declare i64 @llvm.lrint.f128(fp128)
 
 define i64 @test_lroundf128(fp128 %a) {
 ; CHECK-LABEL:      test_lroundf128:
-; CHECK-AARCH64:    b lroundf128
-; CHECK-RISCV32:    call lroundf128 at plt
-; CHECK-S390X:      brasl {{%.*}} lroundf128 at PLT
+; CHECK-AARCH64:    b lroundl
+; CHECK-RISCV32:    call lroundl at plt
+; CHECK-S390X:      brasl {{%.*}} lroundl at PLT
 ; CHECK-X64:        jmp lroundf128 at PLT
 ; CHECK-X86:        calll lroundf128
 start:
@@ -468,9 +466,9 @@ declare i64 @llvm.lround.i64.f128(fp128)
 
 define fp128 @test_nearbyintf128(fp128 %a) {
 ; CHECK-LABEL:      test_nearbyintf128:
-; CHECK-AARCH64:    b nearbyintf128
-; CHECK-RISCV32:    call nearbyintf128 at plt
-; CHECK-S390X:      brasl {{%.*}} nearbyintf128 at PLT
+; CHECK-AARCH64:    b nearbyintl
+; CHECK-RISCV32:    call nearbyintl at plt
+; CHECK-S390X:      brasl {{%.*}} nearbyintl at PLT
 ; CHECK-X64:        jmp nearbyintf128 at PLT
 ; CHECK-X86:        calll nearbyintf128
 start:
@@ -483,9 +481,9 @@ declare fp128 @llvm.nearbyint.f128(fp128)
 
 define fp128 @test_powf128(fp128 %a, fp128 %b) {
 ; CHECK-LABEL:      test_powf128:
-; CHECK-AARCH64:    b powf128
-; CHECK-RISCV32:    call powf128 at plt
-; CHECK-S390X:      brasl {{%.*}} powf128 at PLT
+; CHECK-AARCH64:    b powl
+; CHECK-RISCV32:    call powl at plt
+; CHECK-S390X:      brasl {{%.*}} powl at PLT
 ; CHECK-X64:        jmp powf128 at PLT
 ; CHECK-X86:        calll powf128
 start:
@@ -513,8 +511,8 @@ declare fp128 @llvm.__powf128_finite.f128(fp128, fp128)
 
 define fp128 @test_rintf128(fp128 %a) {
 ; CHECK-LABEL:      test_rintf128:
-; CHECK-AARCH64:    b rintf128
-; CHECK-RISCV32:    call rintf128 at plt
+; CHECK-AARCH64:    b rintl
+; CHECK-RISCV32:    call rintl at plt
 ;
 ; CHECK-S390X-LABEL: test_rintf128:
 ; CHECK-S390X:       # %bb.0: # %start
@@ -537,9 +535,9 @@ declare fp128 @llvm.rint.f128(fp128)
 
 define fp128 @test_roundevenf128(fp128 %a) {
 ; CHECK-LABEL:      test_roundevenf128:
-; CHECK-AARCH64:    b roundevenf128
-; CHECK-RISCV32:    call roundevenf128 at plt
-; CHECK-S390X:      brasl {{%.*}} roundevenf128 at PLT
+; CHECK-AARCH64:    b roundevenl
+; CHECK-RISCV32:    call roundevenl at plt
+; CHECK-S390X:      brasl {{%.*}} roundevenl at PLT
 ; CHECK-X64:        jmp roundevenf128 at PLT
 ; CHECK-X86:        calll roundevenf128
 start:
@@ -552,9 +550,9 @@ declare fp128 @llvm.roundeven.f128(fp128)
 
 define fp128 @test_roundf128(fp128 %a) {
 ; CHECK-LABEL:      test_roundf128:
-; CHECK-AARCH64:    b roundf128
-; CHECK-RISCV32:    call roundf128 at plt
-; CHECK-S390X:      brasl {{%.*}} roundf128 at PLT
+; CHECK-AARCH64:    b roundl
+; CHECK-RISCV32:    call roundl at plt
+; CHECK-S390X:      brasl {{%.*}} roundl at PLT
 ; CHECK-X64:        jmp roundf128 at PLT
 ; CHECK-X86:        calll roundf128
 start:
@@ -567,9 +565,9 @@ declare fp128 @llvm.round.f128(fp128)
 
 define fp128 @test_sinf128(fp128 %a) {
 ; CHECK-LABEL:      test_sinf128:
-; CHECK-AARCH64:    b sinf128
-; CHECK-RISCV32:    call sinf128 at plt
-; CHECK-S390X:      brasl {{%.*}} sinf128 at PLT
+; CHECK-AARCH64:    b sinl
+; CHECK-RISCV32:    call sinl at plt
+; CHECK-S390X:      brasl {{%.*}} sinl at PLT
 ; CHECK-X64:        jmp sinf128 at PLT
 ; CHECK-X86:        calll sinf128
 start:
@@ -582,8 +580,8 @@ declare fp128 @llvm.sin.f128(fp128)
 
 define fp128 @test_sqrtf128(fp128 %a) {
 ; CHECK-LABEL:      test_sqrtf128:
-; CHECK-AARCH64:    b sqrtf128
-; CHECK-RISCV32:    call sqrtf128 at plt
+; CHECK-AARCH64:    b sqrtl
+; CHECK-RISCV32:    call sqrtl at plt
 ; CHECK-S390X:      sqxbr {{%.*}} {{%.*}}
 ; CHECK-X64:        jmp sqrtf128 at PLT
 ; CHECK-X86:        calll sqrtf128
@@ -597,9 +595,9 @@ declare fp128 @llvm.sqrt.f128(fp128)
 
 define fp128 @test_truncf128(fp128 %a) {
 ; CHECK-LABEL:      test_truncf128:
-; CHECK-AARCH64:    b truncf128
-; CHECK-RISCV32:    call truncf128 at plt
-; CHECK-S390X:      brasl {{%.*}} truncf128 at PLT
+; CHECK-AARCH64:    b truncl
+; CHECK-RISCV32:    call truncl at plt
+; CHECK-S390X:      brasl {{%.*}} truncl at PLT
 ; CHECK-X64:        jmp truncf128 at PLT
 ; CHECK-X86:        calll truncf128
 start:


