[llvm] [Hexagon] Omit calls to specialized {float, fix} routines (PR #117423)
Brian Cain via llvm-commits
llvm-commits at lists.llvm.org
Fri Nov 22 22:09:30 PST 2024
https://github.com/androm3da created https://github.com/llvm/llvm-project/pull/117423
These were introduced in 1213a7a57fdc (Hexagon backend support, 2011-12-12) but they aren't present in libclangrt.builtins-hexagon. The generic versions of these functions are present in the builtins, though. So it should suffice to call those instead.
>From 47c4f544aa1b988633c710e2ba33fc41e040fc24 Mon Sep 17 00:00:00 2001
From: Brian Cain <brian.cain at oss.qualcomm.com>
Date: Fri, 22 Nov 2024 21:35:10 -0800
Subject: [PATCH] [Hexagon] Omit calls to specialized {float,fix} routines
These were introduced in 1213a7a57fdc (Hexagon backend support, 2011-12-12)
but they aren't present in libclangrt.builtins-hexagon. The generic versions
of these functions are present in the builtins, though. So it should suffice
to call those instead.
---
.../Target/Hexagon/HexagonISelLowering.cpp | 7 -
.../CodeGen/Hexagon/i128-fpconv-strict.ll | 139 ++++++++++++++++++
2 files changed, 139 insertions(+), 7 deletions(-)
create mode 100644 llvm/test/CodeGen/Hexagon/i128-fpconv-strict.ll
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 2c8d141aa21080..5dc2b35616ed2c 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -1860,13 +1860,6 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
- setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
- setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
- setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
- setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
- setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
- setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
-
// This is the only fast library function for sqrtd.
if (FastMath)
setLibcallName(RTLIB::SQRT_F64, "__hexagon_fast2_sqrtdf2");
diff --git a/llvm/test/CodeGen/Hexagon/i128-fpconv-strict.ll b/llvm/test/CodeGen/Hexagon/i128-fpconv-strict.ll
new file mode 100644
index 00000000000000..f643c0f2a5f77a
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/i128-fpconv-strict.ll
@@ -0,0 +1,139 @@
+; RUN: llc < %s -mtriple=hexagon-unknown-linux-musl \
+; RUN: | FileCheck %s -check-prefix=CHECK
+
+define i64 @double_to_i128(double %d) nounwind strictfp {
+; CHECK-LABEL: double_to_i128:
+; CHECK: // %bb.0:
+; CHECK: call __fixdfti
+; CHECK: dealloc_return
+ %1 = tail call i128 @llvm.experimental.constrained.fptosi.i128.f64(double %d, metadata !"fpexcept.strict")
+ %2 = trunc i128 %1 to i64
+ ret i64 %2
+}
+
+define i64 @double_to_ui128(double %d) nounwind strictfp {
+; CHECK-LABEL: double_to_ui128:
+; CHECK: // %bb.0:
+; CHECK: call __fixunsdfti
+; CHECK: dealloc_return
+ %1 = tail call i128 @llvm.experimental.constrained.fptoui.i128.f64(double %d, metadata !"fpexcept.strict")
+ %2 = trunc i128 %1 to i64
+ ret i64 %2
+}
+
+define i64 @float_to_i128(float %d) nounwind strictfp {
+; CHECK-LABEL: float_to_i128:
+; CHECK: // %bb.0:
+; CHECK: call __fixsfti
+; CHECK: dealloc_return
+ %1 = tail call i128 @llvm.experimental.constrained.fptosi.i128.f32(float %d, metadata !"fpexcept.strict")
+ %2 = trunc i128 %1 to i64
+ ret i64 %2
+}
+
+define i64 @float_to_ui128(float %d) nounwind strictfp {
+; CHECK-LABEL: float_to_ui128:
+; CHECK: // %bb.0:
+; CHECK: call __fixunssfti
+; CHECK: dealloc_return
+ %1 = tail call i128 @llvm.experimental.constrained.fptoui.i128.f32(float %d, metadata !"fpexcept.strict")
+ %2 = trunc i128 %1 to i64
+ ret i64 %2
+}
+
+define i64 @longdouble_to_i128(ptr nocapture readonly %0) nounwind strictfp {
+; CHECK-LABEL: longdouble_to_i128:
+; CHECK: // %bb.0:
+; CHECK: call __fixxfti
+; CHECK: dealloc_return
+ %2 = load x86_fp80, ptr %0, align 16
+ %3 = tail call i128 @llvm.experimental.constrained.fptosi.i128.f80(x86_fp80 %2, metadata !"fpexcept.strict")
+ %4 = trunc i128 %3 to i64
+ ret i64 %4
+}
+
+define i64 @longdouble_to_ui128(ptr nocapture readonly %0) nounwind strictfp {
+; CHECK-LABEL: longdouble_to_ui128:
+; CHECK: // %bb.0:
+; CHECK: call __fixunsxfti
+; CHECK: dealloc_return
+ %2 = load x86_fp80, ptr %0, align 16
+ %3 = tail call i128 @llvm.experimental.constrained.fptoui.i128.f80(x86_fp80 %2, metadata !"fpexcept.strict")
+ %4 = trunc i128 %3 to i64
+ ret i64 %4
+}
+
+define double @i128_to_double(ptr nocapture readonly %0) nounwind strictfp {
+; CHECK-LABEL: i128_to_double:
+; CHECK: // %bb.0:
+; CHECK: call __floattidf
+; CHECK: dealloc_return
+ %2 = load i128, ptr %0, align 16
+ %3 = tail call double @llvm.experimental.constrained.sitofp.f64.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret double %3
+}
+
+define double @ui128_to_double(ptr nocapture readonly %0) nounwind strictfp {
+; CHECK-LABEL: ui128_to_double:
+; CHECK: // %bb.0:
+; CHECK: call __floatuntidf
+; CHECK: dealloc_return
+ %2 = load i128, ptr %0, align 16
+ %3 = tail call double @llvm.experimental.constrained.uitofp.f64.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret double %3
+}
+
+define float @i128_to_float(ptr nocapture readonly %0) nounwind strictfp {
+; CHECK-LABEL: i128_to_float:
+; CHECK: // %bb.0:
+; CHECK: call __floattisf
+; CHECK: dealloc_return
+ %2 = load i128, ptr %0, align 16
+ %3 = tail call float @llvm.experimental.constrained.sitofp.f32.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %3
+}
+
+define float @ui128_to_float(ptr nocapture readonly %0) nounwind strictfp {
+; CHECK-LABEL: ui128_to_float:
+; CHECK: // %bb.0:
+; CHECK: call __floatuntisf
+; CHECK: dealloc_return
+ %2 = load i128, ptr %0, align 16
+ %3 = tail call float @llvm.experimental.constrained.uitofp.f32.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %3
+}
+
+define void @i128_to_longdouble(ptr noalias nocapture sret(x86_fp80) align 16 %agg.result, ptr nocapture readonly %0) nounwind strictfp {
+; CHECK-LABEL: i128_to_longdouble:
+; CHECK: // %bb.0:
+; CHECK: call __floattixf
+; CHECK: dealloc_return
+ %2 = load i128, ptr %0, align 16
+ %3 = tail call x86_fp80 @llvm.experimental.constrained.sitofp.f80.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ store x86_fp80 %3, ptr %agg.result, align 16
+ ret void
+}
+
+define void @ui128_to_longdouble(ptr noalias nocapture sret(x86_fp80) align 16 %agg.result, ptr nocapture readonly %0) nounwind strictfp {
+; CHECK-LABEL: ui128_to_longdouble:
+; CHECK: // %bb.0:
+; CHECK: call __floatuntixf
+; CHECK: dealloc_return
+ %2 = load i128, ptr %0, align 16
+ %3 = tail call x86_fp80 @llvm.experimental.constrained.uitofp.f80.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ store x86_fp80 %3, ptr %agg.result, align 16
+ ret void
+}
+
+declare i128 @llvm.experimental.constrained.fptosi.i128.f64(double, metadata)
+declare i128 @llvm.experimental.constrained.fptoui.i128.f64(double, metadata)
+declare i128 @llvm.experimental.constrained.fptosi.i128.f32(float, metadata)
+declare i128 @llvm.experimental.constrained.fptoui.i128.f32(float, metadata)
+declare i128 @llvm.experimental.constrained.fptosi.i128.f80(x86_fp80, metadata)
+declare i128 @llvm.experimental.constrained.fptoui.i128.f80(x86_fp80, metadata)
+declare double @llvm.experimental.constrained.sitofp.f64.i128(i128, metadata, metadata)
+declare double @llvm.experimental.constrained.uitofp.f64.i128(i128, metadata, metadata)
+declare float @llvm.experimental.constrained.sitofp.f32.i128(i128, metadata, metadata)
+declare float @llvm.experimental.constrained.uitofp.f32.i128(i128, metadata, metadata)
+declare x86_fp80 @llvm.experimental.constrained.sitofp.f80.i128(i128, metadata, metadata)
+declare x86_fp80 @llvm.experimental.constrained.uitofp.f80.i128(i128, metadata, metadata)
More information about the llvm-commits
mailing list