[clang] [CUDA] fix wrapper cmath header to match #136101 (PR #139164)
Artem Belevich via cfe-commits
cfe-commits at lists.llvm.org
Mon May 12 11:34:56 PDT 2025
https://github.com/Artem-B updated https://github.com/llvm/llvm-project/pull/139164
From a1d60feed11174b9d2106b57ee15ff6d9bc56fa4 Mon Sep 17 00:00:00 2001
From: Artem Belevich <tra at google.com>
Date: Thu, 8 May 2025 14:43:47 -0700
Subject: [PATCH] [CUDA] remove obsolete GPU-side __constexpr* wrappers
libc++ no longer has them, so there's nothing to wrap.
---
clang/lib/Headers/cuda_wrappers/cmath | 50 ---------------------------
1 file changed, 50 deletions(-)
diff --git a/clang/lib/Headers/cuda_wrappers/cmath b/clang/lib/Headers/cuda_wrappers/cmath
index 7deca678bf252..8e9ee34791965 100644
--- a/clang/lib/Headers/cuda_wrappers/cmath
+++ b/clang/lib/Headers/cuda_wrappers/cmath
@@ -39,56 +39,6 @@
__attribute__((device)) long double logb(long double);
__attribute__((device)) long double scalbn(long double, int);
-namespace std {
-
-// For __constexpr_fmin/fmax we only need device-side overloads before c++14
-// where they are not constexpr.
-#if _LIBCPP_STD_VER < 14
-
-__attribute__((device))
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 float __constexpr_fmax(float __x, float __y) _NOEXCEPT {
-  return __builtin_fmaxf(__x, __y);
-}
-
-__attribute__((device))
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 double __constexpr_fmax(double __x, double __y) _NOEXCEPT {
-  return __builtin_fmax(__x, __y);
-}
-
-__attribute__((device))
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 long double
-__constexpr_fmax(long double __x, long double __y) _NOEXCEPT {
-  return __builtin_fmaxl(__x, __y);
-}
-
-template <class _Tp, class _Up, __enable_if_t<is_arithmetic<_Tp>::value && is_arithmetic<_Up>::value, int> = 0>
-__attribute__((device))
-_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename __promote<_Tp, _Up>::type
-__constexpr_fmax(_Tp __x, _Up __y) _NOEXCEPT {
-  using __result_type = typename __promote<_Tp, _Up>::type;
-  return std::__constexpr_fmax(static_cast<__result_type>(__x), static_cast<__result_type>(__y));
-}
-#endif // _LIBCPP_STD_VER < 14
-
-// For logb/scalbn templates we must always provide device overloads because
-// libc++ implementation uses __builtin_XXX which gets translated into a libcall
-// which we can't handle on GPU. We need to forward those to CUDA-provided
-// implementations.
-
-template <class _Tp>
-__attribute__((device))
-_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __constexpr_logb(_Tp __x) {
-  return ::logb(__x);
-}
-
-template <class _Tp>
-__attribute__((device))
-_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Tp __constexpr_scalbn(_Tp __x, int __exp) {
-  return ::scalbn(__x, __exp);
-}
-
-} // namespace std
-
#endif // _LIBCPP_STD_VER
#endif // include guard
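For anyone skimming the patch and wondering why this block existed at all: libc++'s internal helpers such as std::__constexpr_logb expand to __builtin_logb(), which the GPU backends lower to a libcall with no device-side implementation, so the wrapper interposed device overloads that forward to the CUDA-provided C functions instead. A minimal sketch of the (now unnecessary) pattern, reduced from the deleted lines above rather than quoted verbatim:

  // Interpose a device overload of the libc++ internal helper so that,
  // in device compilation, the call forwards to the CUDA-provided
  // ::logb instead of __builtin_logb(), which would become a libcall
  // the GPU cannot satisfy.
  __attribute__((device)) long double logb(long double); // CUDA-side declaration

  namespace std {
  template <class _Tp>
  __attribute__((device)) _Tp __constexpr_logb(_Tp __x) {
    return ::logb(__x); // resolves to the device overload above
  }
  } // namespace std

Since libc++ dropped its __constexpr_* helpers (see #136101), there is no caller left to interpose on, and the whole block can go.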