[libc-commits] [libc] [llvm] [libc][math] Refactor sinf implementation to header-only in src/__support/math folder. (PR #177963)

Muhammad Bassiouni via libc-commits libc-commits at lists.llvm.org
Mon Jan 26 09:00:36 PST 2026


================
@@ -0,0 +1,194 @@
+//===-- Single-precision sin function -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIBC_SRC___SUPPORT_MATH_SINF_H
+#define LIBC_SRC___SUPPORT_MATH_SINF_H
+
+#include "src/__support/FPUtil/BasicOperations.h"
+#include "src/__support/FPUtil/FEnvImpl.h"
+#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/FPUtil/PolyEval.h"
+#include "src/__support/FPUtil/multiply_add.h"
+#include "src/__support/FPUtil/rounding_mode.h"
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/optimization.h"            // LIBC_UNLIKELY
+#include "src/__support/macros/properties/cpu_features.h" // LIBC_TARGET_CPU_HAS_FMA
+
+#if defined(LIBC_MATH_HAS_SKIP_ACCURATE_PASS) &&                               \
+    defined(LIBC_MATH_HAS_INTERMEDIATE_COMP_IN_FLOAT) &&                       \
+    defined(LIBC_TARGET_CPU_HAS_FMA_FLOAT)
+
+#include "sincosf_float_eval.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+namespace math {
+
+LIBC_INLINE static float sinf(float x) {
+  return math::sincosf_float_eval::sincosf_eval</*IS_SIN*/ true>(x);
+}
+
+} // namespace math
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#else // !LIBC_MATH_HAS_INTERMEDIATE_COMP_IN_FLOAT
+
+#include "src/__support/math/sincosf_utils.h"
+
+#ifdef LIBC_TARGET_CPU_HAS_FMA_DOUBLE
+#include "src/__support/math/range_reduction_fma.h"
+#else // !LIBC_TARGET_CPU_HAS_FMA_DOUBLE
+#include "src/__support/math/range_reduction.h"
+#endif // LIBC_TARGET_CPU_HAS_FMA_DOUBLE
+
+namespace LIBC_NAMESPACE_DECL {
+
+namespace math {
+
+LIBC_INLINE static float sinf(float x) {
+  using FPBits = typename fputil::FPBits<float>;
+  FPBits xbits(x);
+
+  uint32_t x_u = xbits.uintval();
+  uint32_t x_abs = x_u & 0x7fff'ffffU;
+  double xd = static_cast<double>(x);
+
+  // Range reduction:
+  // For |x| > pi/32, we perform range reduction as follows:
+  // Find k and y such that:
+  //   x = (k + y) * pi/32
+  //   k is an integer
+  //   |y| < 0.5
+  // For small range (|x| < 2^45 when FMA instructions are available, 2^22
+  // otherwise), this is done by performing:
+  //   k = round(x * 32/pi)
+  //   y = x * 32/pi - k
+  // For large range, we omit the leading parts of 32/pi whose full products
+  // with x are integer multiples of 64, since
+  //   sin((k + y + 64*i) * pi/32) = sin(x + i * 2pi) = sin(x).
+  //
+  // When FMA instructions are not available, we store the digits of 32/pi in
+  // chunks of 28-bit precision.  This will make sure that the products:
+  //   x * THIRTYTWO_OVER_PI_28[i] are all exact.
+  // When FMA instructions are available, we simply store the digits of 32/pi in
+  // chunks of doubles (53 bits of precision).
+  // So when multiplying by the largest values of single precision, the
+  // resulting output should be correct up to 2^(-208 + 128) ~ 2^-80.  By the
+  // worst-case analysis of range reduction, |y| >= 2^-38, so this should give
+  // us more than 40 bits of accuracy. For the worst-case estimation of range
+  // reduction, see, for instance:
+  //   Elementary Functions by J-M. Muller, Chapter 11,
+  //   Handbook of Floating-Point Arithmetic by J-M. Muller et al.,
+  //   Chapter 10.2.
+  //
+  // Once k and y are computed, we then deduce the answer by the sine of sum
+  // formula:
+  //   sin(x) = sin((k + y)*pi/32)
+  //          = sin(y*pi/32) * cos(k*pi/32) + cos(y*pi/32) * sin(k*pi/32)
+  // The values of sin(k*pi/32) and cos(k*pi/32) for k = 0..31 are precomputed
+  // and stored using a vector of 32 doubles. Sin(y*pi/32) and cos(y*pi/32) are
+  // computed using degree-7 and degree-6 minimax polynomials, respectively,
+  // generated by Sollya.
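+  //
+  // As an illustrative sketch of the reduction (values rounded for
+  // readability, not taken from the implementation): for x = 1.0f,
+  // x * 32/pi ~ 10.18592, so
+  //   k = round(10.18592) = 10,  y = 10.18592 - 10 = 0.18592,
+  // and the sum formula reconstructs
+  //   sin(1.0) = sin(0.18592*pi/32) * cos(10*pi/32)
+  //            + cos(0.18592*pi/32) * sin(10*pi/32).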
+
+  // |x| <= pi/16
+  if (LIBC_UNLIKELY(x_abs <= 0x3e49'0fdbU)) {
+
+    // |x| < 0x1.d12ed2p-12f
+    if (LIBC_UNLIKELY(x_abs < 0x39e8'9769U)) {
+      if (LIBC_UNLIKELY(x_abs == 0U)) {
+        // For signed zeros.
+        return x;
+      }
+      // When |x| < 2^-12, the relative error of the approximation sin(x) ~ x
+      // is:
+      //   |sin(x) - x| / |sin(x)| < |x^3| / (6|x|)
+      //                           = x^2 / 6
+      //                           < 2^-25
+      //                           < epsilon(1)/2.
+      // So the correctly rounded values of sin(x) are:
+      //   = x - sign(x)*eps(x) if rounding mode = FE_TOWARDZERO,
+      //                        or (rounding mode = FE_DOWNWARD and x is
+      //                        positive),
+      //                        or (rounding mode = FE_UPWARD and x is
+      //                        negative),
+      //   = x otherwise.
+      // To simplify the rounding decision and make it more efficient, we use
+      //   fma(x, -2^-25, x) instead.
+      // An exhaustive test shows that this formula works correctly for all
+      // rounding modes up to |x| < 0x1.c555dep-11f.
+      // Note: to use the formula x - 2^-25*x to decide the correct rounding, we
+      // do need fma(x, -2^-25, x) to prevent underflow caused by -2^-25*x when
+      // |x| < 2^-125. For targets without FMA instructions, we simply use
+      // double for intermediate results as it is more efficient than using an
+      // emulated version of FMA.
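+      // (A rough magnitude check, assuming x is not at a binade boundary:
+      // 2^-25 * |x| lies between ulp(x)/4 and ulp(x)/2, so fma(x, -2^-25, x)
+      // rounds back to x in the modes that should return x, and drops one
+      // ulp toward zero in the modes listed above.)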
+#if defined(LIBC_TARGET_CPU_HAS_FMA_FLOAT)
+      return fputil::multiply_add(x, -0x1.0p-25f, x);
+#else
+      return static_cast<float>(fputil::multiply_add(xd, -0x1.0p-25, xd));
+#endif // LIBC_TARGET_CPU_HAS_FMA_FLOAT
+    }
+
+    // |x| < pi/16.
+    double xsq = xd * xd;
+
+    // Degree-9 polynomial approximation:
+    //   sin(x) ~ x + a_3 x^3 + a_5 x^5 + a_7 x^7 + a_9 x^9
+    //          = x (1 + a_3 x^2 + ... + a_9 x^8)
+    //          = x * P(x^2)
+    // generated by Sollya with the following commands:
+    // > display = hexadecimal;
+    // > Q = fpminimax(sin(x)/x, [|0, 2, 4, 6, 8|], [|1, D...|], [0, pi/16]);
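+    // Note (assuming fputil::polyeval takes its coefficients in increasing
+    // degree): the call below evaluates
+    //   P(u) = 1 + a_3 u + a_5 u^2 + a_7 u^3 + a_9 u^4   at   u = x^2,
+    // so xd * result gives the odd, degree-9 approximation of sin(x) above.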
+    double result =
+        fputil::polyeval(xsq, 1.0, -0x1.55555555554c6p-3, 0x1.1111111085e65p-7,
+                         -0x1.a019f70fb4d4fp-13, 0x1.718d179815e74p-19);
+    return static_cast<float>(xd * result);
+  }
+
+#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+  if (LIBC_UNLIKELY(x_abs == 0x4619'9998U)) { // x = 0x1.33333p13
+    float r = -0x1.63f4bap-2f;
+    int rounding = fputil::quick_get_round();
+    if ((rounding == FE_DOWNWARD && xbits.is_pos()) ||
+        (rounding == FE_UPWARD && xbits.is_neg()))
+      r = -0x1.63f4bcp-2f;
+    return xbits.is_neg() ? -r : r;
+  }
+#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+
+  if (LIBC_UNLIKELY(x_abs >= 0x7f80'0000U)) {
+    if (xbits.is_signaling_nan()) {
+      fputil::raise_except_if_required(FE_INVALID);
+      return FPBits::quiet_nan().get_val();
+    }
+
+    if (x_abs == 0x7f80'0000U) {
+      fputil::set_errno_if_required(EDOM);
+      fputil::raise_except_if_required(FE_INVALID);
+    }
+    return x + FPBits::quiet_nan().get_val();
+  }
+
+  // Combine the results with the sine of sum formula:
+  //   sin(x) = sin((k + y)*pi/32)
+  //          = sin(y*pi/32) * cos(k*pi/32) + cos(y*pi/32) * sin(k*pi/32)
+  //          = sin_y * cos_k + (1 + cosm1_y) * sin_k
+  //          = sin_y * cos_k + (cosm1_y * sin_k + sin_k)
+  double sin_k, cos_k, sin_y, cosm1_y;
+
+  sincosf_eval(xd, x_abs, sin_k, cos_k, sin_y, cosm1_y);
+
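+  // In the evaluation below, the inner multiply_add computes
+  //   cosm1_y * sin_k + sin_k = cos(y*pi/32) * sin_k,
+  // and the outer one adds sin_y * cos_k, matching the expansion above.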
+  return static_cast<float>(fputil::multiply_add(
+      sin_y, cos_k, fputil::multiply_add(cosm1_y, sin_k, sin_k)));
+}
+
+} // namespace math
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LIBC_MATH_HAS_INTERMEDIATE_COMP_IN_FLOAT
+
+#endif // LIBC_SRC___SUPPORT_MATH_SINF_H
----------------
bassiounix wrote:

```suggestion
#endif // LLVM_LIBC_SRC___SUPPORT_MATH_SINF_H
```

https://github.com/llvm/llvm-project/pull/177963

