[Libclc-dev] [PATCH] math: Implement remainder(x, y)
Aaron Watry via Libclc-dev
libclc-dev at lists.llvm.org
Mon Feb 6 20:32:45 PST 2017
On Thu, Jan 19, 2017 at 2:47 PM, Matt Arsenault <arsenm2 at gmail.com> wrote:
> This fails conformance for me:
>
>
>
Interestingly enough, some (but not all) of the test inputs start to pass
when I forcibly enable subnormal support in libclc
(--enable-runtime-subnormal and also updating
generic/lib/shared/subnormal_config.cl to enable 32-bit subnormals).
I'll be playing with this a bit as time permits.
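
For reference, the toggle is the same query the patch below already branches
on. A minimal sketch of the change I'm making in
generic/lib/shared/subnormal_config.cl (the file's actual contents may differ
slightly; the patch only relies on this one function):

  #include <clc/clc.h>

  /* Report 32-bit subnormal support so that remainder() takes the
     subnormal-aware path instead of the flush-to-zero path. */
  _CLC_DEF bool __clc_fp32_subnormals_supported() {
    return true;
  }
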
--Aaron
>
> > On Jan 18, 2017, at 20:04, Aaron Watry via Libclc-dev <libclc-dev at lists.llvm.org> wrote:
> >
> > Mostly ported from the amd-builtins branch.
> >
> > The amd-builtins branch uses __amdil_improved_fdiv_f32 and FTZ, which aren't available in generic CLC.
> >
> > __amdil_improved_fdiv_f32 points to native_divide, which does native_recip(y)*x.
> >
> > Since we don't have native_divide or native_recip yet, I've just used an actual division here.
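> >
> > For reference, roughly what that line would become once those builtins
> > exist (built-in names per the OpenCL C spec; this is a sketch, not part of
> > the patch):
> >
> >   /* native_divide(x, y) is native_recip(y) * x at relaxed precision */
> >   div = native_divide(fx, w);
> >
> > in place of the full-precision fx / w used below.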
> >
> > I've taken a shot at a replacement for FTZ(x), but feel free to suggest alternatives.
> >
> > Tested via piglit on a Radeon HD 7850 using the tests just sent to that list.
> >
> > v2: Use __builtin_canonicalizef(float) instead of a custom flush-to-zero function
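> >
> > For comparison, the kind of hand-rolled flush-to-zero that v1 used looked
> > roughly like this (a sketch with a made-up name, not the actual v1 code):
> >
> >   /* Flush subnormal values to a signed zero, keeping the sign bit;
> >      __builtin_canonicalizef() now takes care of this on FTZ targets. */
> >   inline float _clc_ftz_f32(float x) {
> >     int ix = as_int(x);
> >     return (ix & EXPBITS_SP32) == 0 ? as_float(ix & SIGNBIT_SP32) : x;
> >   }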
> >
> > Signed-off-by: Aaron Watry <awatry at gmail.com>
> > CC: Tom Stellard <thomas.stellard at amd.com>
> > CC: Matt Arsenault <Matthew.Arsenault at amd.com>
> > ---
> > generic/include/clc/clc.h | 1 +
> > generic/include/clc/math/remainder.h | 2 +
> > generic/include/clc/math/remainder.inc | 1 +
> > generic/lib/SOURCES | 1 +
> > generic/lib/math/remainder.cl | 508 +++++++++++++++++++++++++++++++++
> > 5 files changed, 513 insertions(+)
> > create mode 100644 generic/include/clc/math/remainder.h
> > create mode 100644 generic/include/clc/math/remainder.inc
> > create mode 100644 generic/lib/math/remainder.cl
> >
> > diff --git a/generic/include/clc/clc.h b/generic/include/clc/clc.h
> > index 024bf27..2d4af4b 100644
> > --- a/generic/include/clc/clc.h
> > +++ b/generic/include/clc/clc.h
> > @@ -82,6 +82,7 @@
> > #include <clc/math/nextafter.h>
> > #include <clc/math/pow.h>
> > #include <clc/math/pown.h>
> > +#include <clc/math/remainder.h>
> > #include <clc/math/rint.h>
> > #include <clc/math/round.h>
> > #include <clc/math/sin.h>
> > diff --git a/generic/include/clc/math/remainder.h b/generic/include/clc/math/remainder.h
> > new file mode 100644
> > index 0000000..97d9fad
> > --- /dev/null
> > +++ b/generic/include/clc/math/remainder.h
> > @@ -0,0 +1,2 @@
> > +#define __CLC_BODY <clc/math/remainder.inc>
> > +#include <clc/math/gentype.inc>
> > diff --git a/generic/include/clc/math/remainder.inc b/generic/include/clc/math/remainder.inc
> > new file mode 100644
> > index 0000000..00d0d69
> > --- /dev/null
> > +++ b/generic/include/clc/math/remainder.inc
> > @@ -0,0 +1 @@
> > +_CLC_OVERLOAD _CLC_DECL __CLC_GENTYPE remainder(__CLC_GENTYPE x, __CLC_GENTYPE y);
> > diff --git a/generic/lib/SOURCES b/generic/lib/SOURCES
> > index 517daba..39208bf 100644
> > --- a/generic/lib/SOURCES
> > +++ b/generic/lib/SOURCES
> > @@ -113,6 +113,7 @@ math/tables.cl
> > math/clc_nextafter.cl
> > math/nextafter.cl
> > math/pown.cl
> > +math/remainder.cl
> > math/sin.cl
> > math/sincos.cl
> > math/sincos_helpers.cl
> > diff --git a/generic/lib/math/remainder.cl b/generic/lib/math/remainder.cl
> > new file mode 100644
> > index 0000000..df42db0
> > --- /dev/null
> > +++ b/generic/lib/math/remainder.cl
> > @@ -0,0 +1,508 @@
> > +#include <clc/clc.h>
> > +
> > +#include "math.h"
> > +#include "config.h"
> > +#include "../clcmacro.h"
> > +
> > +inline float _clc_remainder_scaleFullRangef32(float y, float t) {
> > + float ay, ty, r = 0;
> > + int k, iiy, iy, exp_iy0, exp_iy, manty, signy, miy;
> > + int delta, shift, ir;
> > +
> > + ay = fabs(t);
> > + k = ay > 1024 ? 1024 : (int) ay;
> > + k = t < 0 ? -k : k;
> > + t = (float) k;
> > +
> > + iiy = as_int(y);
> > + iy = iiy & EXSIGNBIT_SP32;
> > + signy = iiy & SIGNBIT_SP32;
> > + ay = as_float(iy);
> > +
> > + exp_iy0 = iy & EXPBITS_SP32;
> > + manty = iy & MANTBITS_SP32;
> > +
> > + //sub-normal
> > + ty = exp_iy0 == 0 ? (float) manty : as_float(iy);
> > + k = exp_iy0 == 0 ? k - 149 : k;
> > + ay = ty;
> > + iy = as_int(ay);
> > + exp_iy0 = iy & EXPBITS_SP32;
> > + exp_iy = (exp_iy0 >> EXPSHIFTBITS_SP32) - EXPBIAS_SP32;
> > + // add k to y's exponent
> > + r = as_float(iy + (k << EXPSHIFTBITS_SP32));
> > + r = (exp_iy + k) > 127 ? as_float(PINFBITPATT_SP32) : r;
> > + // how far the scaled exponent falls below the minimum normal exponent
> > + delta = -126 - (exp_iy + k);
> > +
> > + // sub-normal
> > + miy = iy & MANTBITS_SP32;
> > + miy |= IMPBIT_SP32;
> > + shift = delta > 23 ? 24 : delta;
> > + shift = delta < 0 ? 0 : shift;
> > + miy >>= shift;
> > + r = delta > 0 ? as_float(miy) : r;
> > + r = t > (float) (2 * EMAX_SP32) ? as_float(PINFBITPATT_SP32) : r;
> > + ir = as_int(r);
> > + r = ir <= PINFBITPATT_SP32 ? as_float(as_int(r) | signy) : r;
> > + return r;
> > +}
> > +/* Scales the float x by 2.0**n.
> > + Assumes 2*EMIN <= n <= 2*EMAX, though this condition is not checked. */
> > +inline float _clc_remainder_scaleFloat_2(float x, int n) {
> > + float t1, t2;
> > + int n1, n2;
> > + n1 = n / 2;
> > + n2 = n - n1;
> > + /* Construct the numbers t1 = 2.0**n1 and t2 = 2.0**n2 */
> > + t1 = as_float((n1 + EXPBIAS_SP32) << EXPSHIFTBITS_SP32);
> > + t2 = as_float((n2 + EXPBIAS_SP32) << EXPSHIFTBITS_SP32);
> > + return (x * t1) * t2;
> > +}
> > +/* Scales the float x by 2.0**n.
> > + Assumes EMIN <= n <= EMAX, though this condition is not checked. */
> > +inline float _clc_remainder_scaleFloat_1(float x, int n) {
> > + float t;
> > + /* Construct the number t = 2.0**n */
> > + t = as_float((n + EXPBIAS_SP32) << EXPSHIFTBITS_SP32);
> > + return x * t;
> > +}
> > +/* Computes the exact product of x and y, the result being the
> > +nearly double length number (z,zz) */
> > +inline void _clc_remainder_mul12f(float x, float y, float *z, float *zz) {
> > + float hx, tx, hy, ty;
> > + // Split x into hx (head) and tx (tail). Do the same for y.
> > + uint u;
> > + u = as_uint(x);
> > + u &= 0xfffff000;
> > + hx = as_float(u);
> > + tx = x - hx;
> > + u = as_uint(y);
> > + u &= 0xfffff000;
> > + hy = as_float(u);
> > + ty = y - hy;
> > + *z = x * y;
> > + *zz = (((hx * hy - *z) + hx * ty) + tx * hy) + tx * ty;
> > +}
> > +
> > +_CLC_OVERLOAD _CLC_DEF float remainder(float x, float y) {
> > + if (!__clc_fp32_subnormals_supported()) {
> > + const int loop_scale = 12;
> > + const float fscale = 1.0f / (float) (1 << loop_scale);
> > +
> > + int ntimes;
> > + float ret = 0;
> > + int ui_x, ui_y, ui_ax, ui_ay, xexp, yexp, signx;
> > + float af_x, af_y, af_ybase, fx, fxp, fxm, fy, w, scale, t, c, cc, v;
> > + float yscale, scaled_w, saved_w, div, sdiv, ratio, sratio, fxexp, sub_fx;
> > + int iw_scaled, wexp, it, i, ifx, ex, ey;
> > + ;
> > + float xr, xr0, xr_base, yr;
> > + uint q;
> > +
> > + ui_x = as_int(x);
> > + ui_y = as_int(y);
> > + ui_ax = ui_x & EXSIGNBIT_SP32;
> > + ui_ay = ui_y & EXSIGNBIT_SP32;
> > +
> > + /* special case handling */
> > + if (ui_ax > PINFBITPATT_SP32)
> > + return x;
> > + if (ui_ax == PINFBITPATT_SP32)
> > + return as_float(QNANBITPATT_SP32);
> > + if (ui_ay > PINFBITPATT_SP32)
> > + return y;
> > + if (ui_ay == PINFBITPATT_SP32)
> > + return x;
> > + if (ui_ay == 0 && ui_ax == 0)
> > + return as_float(QNANBITPATT_SP32);
> > + if (ui_ax == 0)
> > + return x;
> > + if (ui_ay == 0)
> > + return as_float(QNANBITPATT_SP32);
> > +
> > + signx = ui_x & SIGNBIT_SP32;
> > + af_x = as_float(ui_ax);
> > + af_ybase = af_y = as_float(ui_ay);
> > + yexp = (int) ((ui_y & EXPBITS_SP32) >> EXPSHIFTBITS_SP32);
> > +
> > + yscale = (float) ((yexp < 48 && ui_ay != 0) ? (48 - yexp) : 0);
> > + if (yscale != 0) {
> > + af_y = _clc_remainder_scaleFullRangef32(af_ybase, yscale);
> > + }
> > +
> > + ui_y = as_int(af_y);
> > + yexp = (int) ((ui_y & EXPBITS_SP32) >> EXPSHIFTBITS_SP32);
> > + xexp = (int) ((ui_x & EXPBITS_SP32) >> EXPSHIFTBITS_SP32);
> > + fx = af_x;
> > + fy = af_y;
> > +
> > + /* Set ntimes to the number of times we need to do a
> > + partial remainder. If the exponent of x is an exact multiple
> > + of loop_scale larger than the exponent of y, and the mantissa of x is
> > + less than the mantissa of y, ntimes will be one too large
> > + but it doesn't matter - it just means that we'll go round
> > + the loop below one extra time. */
> > + ntimes = (xexp - yexp) / loop_scale;
> > + ntimes = xexp <= yexp ? 0 : ntimes;
> > +
> > + /* Set w = y * 2^(ntimes*loop_scale) */
> > + w = _clc_remainder_scaleFloat_2(fy, ntimes * loop_scale);
> > + w = ntimes == 0 ? fy : w;
> > +
> > + /* Set scale = 2^(-loop_scale) */
> > + scale = ntimes == 0 ? 1.0f : fscale;
> > +
> > + // make sure recip does not overflow
> > + wexp = (int) ((as_int(w) & EXPBITS_SP32) >> EXPSHIFTBITS_SP32) - EXPBIAS_SP32;
> > + saved_w = w;
> > + scaled_w = _clc_remainder_scaleFloat_1(w, -14);
> > + iw_scaled = wexp > 105 & wexp <= 127;
> > + w = (iw_scaled & ntimes) > 0 ? scaled_w : w;
> > +
> > + /* Each time round the loop we compute a partial remainder.
> > + This is done by subtracting a large multiple of w
> > + from x each time, where w is a scaled up version of y.
> > + The subtraction can be performed exactly when performed
> > + in double precision, and the result at each stage can
> > + fit exactly in a single precision number. */
> > + for (i = 0; i < ntimes; i++) {
> > + /* Set fx = fx - w * t, where t is equal to trunc(dx/w). */
> > + div = fx / w; //was __amdil_improved_div_f32 => native_divide(x, y) => native_recip(y)*x
> > + sdiv = _clc_remainder_scaleFloat_1(div, -14);
> > + div = iw_scaled ? sdiv : div;
> > + t = floor(div);
> > + w = saved_w;
> > + iw_scaled = 0;
> > +
> > + /* At this point, t may be one too large due to rounding of fx/w */
> > +
> > + /* Compute w * t in quad precision */
> > + _clc_remainder_mul12f(w, t, &c, &cc);
> > +
> > + /* Subtract w * t from fx */
> > + v = fx - c;
> > + fx = v + (((fx - v) - c) - cc);
> > +
> > + /* If t was one too large, fx will be negative. Add back one w */
> > + /* It might be possible to speed up this loop by finding
> > + a way to compute correctly truncated t directly from fx and w.
> > + We would then avoid the need for this check on negative fx. */
> > + fxp = fx + w;
> > + fxm = fx - w;
> > + fx = fx < 0.0f ? fxp : fx;
> > + fx = fx >= w ? fxm : fx;
> > +
> > + /* Scale w down by 2^(-loop_scale) for the next iteration */
> > + w *= scale;
> > + saved_w = w;
> > + }
> > +
> > + /* One more time */
> > + // iw = as_int(w);
> > + ifx = as_int(fx);
> > + fxexp = (int) ((ifx & EXPBITS_SP32) >> EXPSHIFTBITS_SP32);
> > + // wexp = (int) ((iw & EXPBITS_SP32) >> EXPSHIFTBITS_SP32);
> > + sub_fx = fx;
> > + // make sure recip does not overflow
> > + wexp = (int) ((as_int(w) & EXPBITS_SP32) >> EXPSHIFTBITS_SP32) - EXPBIAS_SP32;
> > + saved_w = w;
> > + scaled_w = _clc_remainder_scaleFloat_1(w, -14);
> > + iw_scaled = wexp > 105 & wexp <= 127;
> > + w = iw_scaled ? scaled_w : w;
> > + ratio = fx / w; //was the amdil equivalent of native_divide(x, y) => native_recip(y)*x;
> > + sratio = _clc_remainder_scaleFloat_1(ratio, -14);
> > + ratio = iw_scaled ? sratio : ratio;
> > + t = floor(ratio);
> > + it = (int) t;
> > +
> > + w = saved_w;
> > + _clc_remainder_mul12f(w, t, &c, &cc);
> > +
> > + v = fx - c;
> > + fx = v + (((fx - v) - c) - cc);
> > +
> > + if (fx < 0.0f) {
> > + fx += w;
> > + it--;
> > + }
> > +
> > + if (fx >= w) {
> > + fx -= w;
> > + it++;
> > + }
> > +
> > + // sub-normal fx
> > + fx = fxexp == 0 ? sub_fx : fx;
> > +
> > + float scaleback = 0;
> > +
> > + // in case fx == 0 and we've got a divisor
> > + it = (yscale > 30) ? 0 : ((unsigned int) it << (int) yscale);
> > +
> > + if (as_int(fx) != 0 && yscale != 0) {
> > + xr = fx;
> > + xr_base = fx;
> > + yr = af_ybase;
> > + q = 0;
> > + ex = ilogb(fx);
> > + ey = ilogb(af_ybase);
> > +
> > + yr = (float) _clc_remainder_scaleFullRangef32(af_ybase, (float) -ey);
> > + xr = (float) _clc_remainder_scaleFullRangef32(fx, (float) -ex);
> > +
> > + for (i = ex - ey; i > 0; i--) {
> > + q <<= 1;
> > + xr0 = xr;
> > + xr = (xr0 >= yr) ? xr0 - yr : xr0;
> > + q = (xr0 >= yr) ? q + 1 : q;
> > + xr += xr;
> > + }
> > + q <<= 1;
> > + xr0 = xr;
> > + xr = (xr0 >= yr) ? xr0 - yr : xr0;
> > + q = (xr0 >= yr) ? q + 1 : q;
> > + xr = _clc_remainder_scaleFullRangef32(xr, (float) ey);
> > +
> > + fx = (ex - ey >= 0) ? xr : xr_base;
> > + q = (ex - ey >= 0) ? q : 0;
> > + it += q;
> > +
> > + xexp = (int) ((as_int(fx) & EXPBITS_SP32) >> EXPSHIFTBITS_SP32);
> > +
> > + w = af_ybase;
> > + if (xexp < 24) {
> > + fx = _clc_remainder_scaleFullRangef32(fx, 48);
> > + w = _clc_remainder_scaleFullRangef32(af_ybase, 48);
> > + scaleback = -48;
> > + }
> > + }
> > + /* At this point, fx lies in the range [0, w) */
> > + /* For the remainder function, we need to adjust fx
> > + so that it lies in the range (-y/2, y/2] by carefully
> > + subtracting w (== fy == y) if necessary. */
> > + if (fx * 2.f > w || ((fx * 2.f == w) && (it & 1))) {
> > + fx -= w;
> > + it++;
> > + }
> > + if (scaleback != 0) {
> > + fx = _clc_remainder_scaleFullRangef32(fx, scaleback);
> > + }
> > +
> > + ret = (signx) ? as_float(as_int(fx) ^ SIGNBIT_SP32) : fx;
> > +
> > + return ret;
> > +
> > + }
> > +
> > + // Otherwise, subnormals are supported
> > +
> > + x = __builtin_canonicalizef(x);
> > + y = __builtin_canonicalizef(y);
> > +
> > + int ux = as_int(x);
> > + int ax = ux & EXSIGNBIT_SP32;
> > + float xa = as_float(ax);
> > + int sx = ux ^ ax;
> > + int ex = ax >> EXPSHIFTBITS_SP32;
> > +
> > + int uy = as_int(y);
> > + int ay = uy & EXSIGNBIT_SP32;
> > + float ya = as_float(ay);
> > + int ey = ay >> EXPSHIFTBITS_SP32;
> > +
> > + float xr = as_float(0x3f800000 | (ax & 0x007fffff));
> > + float yr = as_float(0x3f800000 | (ay & 0x007fffff));
> > + int c;
> > + int k = ex - ey;
> > +
> > + uint q = 0;
> > +
> > +#define _CLC_BIT c = xr >= yr; q = (q << 1) | c; xr -= c ? yr : 0.0f; xr += xr
> > +
> > + while (k > 3) {
> > + _CLC_BIT;
> > + _CLC_BIT;
> > + _CLC_BIT;
> > + _CLC_BIT;
> > + k -= 4;
> > + }
> > +
> > + while (k > 0) {
> > + _CLC_BIT;
> > + --k;
> > + }
> > +
> > +#undef _CLC_BIT
> > +
> > + c = xr > yr;
> > + q = (q << 1) | c;
> > + xr -= c ? yr : 0.0f;
> > +
> > + int lt = ex < ey;
> > +
> > + q = lt ? 0 : q;
> > + xr = lt ? xa : xr;
> > + yr = lt ? ya : yr;
> > +
> > + c = (yr < 2.0f * xr) | ((yr == 2.0f * xr) & ((q & 0x1) == 0x1));
> > + xr -= c ? yr : 0.0f;
> > + q += c;
> > +
> > + float s = as_float(ey << EXPSHIFTBITS_SP32);
> > + xr *= lt ? 1.0f : s;
> > +
> > + c = ax == ay;
> > + xr = c ? 0.0f : xr;
> > +
> > + xr = as_float(sx ^ as_int(xr));
> > +
> > + c = ax > PINFBITPATT_SP32 | ay > PINFBITPATT_SP32 | ax == PINFBITPATT_SP32 | ay == 0;
> > + xr = c ? as_float(QNANBITPATT_SP32) : xr;
> > +
> > + return xr;
> > +}
> > +
> > +_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, float, remainder, float, float);
> > +
> > +#ifdef cl_khr_fp64
> > +
> > +#pragma OPENCL EXTENSION cl_khr_fp64 : enable
> > +
> > +inline double
> > +_clc_remainder_ldexp(double x, int n) {
> > + // XXX Have to go twice here because the hardware can't handle the full range (yet)
> > + int nh = n >> 1;
> > + return ldexp(ldexp(x, nh), n - nh);
> > +}
> > +
> > +_CLC_OVERLOAD _CLC_DEF double remainder(double y, double x) {
> > + ulong ux = as_ulong(x);
> > + ulong ax = ux & ~SIGNBIT_DP64;
> > + ulong xsgn = ux ^ ax;
> > + double dx = as_double(ax);
> > + int xexp = convert_int(ax >> EXPSHIFTBITS_DP64);
> > + int xexp1 = 11 - (int) clz(ax & MANTBITS_DP64);
> > + xexp1 = xexp < 1 ? xexp1 : xexp;
> > +
> > + ulong uy = as_ulong(y);
> > + ulong ay = uy & ~SIGNBIT_DP64;
> > + double dy = as_double(ay);
> > + int yexp = convert_int(ay >> EXPSHIFTBITS_DP64);
> > + int yexp1 = 11 - (int) clz(ay & MANTBITS_DP64);
> > + yexp1 = yexp < 1 ? yexp1 : yexp;
> > +
> > + int qsgn = ((ux ^ uy) & SIGNBIT_DP64) == 0UL ? 1 : -1;
> > +
> > + // First assume |x| > |y|
> > +
> > + // Set ntimes to the number of times we need to do a
> > + // partial remainder. If the exponent of x is an exact multiple
> > + // of 53 larger than the exponent of y, and the mantissa of x is
> > + // less than the mantissa of y, ntimes will be one too large
> > + // but it doesn't matter - it just means that we'll go round
> > + // the loop below one extra time.
> > + int ntimes = max(0, (xexp1 - yexp1) / 53);
> > + double w = _clc_remainder_ldexp(dy, ntimes * 53);
> > + w = ntimes == 0 ? dy : w;
> > + double scale = ntimes == 0 ? 1.0 : 0x1.0p-53;
> > +
> > + // Each time round the loop we compute a partial remainder.
> > + // This is done by subtracting a large multiple of w
> > + // from x each time, where w is a scaled up version of y.
> > + // The subtraction must be performed exactly in quad
> > + // precision, though the result at each stage can
> > + // fit exactly in a double precision number.
> > + int i;
> > + double t, v, p, pp;
> > +
> > + for (i = 0; i < ntimes; i++) {
> > + // Compute integral multiplier
> > + t = trunc(dx / w);
> > +
> > + // Compute w * t in quad precision
> > + p = w * t;
> > + pp = fma(w, t, -p);
> > +
> > + // Subtract w * t from dx
> > + v = dx - p;
> > + dx = v + (((dx - v) - p) - pp);
> > +
> > + // If t was one too large, dx will be negative. Add back one w.
> > + dx += dx < 0.0 ? w : 0.0;
> > +
> > + // Scale w down by 2^(-53) for the next iteration
> > + w *= scale;
> > + }
> > +
> > + // One more time
> > + // Variable todd says whether the integer t is odd or not
> > + t = floor(dx / w);
> > + long lt = (long) t;
> > + int todd = lt & 1;
> > +
> > + p = w * t;
> > + pp = fma(w, t, -p);
> > + v = dx - p;
> > + dx = v + (((dx - v) - p) - pp);
> > + i = dx < 0.0;
> > + todd ^= i;
> > + dx += i ? w : 0.0;
> > +
> > + // At this point, dx lies in the range [0,dy)
> > +
> > + // For the fmod function, we're done apart from setting the correct sign.
> > + //
> > + // For the remainder function, we need to adjust dx
> > + // so that it lies in the range (-y/2, y/2] by carefully
> > + // subtracting w (== dy == y) if necessary. The rigmarole
> > + // with todd is to get the correct sign of the result
> > + // when x/y lies exactly half way between two integers,
> > + // when we need to choose the even integer.
> > +
> > + int al = (2.0 * dx > w) | (todd & (2.0 * dx == w));
> > + double dxl = dx - (al ? w : 0.0);
> > +
> > + int ag = (dx > 0.5 * w) | (todd & (dx == 0.5 * w));
> > + double dxg = dx - (ag ? w : 0.0);
> > +
> > + dx = dy < 0x1.0p+1022 ? dxl : dxg;
> > +
> > + double ret = as_double(xsgn ^ as_ulong(dx));
> > + dx = as_double(ax);
> > +
> > + // Now handle |x| == |y|
> > + int c = dx == dy;
> > + t = as_double(xsgn);
> > + ret = c ? t : ret;
> > +
> > + // Next, handle |x| < |y|
> > + c = dx < dy;
> > + ret = c ? x : ret;
> > +
> > + c &= (yexp < 1023 & 2.0 * dx > dy) | (dx > 0.5 * dy);
> > + // we could use a conversion here instead since qsgn = +-1
> > + p = qsgn == 1 ? -1.0 : 1.0;
> > + t = fma(y, p, x);
> > + ret = c ? t : ret;
> > +
> > + // We don't need anything special for |x| == 0
> > +
> > + // |y| is 0
> > + c = dy == 0.0;
> > + ret = c ? as_double(QNANBITPATT_DP64) : ret;
> > +
> > + // y is +-Inf, NaN
> > + c = yexp > BIASEDEMAX_DP64;
> > + t = y == y ? x : y;
> > + ret = c ? t : ret;
> > +
> > + // x is +-Inf, NaN
> > + c = xexp > BIASEDEMAX_DP64;
> > + ret = c ? as_double(QNANBITPATT_DP64) : ret;
> > +
> > + return ret;
> > +}
> > +
> > +_CLC_BINARY_VECTORIZE(_CLC_OVERLOAD _CLC_DEF, double, remainder, double, double);
> > +#endif
> > \ No newline at end of file
> > --
> > 2.9.3
> >