glsl: Add "built-in" functions to do uint64_to_fp32(uint64_t)
[mesa.git] / src / compiler / glsl / float64.glsl
1 /*
2 * The implementations contained in this file are heavily based on the
3 * implementations found in the Berkeley SoftFloat library. As such, they are
4 * licensed under the same 3-clause BSD license:
5 *
6 * License for Berkeley SoftFloat Release 3e
7 *
8 * John R. Hauser
9 * 2018 January 20
10 *
11 * The following applies to the whole of SoftFloat Release 3e as well as to
12 * each source file individually.
13 *
14 * Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of the
15 * University of California. All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are met:
19 *
20 * 1. Redistributions of source code must retain the above copyright notice,
21 * this list of conditions, and the following disclaimer.
22 *
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions, and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 *
27 * 3. Neither the name of the University nor the names of its contributors
28 * may be used to endorse or promote products derived from this software
29 * without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
32 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
33 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
34 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
35 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
36 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
37 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
38 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
39 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
40 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 */
42
43 #version 430
44 #extension GL_ARB_gpu_shader_int64 : enable
45 #extension GL_ARB_shader_bit_encoding : enable
46 #extension GL_EXT_shader_integer_mix : enable
47 #extension GL_MESA_shader_integer_functions : enable
48
49 #pragma warning(off)
50
51 /* Software IEEE floating-point rounding mode.
52 * GLSL spec section "4.7.1 Range and Precision":
53 * The rounding mode cannot be set and is undefined.
54  * But here, we can define the rounding mode at compile time.
55 */
56 #define FLOAT_ROUND_NEAREST_EVEN 0
57 #define FLOAT_ROUND_TO_ZERO 1
58 #define FLOAT_ROUND_DOWN 2
59 #define FLOAT_ROUND_UP 3
60 #define FLOAT_ROUNDING_MODE FLOAT_ROUND_NEAREST_EVEN
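/* FLOAT_ROUND_NEAREST_EVEN is the IEEE-754 default (round to nearest, ties
 * to even). The rounding helpers below branch on FLOAT_ROUNDING_MODE, so a
 * different mode can be selected simply by changing the define above.
 */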
61
62 /* Absolute value of a Float64:
63 * Clear the sign bit
64 */
65 uint64_t
66 __fabs64(uint64_t __a)
67 {
68 uvec2 a = unpackUint2x32(__a);
69 a.y &= 0x7FFFFFFFu;
70 return packUint2x32(a);
71 }
72
73 /* Returns true if the double-precision floating-point value `a' is a NaN;
74  * otherwise returns false.
75 */
76 bool
77 __is_nan(uint64_t __a)
78 {
79 uvec2 a = unpackUint2x32(__a);
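   /* Shifting the high word left by one drops the sign bit; a NaN has an
    * all-ones exponent (so a.y<<1 >= 0xFFE00000) and a nonzero mantissa in
    * the remaining bits of a.y or anywhere in a.x.
    */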
80 return (0xFFE00000u <= (a.y<<1)) &&
81 ((a.x != 0u) || ((a.y & 0x000FFFFFu) != 0u));
82 }
83
84 /* Negate value of a Float64:
85 * Toggle the sign bit
86 */
87 uint64_t
88 __fneg64(uint64_t __a)
89 {
90 uvec2 a = unpackUint2x32(__a);
91 uint t = a.y;
92
93 t ^= (1u << 31);
94 a.y = mix(t, a.y, __is_nan(__a));
95 return packUint2x32(a);
96 }
97
98 uint64_t
99 __fsign64(uint64_t __a)
100 {
101 uvec2 a = unpackUint2x32(__a);
102 uvec2 retval;
103 retval.x = 0u;
104 retval.y = mix((a.y & 0x80000000u) | 0x3FF00000u, 0u, (a.y << 1 | a.x) == 0u);
105 return packUint2x32(retval);
106 }
107
108 /* Returns the fraction bits of the double-precision floating-point value `a'.*/
109 uint
110 __extractFloat64FracLo(uint64_t a)
111 {
112 return unpackUint2x32(a).x;
113 }
114
115 uint
116 __extractFloat64FracHi(uint64_t a)
117 {
118 return unpackUint2x32(a).y & 0x000FFFFFu;
119 }
120
121 /* Returns the exponent bits of the double-precision floating-point value `a'.*/
122 int
123 __extractFloat64Exp(uint64_t __a)
124 {
125 uvec2 a = unpackUint2x32(__a);
126 return int((a.y>>20) & 0x7FFu);
127 }
128
129 bool
130 __feq64_nonnan(uint64_t __a, uint64_t __b)
131 {
132 uvec2 a = unpackUint2x32(__a);
133 uvec2 b = unpackUint2x32(__b);
134 return (a.x == b.x) &&
135 ((a.y == b.y) || ((a.x == 0u) && (((a.y | b.y)<<1) == 0u)));
136 }
137
138 /* Returns true if the double-precision floating-point value `a' is equal to the
139 * corresponding value `b', and false otherwise. The comparison is performed
140 * according to the IEEE Standard for Floating-Point Arithmetic.
141 */
142 bool
143 __feq64(uint64_t a, uint64_t b)
144 {
145 if (__is_nan(a) || __is_nan(b))
146 return false;
147
148 return __feq64_nonnan(a, b);
149 }
150
151 /* Returns true if the double-precision floating-point value `a' is not equal
152 * to the corresponding value `b', and false otherwise. The comparison is
153 * performed according to the IEEE Standard for Floating-Point Arithmetic.
154 */
155 bool
156 __fne64(uint64_t a, uint64_t b)
157 {
158 if (__is_nan(a) || __is_nan(b))
159 return true;
160
161 return !__feq64_nonnan(a, b);
162 }
163
164 /* Returns the sign bit of the double-precision floating-point value `a'.*/
165 uint
166 __extractFloat64Sign(uint64_t a)
167 {
168 return unpackUint2x32(a).y >> 31;
169 }
170
171 /* Returns true if the 64-bit value formed by concatenating `a0' and `a1' is less
172 * than the 64-bit value formed by concatenating `b0' and `b1'. Otherwise,
173 * returns false.
174 */
175 bool
176 lt64(uint a0, uint a1, uint b0, uint b1)
177 {
178 return (a0 < b0) || ((a0 == b0) && (a1 < b1));
179 }
180
181 bool
182 __flt64_nonnan(uint64_t __a, uint64_t __b)
183 {
184 uvec2 a = unpackUint2x32(__a);
185 uvec2 b = unpackUint2x32(__b);
186 uint aSign = __extractFloat64Sign(__a);
187 uint bSign = __extractFloat64Sign(__b);
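   /* With different signs, `a' is smaller exactly when it is the negative
    * one and the two values are not both zero (+0 and -0 compare equal).
    * With equal signs, the unsigned comparison is reversed for negatives.
    */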
188 if (aSign != bSign)
189 return (aSign != 0u) && ((((a.y | b.y)<<1) | a.x | b.x) != 0u);
190
191 return mix(lt64(a.y, a.x, b.y, b.x), lt64(b.y, b.x, a.y, a.x), aSign != 0u);
192 }
193
194 /* Returns true if the double-precision floating-point value `a' is less than
195 * the corresponding value `b', and false otherwise. The comparison is performed
196 * according to the IEEE Standard for Floating-Point Arithmetic.
197 */
198 bool
199 __flt64(uint64_t a, uint64_t b)
200 {
201 if (__is_nan(a) || __is_nan(b))
202 return false;
203
204 return __flt64_nonnan(a, b);
205 }
206
207 /* Returns true if the double-precision floating-point value `a' is greater
208  * than or equal to the corresponding value `b', and false otherwise. The
209  * comparison is performed according to the IEEE Standard for Floating-Point
210 * Arithmetic.
211 */
212 bool
213 __fge64(uint64_t a, uint64_t b)
214 {
215 if (__is_nan(a) || __is_nan(b))
216 return false;
217
218 return !__flt64_nonnan(a, b);
219 }
220
221 /* Adds the 64-bit value formed by concatenating `a0' and `a1' to the 64-bit
222 * value formed by concatenating `b0' and `b1'. Addition is modulo 2^64, so
223 * any carry out is lost. The result is broken into two 32-bit pieces which
224 * are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
225 */
226 void
227 __add64(uint a0, uint a1, uint b0, uint b1,
228 out uint z0Ptr,
229 out uint z1Ptr)
230 {
231 uint z1 = a1 + b1;
232 z1Ptr = z1;
233 z0Ptr = a0 + b0 + uint(z1 < a1);
234 }
235
236
237 /* Subtracts the 64-bit value formed by concatenating `b0' and `b1' from the
238 * 64-bit value formed by concatenating `a0' and `a1'. Subtraction is modulo
239 * 2^64, so any borrow out (carry out) is lost. The result is broken into two
240 * 32-bit pieces which are stored at the locations pointed to by `z0Ptr' and
241 * `z1Ptr'.
242 */
243 void
244 __sub64(uint a0, uint a1, uint b0, uint b1,
245 out uint z0Ptr,
246 out uint z1Ptr)
247 {
248 z1Ptr = a1 - b1;
249 z0Ptr = a0 - b0 - uint(a1 < b1);
250 }
251
252 /* Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the
253 * number of bits given in `count'. If any nonzero bits are shifted off, they
254 * are "jammed" into the least significant bit of the result by setting the
255 * least significant bit to 1. The value of `count' can be arbitrarily large;
256 * in particular, if `count' is greater than 64, the result will be either 0
257 * or 1, depending on whether the concatenation of `a0' and `a1' is zero or
258 * nonzero. The result is broken into two 32-bit pieces which are stored at
259 * the locations pointed to by `z0Ptr' and `z1Ptr'.
260 */
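/* For example, shifting a0:a1 = 0x00000001:0x00000000 (2^32) right by 33
 * yields 0x00000000:0x00000001: the integer part of the result is zero, and
 * the nonzero bit shifted off is jammed into the least significant bit.
 */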
261 void
262 __shift64RightJamming(uint a0,
263 uint a1,
264 int count,
265 out uint z0Ptr,
266 out uint z1Ptr)
267 {
268 uint z0;
269 uint z1;
270 int negCount = (-count) & 31;
271
272 z0 = mix(0u, a0, count == 0);
273 z0 = mix(z0, (a0 >> count), count < 32);
274
275 z1 = uint((a0 | a1) != 0u); /* count >= 64 */
276 uint z1_lt64 = (a0>>(count & 31)) | uint(((a0<<negCount) | a1) != 0u);
277 z1 = mix(z1, z1_lt64, count < 64);
278 z1 = mix(z1, (a0 | uint(a1 != 0u)), count == 32);
279 uint z1_lt32 = (a0<<negCount) | (a1>>count) | uint ((a1<<negCount) != 0u);
280 z1 = mix(z1, z1_lt32, count < 32);
281 z1 = mix(z1, a1, count == 0);
282 z1Ptr = z1;
283 z0Ptr = z0;
284 }
285
286 /* Shifts the 96-bit value formed by concatenating `a0', `a1', and `a2' right
287 * by 32 _plus_ the number of bits given in `count'. The shifted result is
288 * at most 64 nonzero bits; these are broken into two 32-bit pieces which are
289 * stored at the locations pointed to by `z0Ptr' and `z1Ptr'. The bits shifted
290 * off form a third 32-bit result as follows: The _last_ bit shifted off is
291 * the most-significant bit of the extra result, and the other 31 bits of the
292 * extra result are all zero if and only if _all_but_the_last_ bits shifted off
293 * were all zero. This extra result is stored in the location pointed to by
294 * `z2Ptr'. The value of `count' can be arbitrarily large.
295 * (This routine makes more sense if `a0', `a1', and `a2' are considered
296 * to form a fixed-point value with binary point between `a1' and `a2'. This
297 * fixed-point value is shifted right by the number of bits given in `count',
298 * and the integer part of the result is returned at the locations pointed to
299 * by `z0Ptr' and `z1Ptr'. The fractional part of the result may be slightly
300 * corrupted as described above, and is returned at the location pointed to by
301 * `z2Ptr'.)
302 */
303 void
304 __shift64ExtraRightJamming(uint a0, uint a1, uint a2,
305 int count,
306 out uint z0Ptr,
307 out uint z1Ptr,
308 out uint z2Ptr)
309 {
310 uint z0 = 0u;
311 uint z1;
312 uint z2;
313 int negCount = (-count) & 31;
314
315 z2 = mix(uint(a0 != 0u), a0, count == 64);
316 z2 = mix(z2, a0 << negCount, count < 64);
317 z2 = mix(z2, a1 << negCount, count < 32);
318
319 z1 = mix(0u, (a0 >> (count & 31)), count < 64);
320 z1 = mix(z1, (a0<<negCount) | (a1>>count), count < 32);
321
322    a2 = mix(a2 | a1, a2, count <= 32);
323    z0 = mix(z0, a0 >> count, count < 32);
324    z2 = mix(z2, a1, (count == 32));
325
326    z0 = mix(z0, 0u, (count == 32));
327    z1 = mix(z1, a0, (count == 32));
328    z2 |= uint(a2 != 0u);
329 z0 = mix(z0, a0, (count == 0));
330 z1 = mix(z1, a1, (count == 0));
331 z2 = mix(z2, a2, (count == 0));
332 z2Ptr = z2;
333 z1Ptr = z1;
334 z0Ptr = z0;
335 }
336
337 /* Shifts the 64-bit value formed by concatenating `a0' and `a1' left by the
338 * number of bits given in `count'. Any bits shifted off are lost. The value
339 * of `count' must be less than 32. The result is broken into two 32-bit
340 * pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
341 */
342 void
343 __shortShift64Left(uint a0, uint a1,
344 int count,
345 out uint z0Ptr,
346 out uint z1Ptr)
347 {
348 z1Ptr = a1<<count;
349 z0Ptr = mix((a0 << count | (a1 >> ((-count) & 31))), a0, count == 0);
350 }
351
352 /* Packs the sign `zSign', the exponent `zExp', and the significand formed by
353 * the concatenation of `zFrac0' and `zFrac1' into a double-precision floating-
354 * point value, returning the result. After being shifted into the proper
355 * positions, the three fields `zSign', `zExp', and `zFrac0' are simply added
356 * together to form the most significant 32 bits of the result. This means
357 * that any integer portion of `zFrac0' will be added into the exponent. Since
358 * a properly normalized significand will have an integer portion equal to 1,
359 * the `zExp' input should be 1 less than the desired result exponent whenever
360 * `zFrac0' and `zFrac1' concatenated form a complete, normalized significand.
361 */
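/* For example, 1.0 corresponds to __packFloat64(0u, 0x3FE, 0x00100000u, 0u):
 * the integer bit of `zFrac0' carries into the exponent field and produces
 * the expected high word 0x3FF00000.
 */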
362 uint64_t
363 __packFloat64(uint zSign, int zExp, uint zFrac0, uint zFrac1)
364 {
365 uvec2 z;
366
367 z.y = (zSign << 31) + (uint(zExp) << 20) + zFrac0;
368 z.x = zFrac1;
369 return packUint2x32(z);
370 }
371
372 /* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
373 * and extended significand formed by the concatenation of `zFrac0', `zFrac1',
374 * and `zFrac2', and returns the proper double-precision floating-point value
375 * corresponding to the abstract input. Ordinarily, the abstract value is
376 * simply rounded and packed into the double-precision format, with the inexact
377 * exception raised if the abstract input cannot be represented exactly.
378 * However, if the abstract value is too large, the overflow and inexact
379 * exceptions are raised and an infinity or maximal finite value is returned.
380 * If the abstract value is too small, the input value is rounded to a
381 * subnormal number, and the underflow and inexact exceptions are raised if the
382 * abstract input cannot be represented exactly as a subnormal double-precision
383 * floating-point number.
384 * The input significand must be normalized or smaller. If the input
385 * significand is not normalized, `zExp' must be 0; in that case, the result
386 * returned is a subnormal number, and it must not require rounding. In the
387 * usual case that the input significand is normalized, `zExp' must be 1 less
388 * than the "true" floating-point exponent. The handling of underflow and
389 * overflow follows the IEEE Standard for Floating-Point Arithmetic.
390 */
391 uint64_t
392 __roundAndPackFloat64(uint zSign,
393 int zExp,
394 uint zFrac0,
395 uint zFrac1,
396 uint zFrac2)
397 {
398 bool roundNearestEven;
399 bool increment;
400
401 roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;
402 increment = int(zFrac2) < 0;
403 if (!roundNearestEven) {
404 if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) {
405 increment = false;
406 } else {
407 if (zSign != 0u) {
408 increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&
409 (zFrac2 != 0u);
410 } else {
411 increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
412 (zFrac2 != 0u);
413 }
414 }
415 }
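   /* Biased exponent 0x7FF encodes Inf/NaN and 0x7FE is the largest finite
    * exponent; since zExp is one less than the true exponent, anything at or
    * above 0x7FD here can overflow once the significand's integer bit and
    * the rounding increment are folded in. Negative zExp means the result is
    * subnormal and is shifted back into range below.
    */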
416    if (0x7FDu <= uint(zExp)) { /* unsigned compare also catches negative zExp, handled below */
417 if ((0x7FD < zExp) ||
418 ((zExp == 0x7FD) &&
419 (0x001FFFFFu == zFrac0 && 0xFFFFFFFFu == zFrac1) &&
420 increment)) {
421 if ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) ||
422 ((zSign != 0u) && (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP)) ||
423 ((zSign == 0u) && (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN))) {
424 return __packFloat64(zSign, 0x7FE, 0x000FFFFFu, 0xFFFFFFFFu);
425 }
426 return __packFloat64(zSign, 0x7FF, 0u, 0u);
427 }
428 if (zExp < 0) {
429 __shift64ExtraRightJamming(
430 zFrac0, zFrac1, zFrac2, -zExp, zFrac0, zFrac1, zFrac2);
431 zExp = 0;
432 if (roundNearestEven) {
433             increment = int(zFrac2) < 0;
434 } else {
435 if (zSign != 0u) {
436 increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&
437 (zFrac2 != 0u);
438 } else {
439 increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
440 (zFrac2 != 0u);
441 }
442 }
443 }
444 }
445 if (increment) {
446 __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);
447 zFrac1 &= ~((zFrac2 + uint(zFrac2 == 0u)) & uint(roundNearestEven));
448 } else {
449 zExp = mix(zExp, 0, (zFrac0 | zFrac1) == 0u);
450 }
451 return __packFloat64(zSign, zExp, zFrac0, zFrac1);
452 }
453
454 /* Returns the number of leading 0 bits before the most-significant 1 bit of
455 * `a'. If `a' is zero, 32 is returned.
456 */
457 int
458 __countLeadingZeros32(uint a)
459 {
460 int shiftCount;
461 shiftCount = mix(31 - findMSB(a), 32, a == 0u);
462 return shiftCount;
463 }
464
465 /* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
466  * and significand formed by the concatenation of `zFrac0' and `zFrac1', and
467 * returns the proper double-precision floating-point value corresponding
468 * to the abstract input. This routine is just like `__roundAndPackFloat64'
469 * except that the input significand has fewer bits and does not have to be
470 * normalized. In all cases, `zExp' must be 1 less than the "true" floating-
471 * point exponent.
472 */
473 uint64_t
474 __normalizeRoundAndPackFloat64(uint zSign,
475 int zExp,
476 uint zFrac0,
477 uint zFrac1)
478 {
479 int shiftCount;
480 uint zFrac2;
481
482 if (zFrac0 == 0u) {
483 zExp -= 32;
484 zFrac0 = zFrac1;
485 zFrac1 = 0u;
486 }
487
488 shiftCount = __countLeadingZeros32(zFrac0) - 11;
489 if (0 <= shiftCount) {
490 zFrac2 = 0u;
491 __shortShift64Left(zFrac0, zFrac1, shiftCount, zFrac0, zFrac1);
492 } else {
493 __shift64ExtraRightJamming(
494 zFrac0, zFrac1, 0u, -shiftCount, zFrac0, zFrac1, zFrac2);
495 }
496 zExp -= shiftCount;
497 return __roundAndPackFloat64(zSign, zExp, zFrac0, zFrac1, zFrac2);
498 }
499
500 /* Takes two double-precision floating-point values `a' and `b', one of which
501 * is a NaN, and returns the appropriate NaN result.
502 */
503 uint64_t
504 __propagateFloat64NaN(uint64_t __a, uint64_t __b)
505 {
506 bool aIsNaN = __is_nan(__a);
507 bool bIsNaN = __is_nan(__b);
508 uvec2 a = unpackUint2x32(__a);
509 uvec2 b = unpackUint2x32(__b);
510 a.y |= 0x00080000u;
511 b.y |= 0x00080000u;
512
513 return packUint2x32(mix(b, mix(a, b, bvec2(bIsNaN, bIsNaN)), bvec2(aIsNaN, aIsNaN)));
514 }
515
516 /* Returns the result of adding the double-precision floating-point values
517 * `a' and `b'. The operation is performed according to the IEEE Standard for
518 * Floating-Point Arithmetic.
519 */
520 uint64_t
521 __fadd64(uint64_t a, uint64_t b)
522 {
523 uint aSign = __extractFloat64Sign(a);
524 uint bSign = __extractFloat64Sign(b);
525 uint aFracLo = __extractFloat64FracLo(a);
526 uint aFracHi = __extractFloat64FracHi(a);
527 uint bFracLo = __extractFloat64FracLo(b);
528 uint bFracHi = __extractFloat64FracHi(b);
529 int aExp = __extractFloat64Exp(a);
530 int bExp = __extractFloat64Exp(b);
531 uint zFrac0 = 0u;
532 uint zFrac1 = 0u;
533 int expDiff = aExp - bExp;
534 if (aSign == bSign) {
535 uint zFrac2 = 0u;
536 int zExp;
537 bool orig_exp_diff_is_zero = (expDiff == 0);
538
539 if (orig_exp_diff_is_zero) {
540 if (aExp == 0x7FF) {
541 bool propagate = (aFracHi | aFracLo | bFracHi | bFracLo) != 0u;
542 return mix(a, __propagateFloat64NaN(a, b), propagate);
543 }
544 __add64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
545 if (aExp == 0)
546 return __packFloat64(aSign, 0, zFrac0, zFrac1);
547 zFrac2 = 0u;
548 zFrac0 |= 0x00200000u;
549 zExp = aExp;
550 __shift64ExtraRightJamming(
551 zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
552 } else if (0 < expDiff) {
553 if (aExp == 0x7FF) {
554 bool propagate = (aFracHi | aFracLo) != 0u;
555 return mix(a, __propagateFloat64NaN(a, b), propagate);
556 }
557
558 expDiff = mix(expDiff, expDiff - 1, bExp == 0);
559 bFracHi = mix(bFracHi | 0x00100000u, bFracHi, bExp == 0);
560 __shift64ExtraRightJamming(
561 bFracHi, bFracLo, 0u, expDiff, bFracHi, bFracLo, zFrac2);
562 zExp = aExp;
563 } else if (expDiff < 0) {
564 if (bExp == 0x7FF) {
565 bool propagate = (bFracHi | bFracLo) != 0u;
566 return mix(__packFloat64(aSign, 0x7ff, 0u, 0u), __propagateFloat64NaN(a, b), propagate);
567 }
568 expDiff = mix(expDiff, expDiff + 1, aExp == 0);
569 aFracHi = mix(aFracHi | 0x00100000u, aFracHi, aExp == 0);
570 __shift64ExtraRightJamming(
571 aFracHi, aFracLo, 0u, - expDiff, aFracHi, aFracLo, zFrac2);
572 zExp = bExp;
573 }
574 if (!orig_exp_diff_is_zero) {
575 aFracHi |= 0x00100000u;
576 __add64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
577 --zExp;
578 if (!(zFrac0 < 0x00200000u)) {
579 __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
580 ++zExp;
581 }
582 }
583 return __roundAndPackFloat64(aSign, zExp, zFrac0, zFrac1, zFrac2);
584
585 } else {
586 int zExp;
587
588 __shortShift64Left(aFracHi, aFracLo, 10, aFracHi, aFracLo);
589 __shortShift64Left(bFracHi, bFracLo, 10, bFracHi, bFracLo);
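      /* Both significands are pre-shifted left by 10 bits so the subtraction
       * below is carried out with extra precision; the zExp - 10 passed to
       * __normalizeRoundAndPackFloat64 compensates for it.
       */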
590 if (0 < expDiff) {
591 if (aExp == 0x7FF) {
592 bool propagate = (aFracHi | aFracLo) != 0u;
593 return mix(a, __propagateFloat64NaN(a, b), propagate);
594 }
595 expDiff = mix(expDiff, expDiff - 1, bExp == 0);
596 bFracHi = mix(bFracHi | 0x40000000u, bFracHi, bExp == 0);
597 __shift64RightJamming(bFracHi, bFracLo, expDiff, bFracHi, bFracLo);
598 aFracHi |= 0x40000000u;
599 __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
600 zExp = aExp;
601 --zExp;
602 return __normalizeRoundAndPackFloat64(aSign, zExp - 10, zFrac0, zFrac1);
603 }
604 if (expDiff < 0) {
605 if (bExp == 0x7FF) {
606 bool propagate = (bFracHi | bFracLo) != 0u;
607 return mix(__packFloat64(aSign ^ 1u, 0x7ff, 0u, 0u), __propagateFloat64NaN(a, b), propagate);
608 }
609 expDiff = mix(expDiff, expDiff + 1, aExp == 0);
610 aFracHi = mix(aFracHi | 0x40000000u, aFracHi, aExp == 0);
611 __shift64RightJamming(aFracHi, aFracLo, - expDiff, aFracHi, aFracLo);
612 bFracHi |= 0x40000000u;
613 __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);
614 zExp = bExp;
615 aSign ^= 1u;
616 --zExp;
617 return __normalizeRoundAndPackFloat64(aSign, zExp - 10, zFrac0, zFrac1);
618 }
619 if (aExp == 0x7FF) {
620 bool propagate = (aFracHi | aFracLo | bFracHi | bFracLo) != 0u;
621 return mix(0xFFFFFFFFFFFFFFFFUL, __propagateFloat64NaN(a, b), propagate);
622 }
623 bExp = mix(bExp, 1, aExp == 0);
624 aExp = mix(aExp, 1, aExp == 0);
625 bool zexp_normal = false;
626 bool blta = true;
627 if (bFracHi < aFracHi) {
628 __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
629 zexp_normal = true;
630 }
631 else if (aFracHi < bFracHi) {
632 __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);
633 blta = false;
634 zexp_normal = true;
635 }
636 else if (bFracLo < aFracLo) {
637 __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
638 zexp_normal = true;
639 }
640 else if (aFracLo < bFracLo) {
641 __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);
642 blta = false;
643 zexp_normal = true;
644 }
645 zExp = mix(bExp, aExp, blta);
646 aSign = mix(aSign ^ 1u, aSign, blta);
647 uint64_t retval_0 = __packFloat64(uint(FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN), 0, 0u, 0u);
648 uint64_t retval_1 = __normalizeRoundAndPackFloat64(aSign, zExp - 11, zFrac0, zFrac1);
649 return mix(retval_0, retval_1, zexp_normal);
650 }
651 }
652
653 /* Multiplies `a' by `b' to obtain a 64-bit product. The product is broken
654 * into two 32-bit pieces which are stored at the locations pointed to by
655 * `z0Ptr' and `z1Ptr'.
656 */
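/* This is schoolbook multiplication on 16-bit halves: the two cross terms
 * are summed and their carries propagated into the high word explicitly,
 * using only 32-bit unsigned arithmetic.
 */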
657 void
658 __mul32To64(uint a, uint b, out uint z0Ptr, out uint z1Ptr)
659 {
660 uint aLow = a & 0x0000FFFFu;
661 uint aHigh = a>>16;
662 uint bLow = b & 0x0000FFFFu;
663 uint bHigh = b>>16;
664 uint z1 = aLow * bLow;
665 uint zMiddleA = aLow * bHigh;
666 uint zMiddleB = aHigh * bLow;
667 uint z0 = aHigh * bHigh;
668 zMiddleA += zMiddleB;
669 z0 += ((uint(zMiddleA < zMiddleB)) << 16) + (zMiddleA >> 16);
670 zMiddleA <<= 16;
671 z1 += zMiddleA;
672 z0 += uint(z1 < zMiddleA);
673 z1Ptr = z1;
674 z0Ptr = z0;
675 }
676
677 /* Multiplies the 64-bit value formed by concatenating `a0' and `a1' by the
678 * 64-bit value formed by concatenating `b0' and `b1' to obtain a 128-bit
679 * product. The product is broken into four 32-bit pieces which are stored at
680 * the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'.
681 */
682 void
683 __mul64To128(uint a0, uint a1, uint b0, uint b1,
684 out uint z0Ptr,
685 out uint z1Ptr,
686 out uint z2Ptr,
687 out uint z3Ptr)
688 {
689 uint z0 = 0u;
690 uint z1 = 0u;
691 uint z2 = 0u;
692 uint z3 = 0u;
693 uint more1 = 0u;
694 uint more2 = 0u;
695
696 __mul32To64(a1, b1, z2, z3);
697 __mul32To64(a1, b0, z1, more2);
698 __add64(z1, more2, 0u, z2, z1, z2);
699 __mul32To64(a0, b0, z0, more1);
700 __add64(z0, more1, 0u, z1, z0, z1);
701 __mul32To64(a0, b1, more1, more2);
702 __add64(more1, more2, 0u, z2, more1, z2);
703 __add64(z0, z1, 0u, more1, z0, z1);
704 z3Ptr = z3;
705 z2Ptr = z2;
706 z1Ptr = z1;
707 z0Ptr = z0;
708 }
709
710 /* Normalizes the subnormal double-precision floating-point value represented
711 * by the denormalized significand formed by the concatenation of `aFrac0' and
712 * `aFrac1'. The normalized exponent is stored at the location pointed to by
713 * `zExpPtr'. The most significant 21 bits of the normalized significand are
714 * stored at the location pointed to by `zFrac0Ptr', and the least significant
715 * 32 bits of the normalized significand are stored at the location pointed to
716 * by `zFrac1Ptr'.
717 */
718 void
719 __normalizeFloat64Subnormal(uint aFrac0, uint aFrac1,
720 out int zExpPtr,
721 out uint zFrac0Ptr,
722 out uint zFrac1Ptr)
723 {
724 int shiftCount;
725 uint temp_zfrac0, temp_zfrac1;
726 shiftCount = __countLeadingZeros32(mix(aFrac0, aFrac1, aFrac0 == 0u)) - 11;
727 zExpPtr = mix(1 - shiftCount, -shiftCount - 31, aFrac0 == 0u);
728
729 temp_zfrac0 = mix(aFrac1<<shiftCount, aFrac1>>(-shiftCount), shiftCount < 0);
730 temp_zfrac1 = mix(0u, aFrac1<<(shiftCount & 31), shiftCount < 0);
731
732 __shortShift64Left(aFrac0, aFrac1, shiftCount, zFrac0Ptr, zFrac1Ptr);
733
734 zFrac0Ptr = mix(zFrac0Ptr, temp_zfrac0, aFrac0 == 0);
735 zFrac1Ptr = mix(zFrac1Ptr, temp_zfrac1, aFrac0 == 0);
736 }
737
738 /* Returns the result of multiplying the double-precision floating-point values
739 * `a' and `b'. The operation is performed according to the IEEE Standard for
740 * Floating-Point Arithmetic.
741 */
742 uint64_t
743 __fmul64(uint64_t a, uint64_t b)
744 {
745 uint zFrac0 = 0u;
746 uint zFrac1 = 0u;
747 uint zFrac2 = 0u;
748 uint zFrac3 = 0u;
749 int zExp;
750
751 uint aFracLo = __extractFloat64FracLo(a);
752 uint aFracHi = __extractFloat64FracHi(a);
753 uint bFracLo = __extractFloat64FracLo(b);
754 uint bFracHi = __extractFloat64FracHi(b);
755 int aExp = __extractFloat64Exp(a);
756 uint aSign = __extractFloat64Sign(a);
757 int bExp = __extractFloat64Exp(b);
758 uint bSign = __extractFloat64Sign(b);
759 uint zSign = aSign ^ bSign;
760 if (aExp == 0x7FF) {
761 if (((aFracHi | aFracLo) != 0u) ||
762 ((bExp == 0x7FF) && ((bFracHi | bFracLo) != 0u))) {
763 return __propagateFloat64NaN(a, b);
764 }
765 if ((uint(bExp) | bFracHi | bFracLo) == 0u)
766 return 0xFFFFFFFFFFFFFFFFUL;
767 return __packFloat64(zSign, 0x7FF, 0u, 0u);
768 }
769 if (bExp == 0x7FF) {
770 if ((bFracHi | bFracLo) != 0u)
771 return __propagateFloat64NaN(a, b);
772 if ((uint(aExp) | aFracHi | aFracLo) == 0u)
773 return 0xFFFFFFFFFFFFFFFFUL;
774 return __packFloat64(zSign, 0x7FF, 0u, 0u);
775 }
776 if (aExp == 0) {
777 if ((aFracHi | aFracLo) == 0u)
778 return __packFloat64(zSign, 0, 0u, 0u);
779 __normalizeFloat64Subnormal(aFracHi, aFracLo, aExp, aFracHi, aFracLo);
780 }
781 if (bExp == 0) {
782 if ((bFracHi | bFracLo) == 0u)
783 return __packFloat64(zSign, 0, 0u, 0u);
784 __normalizeFloat64Subnormal(bFracHi, bFracLo, bExp, bFracHi, bFracLo);
785 }
786 zExp = aExp + bExp - 0x400;
787 aFracHi |= 0x00100000u;
788 __shortShift64Left(bFracHi, bFracLo, 12, bFracHi, bFracLo);
789 __mul64To128(
790 aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1, zFrac2, zFrac3);
791 __add64(zFrac0, zFrac1, aFracHi, aFracLo, zFrac0, zFrac1);
792 zFrac2 |= uint(zFrac3 != 0u);
793 if (0x00200000u <= zFrac0) {
794 __shift64ExtraRightJamming(
795 zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
796 ++zExp;
797 }
798 return __roundAndPackFloat64(zSign, zExp, zFrac0, zFrac1, zFrac2);
799 }
800
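/* Computes a * b + c. Note that this is a multiply followed by an add, so
 * the intermediate product is rounded; it is not a fused multiply-add.
 */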
801 uint64_t
802 __ffma64(uint64_t a, uint64_t b, uint64_t c)
803 {
804 return __fadd64(__fmul64(a, b), c);
805 }
806
807 /* Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the
808 * number of bits given in `count'. Any bits shifted off are lost. The value
809 * of `count' can be arbitrarily large; in particular, if `count' is greater
810 * than 64, the result will be 0. The result is broken into two 32-bit pieces
811 * which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
812 */
813 void
814 __shift64Right(uint a0, uint a1,
815 int count,
816 out uint z0Ptr,
817 out uint z1Ptr)
818 {
819 uint z0;
820 uint z1;
821 int negCount = (-count) & 31;
822
823 z0 = 0u;
824 z0 = mix(z0, (a0 >> count), count < 32);
825 z0 = mix(z0, a0, count == 0);
826
827 z1 = mix(0u, (a0 >> (count & 31)), count < 64);
828 z1 = mix(z1, (a0<<negCount) | (a1>>count), count < 32);
829    z1 = mix(z1, a1, count == 0);
830
831 z1Ptr = z1;
832 z0Ptr = z0;
833 }
834
835 /* Returns the result of converting the double-precision floating-point value
836 * `a' to the unsigned integer format. The conversion is performed according
837 * to the IEEE Standard for Floating-Point Arithmetic.
838 */
839 uint
840 __fp64_to_uint(uint64_t a)
841 {
842 uint aFracLo = __extractFloat64FracLo(a);
843 uint aFracHi = __extractFloat64FracHi(a);
844 int aExp = __extractFloat64Exp(a);
845 uint aSign = __extractFloat64Sign(a);
846
847 if ((aExp == 0x7FF) && ((aFracHi | aFracLo) != 0u))
848 return 0xFFFFFFFFu;
849
850 aFracHi |= mix(0u, 0x00100000u, aExp != 0);
851
852 int shiftDist = 0x427 - aExp;
853 if (0 < shiftDist)
854 __shift64RightJamming(aFracHi, aFracLo, shiftDist, aFracHi, aFracLo);
855
856 if ((aFracHi & 0xFFFFF000u) != 0u)
857 return mix(~0u, 0u, (aSign != 0u));
858
859 uint z = 0u;
860 uint zero = 0u;
861 __shift64Right(aFracHi, aFracLo, 12, zero, z);
862
863 uint expt = mix(~0u, 0u, (aSign != 0u));
864
865 return mix(z, expt, (aSign != 0u) && (z != 0u));
866 }
867
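/* Returns the result of converting the 32-bit unsigned integer `a' to the
 * double-precision floating-point format. The result is always exact, since
 * a 32-bit integer fits in the 53-bit significand.
 */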
868 uint64_t
869 __uint_to_fp64(uint a)
870 {
871 if (a == 0u)
872 return 0ul;
873
874 int shiftDist = __countLeadingZeros32(a) + 21;
875
876 uint aHigh = 0u;
877 uint aLow = 0u;
878 int negCount = (- shiftDist) & 31;
879
880 aHigh = mix(0u, a<< shiftDist - 32, shiftDist < 64);
881 aLow = 0u;
882 aHigh = mix(aHigh, 0u, shiftDist == 0);
883 aLow = mix(aLow, a, shiftDist ==0);
884 aHigh = mix(aHigh, a >> negCount, shiftDist < 32);
885 aLow = mix(aLow, a << shiftDist, shiftDist < 32);
886
887 return __packFloat64(0u, 0x432 - shiftDist, aHigh, aLow);
888 }
889
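/* Returns the result of converting the 64-bit unsigned integer `a' to the
 * double-precision floating-point format. The conversion is performed
 * according to the IEEE Standard for Floating-Point Arithmetic.
 */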
890 uint64_t
891 __uint64_to_fp64(uint64_t a)
892 {
893 if (a == 0u)
894 return 0ul;
895
896 uvec2 aFrac = unpackUint2x32(a);
897 uint aFracLo = __extractFloat64FracLo(a);
898 uint aFracHi = __extractFloat64FracHi(a);
899
900 if ((aFracHi & 0x80000000u) != 0u) {
901 __shift64RightJamming(aFracHi, aFracLo, 1, aFracHi, aFracLo);
902 return __roundAndPackFloat64(0, 0x433, aFracHi, aFracLo, 0u);
903 } else {
904 return __normalizeRoundAndPackFloat64(0, 0x432, aFrac.y, aFrac.x);
905 }
906 }
907
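/* Returns the result of converting the 64-bit two's complement integer `a'
 * to the double-precision floating-point format. The conversion is performed
 * according to the IEEE Standard for Floating-Point Arithmetic.
 */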
908 uint64_t
909 __int64_to_fp64(int64_t a)
910 {
911 if (a==0)
912 return 0ul;
913
914 uint64_t absA = mix(uint64_t(a), uint64_t(-a), a < 0);
915 uint aFracHi = __extractFloat64FracHi(absA);
916 uvec2 aFrac = unpackUint2x32(absA);
917 uint zSign = uint(a < 0);
918
919 if ((aFracHi & 0x80000000u) != 0u) {
920 return mix(0ul, __packFloat64(1, 0x434, 0u, 0u), a < 0);
921 }
922
923 return __normalizeRoundAndPackFloat64(zSign, 0x432, aFrac.y, aFrac.x);
924 }
925
926 /* Returns the result of converting the double-precision floating-point value
927 * `a' to the 32-bit two's complement integer format. The conversion is
928 * performed according to the IEEE Standard for Floating-Point Arithmetic---
929 * which means in particular that the conversion is rounded according to the
930 * current rounding mode. If `a' is a NaN, the largest positive integer is
931 * returned. Otherwise, if the conversion overflows, the largest integer with
932 * the same sign as `a' is returned.
933 */
934 int
935 __fp64_to_int(uint64_t a)
936 {
937 uint aFracLo = __extractFloat64FracLo(a);
938 uint aFracHi = __extractFloat64FracHi(a);
939 int aExp = __extractFloat64Exp(a);
940 uint aSign = __extractFloat64Sign(a);
941
942 uint absZ = 0u;
943 uint aFracExtra = 0u;
944 int shiftCount = aExp - 0x413;
945
946 if (0 <= shiftCount) {
947 if (0x41E < aExp) {
948 if ((aExp == 0x7FF) && bool(aFracHi | aFracLo))
949 aSign = 0u;
950 return mix(0x7FFFFFFF, 0x80000000, bool(aSign));
951 }
952 __shortShift64Left(aFracHi | 0x00100000u, aFracLo, shiftCount, absZ, aFracExtra);
953 } else {
954 if (aExp < 0x3FF)
955 return 0;
956
957 aFracHi |= 0x00100000u;
958 aFracExtra = ( aFracHi << (shiftCount & 31)) | aFracLo;
959 absZ = aFracHi >> (- shiftCount);
960 }
961
962 int z = mix(int(absZ), -int(absZ), (aSign != 0u));
963 int nan = mix(0x7FFFFFFF, 0x80000000, bool(aSign));
964 return mix(z, nan, bool(aSign ^ uint(z < 0)) && bool(z));
965 }
966
967 /* Returns the result of converting the 32-bit two's complement integer `a'
968 * to the double-precision floating-point format. The conversion is performed
969 * according to the IEEE Standard for Floating-Point Arithmetic.
970 */
971 uint64_t
972 __int_to_fp64(int a)
973 {
974 uint zFrac0 = 0u;
975 uint zFrac1 = 0u;
976 if (a==0)
977 return __packFloat64(0u, 0, 0u, 0u);
978 uint zSign = uint(a < 0);
979 uint absA = mix(uint(a), uint(-a), a < 0);
980 int shiftCount = __countLeadingZeros32(absA) - 11;
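   /* 0x412 - shiftCount works out to 1022 plus the bit position of absA's
    * most significant bit; __packFloat64 then adds one more when the integer
    * bit of zFrac0 carries into the exponent field.
    */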
981 if (0 <= shiftCount) {
982 zFrac0 = absA << shiftCount;
983 zFrac1 = 0u;
984 } else {
985 __shift64Right(absA, 0u, -shiftCount, zFrac0, zFrac1);
986 }
987 return __packFloat64(zSign, 0x412 - shiftCount, zFrac0, zFrac1);
988 }
989
990 bool
991 __fp64_to_bool(uint64_t a)
992 {
993 return !__feq64_nonnan(__fabs64(a), 0ul);
994 }
995
996 uint64_t
997 __bool_to_fp64(bool a)
998 {
999 return __int_to_fp64(int(a));
1000 }
1001
1002 /* Packs the sign `zSign', exponent `zExp', and significand `zFrac' into a
1003 * single-precision floating-point value, returning the result. After being
1004 * shifted into the proper positions, the three fields are simply added
1005  * together to form the result. This means that any integer portion of `zFrac'
1006 * will be added into the exponent. Since a properly normalized significand
1007 * will have an integer portion equal to 1, the `zExp' input should be 1 less
1008 * than the desired result exponent whenever `zFrac' is a complete, normalized
1009 * significand.
1010 */
1011 float
1012 __packFloat32(uint zSign, int zExp, uint zFrac)
1013 {
1014 return uintBitsToFloat((zSign<<31) + (uint(zExp)<<23) + zFrac);
1015 }
1016
1017 /* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
1018 * and significand `zFrac', and returns the proper single-precision floating-
1019 * point value corresponding to the abstract input. Ordinarily, the abstract
1020 * value is simply rounded and packed into the single-precision format, with
1021 * the inexact exception raised if the abstract input cannot be represented
1022 * exactly. However, if the abstract value is too large, the overflow and
1023 * inexact exceptions are raised and an infinity or maximal finite value is
1024 * returned. If the abstract value is too small, the input value is rounded to
1025 * a subnormal number, and the underflow and inexact exceptions are raised if
1026 * the abstract input cannot be represented exactly as a subnormal single-
1027 * precision floating-point number.
1028 * The input significand `zFrac' has its binary point between bits 30
1029 * and 29, which is 7 bits to the left of the usual location. This shifted
1030 * significand must be normalized or smaller. If `zFrac' is not normalized,
1031 * `zExp' must be 0; in that case, the result returned is a subnormal number,
1032 * and it must not require rounding. In the usual case that `zFrac' is
1033 * normalized, `zExp' must be 1 less than the "true" floating-point exponent.
1034 * The handling of underflow and overflow follows the IEEE Standard for
1035 * Floating-Point Arithmetic.
1036 */
1037 float
1038 __roundAndPackFloat32(uint zSign, int zExp, uint zFrac)
1039 {
1040 bool roundNearestEven;
1041 int roundIncrement;
1042 int roundBits;
1043
1044 roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;
1045 roundIncrement = 0x40;
1046 if (!roundNearestEven) {
1047 if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) {
1048 roundIncrement = 0;
1049 } else {
1050 roundIncrement = 0x7F;
1051 if (zSign != 0u) {
1052 if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP)
1053 roundIncrement = 0;
1054 } else {
1055 if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN)
1056 roundIncrement = 0;
1057 }
1058 }
1059 }
1060 roundBits = int(zFrac & 0x7Fu);
1061 if (0xFDu <= uint(zExp)) {
1062 if ((0xFD < zExp) || ((zExp == 0xFD) && (int(zFrac) + roundIncrement) < 0))
1063 return __packFloat32(zSign, 0xFF, 0u) - float(roundIncrement == 0);
1064 int count = -zExp;
1065 bool zexp_lt0 = zExp < 0;
1066 uint zFrac_lt0 = mix(uint(zFrac != 0u), (zFrac>>count) | uint((zFrac<<((-count) & 31)) != 0u), (-zExp) < 32);
1067 zFrac = mix(zFrac, zFrac_lt0, zexp_lt0);
1068 roundBits = mix(roundBits, int(zFrac) & 0x7f, zexp_lt0);
1069 zExp = mix(zExp, 0, zexp_lt0);
1070 }
1071 zFrac = (zFrac + uint(roundIncrement))>>7;
1072 zFrac &= ~uint(((roundBits ^ 0x40) == 0) && roundNearestEven);
1073
1074 return __packFloat32(zSign, mix(zExp, 0, zFrac == 0u), zFrac);
1075 }
1076
1077 /* Returns the result of converting the double-precision floating-point value
1078 * `a' to the single-precision floating-point format. The conversion is
1079 * performed according to the IEEE Standard for Floating-Point Arithmetic.
1080 */
1081 float
1082 __fp64_to_fp32(uint64_t __a)
1083 {
1084 uvec2 a = unpackUint2x32(__a);
1085 uint zFrac = 0u;
1086 uint allZero = 0u;
1087
1088 uint aFracLo = __extractFloat64FracLo(__a);
1089 uint aFracHi = __extractFloat64FracHi(__a);
1090 int aExp = __extractFloat64Exp(__a);
1091 uint aSign = __extractFloat64Sign(__a);
1092 if (aExp == 0x7FF) {
1093 __shortShift64Left(a.y, a.x, 12, a.y, a.x);
1094 float rval = uintBitsToFloat((aSign<<31) | 0x7FC00000u | (a.y>>9));
1095 rval = mix(__packFloat32(aSign, 0xFF, 0u), rval, (aFracHi | aFracLo) != 0u);
1096 return rval;
1097 }
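   /* A jamming shift right by 22 turns the 52-bit fraction into 30 bits plus
    * a sticky bit, with the hidden bit OR'd in at bit 30 below, which is the
    * layout __roundAndPackFloat32 expects. 0x381 is the exponent bias
    * difference (0x3FF - 0x7F) plus one, since zExp must be one less than
    * the true single-precision exponent.
    */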
1098 __shift64RightJamming(aFracHi, aFracLo, 22, allZero, zFrac);
1099 zFrac = mix(zFrac, zFrac | 0x40000000u, aExp != 0);
1100 return __roundAndPackFloat32(aSign, aExp - 0x381, zFrac);
1101 }
1102
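/* Returns the result of converting the 64-bit unsigned integer `a' to the
 * single-precision floating-point format. The conversion is performed
 * according to the IEEE Standard for Floating-Point Arithmetic.
 */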
1103 float
1104 __uint64_to_fp32(uint64_t __a)
1105 {
1106 uint zFrac = 0u;
1107 uvec2 aFrac = unpackUint2x32(__a);
1108 int shiftCount = __countLeadingZeros32(mix(aFrac.y, aFrac.x, aFrac.y == 0u));
1109 shiftCount -= mix(40, 8, aFrac.y == 0u);
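   /* shiftCount is now clz64(a) - 40: non-negative when the value fits in 24
    * bits and can be packed exactly, negative when rounding may be required.
    */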
1110
1111 if (0 <= shiftCount) {
1112 __shortShift64Left(aFrac.y, aFrac.x, shiftCount, aFrac.y, aFrac.x);
1113 bool is_zero = (aFrac.y | aFrac.x) == 0u;
1114 return mix(__packFloat32(0u, 0x95 - shiftCount, aFrac.x), 0, is_zero);
1115 }
1116
1117 shiftCount += 7;
1118    if (shiftCount < 0) /* keep aFrac.x intact for the 0 <= shiftCount case below */
           __shift64RightJamming(aFrac.y, aFrac.x, -shiftCount, aFrac.y, aFrac.x);
1119 zFrac = mix(aFrac.x<<shiftCount, aFrac.x, shiftCount < 0);
1120 return __roundAndPackFloat32(0u, 0x9C - shiftCount, zFrac);
1121 }
1122
1123 /* Returns the result of converting the single-precision floating-point value
1124 * `a' to the double-precision floating-point format.
1125 */
1126 uint64_t
1127 __fp32_to_fp64(float f)
1128 {
1129 uint a = floatBitsToUint(f);
1130 uint aFrac = a & 0x007FFFFFu;
1131 int aExp = int((a>>23) & 0xFFu);
1132 uint aSign = a>>31;
1133 uint zFrac0 = 0u;
1134 uint zFrac1 = 0u;
1135
1136 if (aExp == 0xFF) {
1137 if (aFrac != 0u) {
1138 uint nanLo = 0u;
1139 uint nanHi = a<<9;
1140 __shift64Right(nanHi, nanLo, 12, nanHi, nanLo);
1141 nanHi |= ((aSign<<31) | 0x7FF80000u);
1142 return packUint2x32(uvec2(nanLo, nanHi));
1143 }
1144 return __packFloat64(aSign, 0x7FF, 0u, 0u);
1145 }
1146
1147 if (aExp == 0) {
1148 if (aFrac == 0u)
1149 return __packFloat64(aSign, 0, 0u, 0u);
1150 /* Normalize subnormal */
1151 int shiftCount = __countLeadingZeros32(aFrac) - 8;
1152 aFrac <<= shiftCount;
1153 aExp = 1 - shiftCount;
1154 --aExp;
1155 }
1156
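   /* Shifting the 23-bit fraction right by 3 across the two words places it
    * at the top of the 52-bit double-precision fraction (52 - 23 = 29 zero
    * bits remain below it), and 0x380 = 1023 - 127 rebiases the exponent
    * from single to double precision.
    */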
1157 __shift64Right(aFrac, 0u, 3, zFrac0, zFrac1);
1158 return __packFloat64(aSign, aExp + 0x380, zFrac0, zFrac1);
1159 }
1160
1161 /* Adds the 96-bit value formed by concatenating `a0', `a1', and `a2' to the
1162 * 96-bit value formed by concatenating `b0', `b1', and `b2'. Addition is
1163 * modulo 2^96, so any carry out is lost. The result is broken into three
1164 * 32-bit pieces which are stored at the locations pointed to by `z0Ptr',
1165 * `z1Ptr', and `z2Ptr'.
1166 */
1167 void
1168 __add96(uint a0, uint a1, uint a2,
1169 uint b0, uint b1, uint b2,
1170 out uint z0Ptr,
1171 out uint z1Ptr,
1172 out uint z2Ptr)
1173 {
1174 uint z2 = a2 + b2;
1175 uint carry1 = uint(z2 < a2);
1176 uint z1 = a1 + b1;
1177 uint carry0 = uint(z1 < a1);
1178 uint z0 = a0 + b0;
1179 z1 += carry1;
1180 z0 += uint(z1 < carry1);
1181 z0 += carry0;
1182 z2Ptr = z2;
1183 z1Ptr = z1;
1184 z0Ptr = z0;
1185 }
1186
1187 /* Subtracts the 96-bit value formed by concatenating `b0', `b1', and `b2' from
1188 * the 96-bit value formed by concatenating `a0', `a1', and `a2'. Subtraction
1189 * is modulo 2^96, so any borrow out (carry out) is lost. The result is broken
1190 * into three 32-bit pieces which are stored at the locations pointed to by
1191 * `z0Ptr', `z1Ptr', and `z2Ptr'.
1192 */
1193 void
1194 __sub96(uint a0, uint a1, uint a2,
1195 uint b0, uint b1, uint b2,
1196 out uint z0Ptr,
1197 out uint z1Ptr,
1198 out uint z2Ptr)
1199 {
1200 uint z2 = a2 - b2;
1201 uint borrow1 = uint(a2 < b2);
1202 uint z1 = a1 - b1;
1203 uint borrow0 = uint(a1 < b1);
1204 uint z0 = a0 - b0;
1205 z0 -= uint(z1 < borrow1);
1206 z1 -= borrow1;
1207 z0 -= borrow0;
1208 z2Ptr = z2;
1209 z1Ptr = z1;
1210 z0Ptr = z0;
1211 }
1212
1213 /* Returns an approximation to the 32-bit integer quotient obtained by dividing
1214 * `b' into the 64-bit value formed by concatenating `a0' and `a1'. The
1215 * divisor `b' must be at least 2^31. If q is the exact quotient truncated
1216 * toward zero, the approximation returned lies between q and q + 2 inclusive.
1217 * If the exact quotient q is larger than 32 bits, the maximum positive 32-bit
1218 * unsigned integer is returned.
1219 */
1220 uint
1221 __estimateDiv64To32(uint a0, uint a1, uint b)
1222 {
1223 uint b0;
1224 uint b1;
1225 uint rem0 = 0u;
1226 uint rem1 = 0u;
1227 uint term0 = 0u;
1228 uint term1 = 0u;
1229 uint z;
1230
1231 if (b <= a0)
1232 return 0xFFFFFFFFu;
1233 b0 = b>>16;
1234 z = (b0<<16 <= a0) ? 0xFFFF0000u : (a0 / b0)<<16;
1235 __mul32To64(b, z, term0, term1);
1236 __sub64(a0, a1, term0, term1, rem0, rem1);
1237 while (int(rem0) < 0) {
1238 z -= 0x10000u;
1239 b1 = b<<16;
1240 __add64(rem0, rem1, b0, b1, rem0, rem1);
1241 }
1242 rem0 = (rem0<<16) | (rem1>>16);
1243 z |= (b0<<16 <= rem0) ? 0xFFFFu : rem0 / b0;
1244 return z;
1245 }
1246
1247 uint
1248 __sqrtOddAdjustments(int index)
1249 {
1250 uint res = 0u;
1251 if (index == 0)
1252 res = 0x0004u;
1253 if (index == 1)
1254 res = 0x0022u;
1255 if (index == 2)
1256 res = 0x005Du;
1257 if (index == 3)
1258 res = 0x00B1u;
1259 if (index == 4)
1260 res = 0x011Du;
1261 if (index == 5)
1262 res = 0x019Fu;
1263 if (index == 6)
1264 res = 0x0236u;
1265 if (index == 7)
1266 res = 0x02E0u;
1267 if (index == 8)
1268 res = 0x039Cu;
1269 if (index == 9)
1270 res = 0x0468u;
1271 if (index == 10)
1272 res = 0x0545u;
1273 if (index == 11)
1274       res = 0x0631u;
1275 if (index == 12)
1276 res = 0x072Bu;
1277 if (index == 13)
1278 res = 0x0832u;
1279 if (index == 14)
1280 res = 0x0946u;
1281 if (index == 15)
1282 res = 0x0A67u;
1283
1284 return res;
1285 }
1286
1287 uint
1288 __sqrtEvenAdjustments(int index)
1289 {
1290 uint res = 0u;
1291 if (index == 0)
1292 res = 0x0A2Du;
1293 if (index == 1)
1294 res = 0x08AFu;
1295 if (index == 2)
1296 res = 0x075Au;
1297 if (index == 3)
1298 res = 0x0629u;
1299 if (index == 4)
1300 res = 0x051Au;
1301 if (index == 5)
1302 res = 0x0429u;
1303 if (index == 6)
1304 res = 0x0356u;
1305 if (index == 7)
1306 res = 0x029Eu;
1307 if (index == 8)
1308 res = 0x0200u;
1309 if (index == 9)
1310 res = 0x0179u;
1311 if (index == 10)
1312 res = 0x0109u;
1313 if (index == 11)
1314 res = 0x00AFu;
1315 if (index == 12)
1316 res = 0x0068u;
1317 if (index == 13)
1318 res = 0x0034u;
1319 if (index == 14)
1320 res = 0x0012u;
1321 if (index == 15)
1322 res = 0x0002u;
1323
1324 return res;
1325 }
1326
1327 /* Returns an approximation to the square root of the 32-bit significand given
1328 * by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of
1329 * `aExp' (the least significant bit) is 1, the integer returned approximates
1330 * 2^31*sqrt(`a'/2^31), where `a' is considered an integer. If bit 0 of `aExp'
1331 * is 0, the integer returned approximates 2^31*sqrt(`a'/2^30). In either
1332 * case, the approximation returned lies strictly within +/-2 of the exact
1333 * value.
1334 */
1335 uint
1336 __estimateSqrt32(int aExp, uint a)
1337 {
1338 uint z;
1339
1340 int index = int(a>>27 & 15u);
1341 if ((aExp & 1) != 0) {
1342 z = 0x4000u + (a>>17) - __sqrtOddAdjustments(index);
1343 z = ((a / z)<<14) + (z<<15);
1344 a >>= 1;
1345 } else {
1346 z = 0x8000u + (a>>17) - __sqrtEvenAdjustments(index);
1347 z = a / z + z;
1348 z = (0x20000u <= z) ? 0xFFFF8000u : (z<<15);
1349 if (z <= a)
1350 return uint(int(a)>>1);
1351 }
1352 return ((__estimateDiv64To32(a, 0u, z))>>1) + (z>>1);
1353 }
1354
1355 /* Returns the square root of the double-precision floating-point value `a'.
1356 * The operation is performed according to the IEEE Standard for Floating-Point
1357 * Arithmetic.
1358 */
1359 uint64_t
1360 __fsqrt64(uint64_t a)
1361 {
1362 uint zFrac0 = 0u;
1363 uint zFrac1 = 0u;
1364 uint zFrac2 = 0u;
1365 uint doubleZFrac0 = 0u;
1366 uint rem0 = 0u;
1367 uint rem1 = 0u;
1368 uint rem2 = 0u;
1369 uint rem3 = 0u;
1370 uint term0 = 0u;
1371 uint term1 = 0u;
1372 uint term2 = 0u;
1373 uint term3 = 0u;
1374 uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;
1375
1376 uint aFracLo = __extractFloat64FracLo(a);
1377 uint aFracHi = __extractFloat64FracHi(a);
1378 int aExp = __extractFloat64Exp(a);
1379 uint aSign = __extractFloat64Sign(a);
1380 if (aExp == 0x7FF) {
1381 if ((aFracHi | aFracLo) != 0u)
1382 return __propagateFloat64NaN(a, a);
1383 if (aSign == 0u)
1384 return a;
1385 return default_nan;
1386 }
1387 if (aSign != 0u) {
1388 if ((uint(aExp) | aFracHi | aFracLo) == 0u)
1389 return a;
1390 return default_nan;
1391 }
1392 if (aExp == 0) {
1393 if ((aFracHi | aFracLo) == 0u)
1394 return __packFloat64(0u, 0, 0u, 0u);
1395 __normalizeFloat64Subnormal(aFracHi, aFracLo, aExp, aFracHi, aFracLo);
1396 }
1397 int zExp = ((aExp - 0x3FF)>>1) + 0x3FE;
1398 aFracHi |= 0x00100000u;
1399 __shortShift64Left(aFracHi, aFracLo, 11, term0, term1);
1400 zFrac0 = (__estimateSqrt32(aExp, term0)>>1) + 1u;
1401 if (zFrac0 == 0u)
1402 zFrac0 = 0x7FFFFFFFu;
1403 doubleZFrac0 = zFrac0 + zFrac0;
1404 __shortShift64Left(aFracHi, aFracLo, 9 - (aExp & 1), aFracHi, aFracLo);
1405 __mul32To64(zFrac0, zFrac0, term0, term1);
1406 __sub64(aFracHi, aFracLo, term0, term1, rem0, rem1);
1407 while (int(rem0) < 0) {
1408 --zFrac0;
1409 doubleZFrac0 -= 2u;
1410 __add64(rem0, rem1, 0u, doubleZFrac0 | 1u, rem0, rem1);
1411 }
1412 zFrac1 = __estimateDiv64To32(rem1, 0u, doubleZFrac0);
1413 if ((zFrac1 & 0x1FFu) <= 5u) {
1414 if (zFrac1 == 0u)
1415 zFrac1 = 1u;
1416 __mul32To64(doubleZFrac0, zFrac1, term1, term2);
1417 __sub64(rem1, 0u, term1, term2, rem1, rem2);
1418 __mul32To64(zFrac1, zFrac1, term2, term3);
1419 __sub96(rem1, rem2, 0u, 0u, term2, term3, rem1, rem2, rem3);
1420 while (int(rem1) < 0) {
1421 --zFrac1;
1422 __shortShift64Left(0u, zFrac1, 1, term2, term3);
1423 term3 |= 1u;
1424 term2 |= doubleZFrac0;
1425 __add96(rem1, rem2, rem3, 0u, term2, term3, rem1, rem2, rem3);
1426 }
1427 zFrac1 |= uint((rem1 | rem2 | rem3) != 0u);
1428 }
1429 __shift64ExtraRightJamming(zFrac0, zFrac1, 0u, 10, zFrac0, zFrac1, zFrac2);
1430 return __roundAndPackFloat64(0u, zExp, zFrac0, zFrac1, zFrac2);
1431 }
1432
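/* Returns `a' rounded toward zero to an integral value (trunc), obtained by
 * masking off the fraction bits that lie below the binary point.
 */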
1433 uint64_t
1434 __ftrunc64(uint64_t __a)
1435 {
1436 uvec2 a = unpackUint2x32(__a);
1437 int aExp = __extractFloat64Exp(__a);
1438 uint zLo;
1439 uint zHi;
1440
1441 int unbiasedExp = aExp - 1023;
1442 int fracBits = 52 - unbiasedExp;
1443 uint maskLo = mix(~0u << fracBits, 0u, fracBits >= 32);
1444 uint maskHi = mix(~0u << (fracBits - 32), ~0u, fracBits < 33);
1445 zLo = maskLo & a.x;
1446 zHi = maskHi & a.y;
1447
1448 zLo = mix(zLo, 0u, unbiasedExp < 0);
1449 zHi = mix(zHi, 0u, unbiasedExp < 0);
1450 zLo = mix(zLo, a.x, unbiasedExp > 52);
1451 zHi = mix(zHi, a.y, unbiasedExp > 52);
1452 return packUint2x32(uvec2(zLo, zHi));
1453 }
1454
1455 uint64_t
1456 __ffloor64(uint64_t a)
1457 {
1458 bool is_positive = __fge64(a, 0ul);
1459 uint64_t tr = __ftrunc64(a);
1460
1461 if (is_positive || __feq64(tr, a)) {
1462 return tr;
1463 } else {
1464 return __fadd64(tr, 0xbff0000000000000ul /* -1.0 */);
1465 }
1466 }
1467
1468 uint64_t
1469 __fround64(uint64_t __a)
1470 {
1471 uvec2 a = unpackUint2x32(__a);
1472 int unbiasedExp = __extractFloat64Exp(__a) - 1023;
1473 uint aHi = a.y;
1474 uint aLo = a.x;
1475
1476 if (unbiasedExp < 20) {
1477 if (unbiasedExp < 0) {
1478 aHi &= 0x80000000u;
1479          if (unbiasedExp == -1 && ((a.y & 0x000FFFFFu) | aLo) != 0u)
1480 aHi |= (1023u << 20);
1481 aLo = 0u;
1482 } else {
1483 uint maskExp = 0x000FFFFFu >> unbiasedExp;
1484 /* a is an integral value */
1485 if (((aHi & maskExp) == 0u) && (aLo == 0u))
1486 return __a;
1487
1488 aHi += 0x00080000u >> unbiasedExp;
1489 aHi &= ~maskExp;
1490 aLo = 0u;
1491 }
1492 } else if (unbiasedExp > 51 || unbiasedExp == 1024) {
1493 return __a;
1494 } else {
1495 uint maskExp = 0xFFFFFFFFu >> (unbiasedExp - 20);
1496 if ((aLo & maskExp) == 0u)
1497 return __a;
1498 uint tmp = aLo + (1u << (51 - unbiasedExp));
1499 if(tmp < aLo)
1500 aHi += 1u;
1501 aLo = tmp;
1502 aLo &= ~maskExp;
1503 }
1504
1505 a.x = aLo;
1506 a.y = aHi;
1507 return packUint2x32(a);
1508 }
1509
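/* Returns the smaller of `a' and `b'. If exactly one operand is a NaN, the
 * other operand is returned; __fmax64 below behaves the same way.
 */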
1510 uint64_t
1511 __fmin64(uint64_t a, uint64_t b)
1512 {
1513 if (__is_nan(a)) return b;
1514 if (__is_nan(b)) return a;
1515
1516 if (__flt64_nonnan(a, b)) return a;
1517 return b;
1518 }
1519
1520 uint64_t
1521 __fmax64(uint64_t a, uint64_t b)
1522 {
1523 if (__is_nan(a)) return b;
1524 if (__is_nan(b)) return a;
1525
1526 if (__flt64_nonnan(a, b)) return b;
1527 return a;
1528 }
1529
1530 uint64_t
1531 __ffract64(uint64_t a)
1532 {
1533 return __fadd64(a, __fneg64(__ffloor64(a)));
1534 }