/**************************************************************************
 *
 * Copyright 2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * @file
 * Math utilities and approximations for common math functions.
 * Reduced precision is usually acceptable in shaders...
 *
 * "fast" is used in the names of functions which are low-precision,
 * or at least lower-precision than the normal C lib functions.
 */


#ifndef U_MATH_H
#define U_MATH_H


#include "pipe/p_compiler.h"

#include <math.h>
#include <float.h>
#include <string.h>
#include <assert.h>

#include "util/bitscan.h"


#ifdef __cplusplus
extern "C" {
#endif


#ifndef M_SQRT2
#define M_SQRT2 1.41421356237309504880
#endif


#define POW2_TABLE_SIZE_LOG2 9
#define POW2_TABLE_SIZE (1 << POW2_TABLE_SIZE_LOG2)
#define POW2_TABLE_OFFSET (POW2_TABLE_SIZE/2)
#define POW2_TABLE_SCALE ((float)(POW2_TABLE_SIZE/2))
extern float pow2_table[POW2_TABLE_SIZE];


/**
 * Initialize math module.  This should be called before using any
 * other functions in this module.
 */
extern void
util_init_math(void);


union fi {
   float f;
   int32_t i;
   uint32_t ui;
};


union di {
   double d;
   int64_t i;
   uint64_t ui;
};


/**
 * Extract the IEEE float32 exponent.
 */
static inline signed
util_get_float32_exponent(float x)
{
   union fi f;

   f.f = x;

   return ((f.ui >> 23) & 0xff) - 127;
}


/**
 * Fast version of 2^x
 * Identity: exp2(a + b) = exp2(a) * exp2(b)
 * Let ipart = (int) x
 * Let fpart = x - ipart;
 * So, exp2(x) = exp2(ipart) * exp2(fpart)
 * Compute exp2(ipart) with i << ipart
 * Compute exp2(fpart) with lookup table.
 */
static inline float
util_fast_exp2(float x)
{
   int32_t ipart;
   float fpart, mpart;
   union fi epart;

   if (x > 129.00000f)
      return 3.402823466e+38f;

   if (x < -126.99999f)
      return 0.0f;

   ipart = (int32_t) x;
   fpart = x - (float) ipart;

   /* same as
    *   epart.f = (float) (1 << ipart)
    * but faster and without integer overflow for ipart > 31
    */
   epart.i = (ipart + 127) << 23;

   mpart = pow2_table[POW2_TABLE_OFFSET + (int)(fpart * POW2_TABLE_SCALE)];

   return epart.f * mpart;
}
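
/*
 * Worked example of the identity above (illustrative note, not part of the
 * original header): for x = 2.5f, ipart = 2 and fpart = 0.5, so
 *
 *    util_fast_exp2(2.5f)
 *       = epart.f * mpart
 *       = 4.0f * pow2_table[POW2_TABLE_OFFSET + 128]    (~ 2^0.5 ~ 1.41421f)
 *       ~ 5.6569f                                       (exact: 5.65685...)
 *
 * The fractional part is quantized to POW2_TABLE_SIZE_LOG2 bits of index,
 * which bounds the relative error of the approximation.
 */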


/**
 * Fast approximation to exp(x).
 */
static inline float
util_fast_exp(float x)
{
   const float k = 1.44269f; /* = log2(e) */
   return util_fast_exp2(k * x);
}


#define LOG2_TABLE_SIZE_LOG2 16
#define LOG2_TABLE_SCALE (1 << LOG2_TABLE_SIZE_LOG2)
#define LOG2_TABLE_SIZE (LOG2_TABLE_SCALE + 1)
extern float log2_table[LOG2_TABLE_SIZE];


/**
 * Fast approximation to log2(x).
 */
static inline float
util_fast_log2(float x)
{
   union fi num;
   float epart, mpart;
   num.f = x;
   epart = (float)(((num.i & 0x7f800000) >> 23) - 127);
   /* mpart = log2_table[mantissa*LOG2_TABLE_SCALE + 0.5] */
   mpart = log2_table[((num.i & 0x007fffff) +
                       (1 << (22 - LOG2_TABLE_SIZE_LOG2))) >>
                      (23 - LOG2_TABLE_SIZE_LOG2)];
   return epart + mpart;
}
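
/*
 * Worked example (illustrative note, not part of the original header):
 * x = 12.0f is stored as 1.5 * 2^3, so epart = 3.0f and mpart is the table
 * entry for a mantissa of 1.5, i.e. ~log2(1.5) ~ 0.585f, giving
 * util_fast_log2(12.0f) ~ 3.585f (exact: 3.58496...). Accuracy is limited
 * by the LOG2_TABLE_SIZE_LOG2-bit quantization of the mantissa index.
 */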


/**
 * Fast approximation to x^y.
 */
static inline float
util_fast_pow(float x, float y)
{
   return util_fast_exp2(util_fast_log2(x) * y);
}
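
/*
 * Illustrative note (not part of the original header): this composes the
 * two approximations above via x^y = exp2(y * log2(x)), so their errors
 * compound; util_fast_pow(3.0f, 2.0f) lands near, but not exactly on, 9.0f.
 * x must be positive, since util_fast_log2 reads the raw exponent bits.
 */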


/**
 * Floor(x), returned as int.
 * Note that this counts on the IEEE-754 float representation.
 */
static inline int
util_ifloor(float f)
{
   int ai, bi;
   double af, bf;
   union fi u;
   af = (3 << 22) + 0.5 + (double) f;
   bf = (3 << 22) + 0.5 - (double) f;
   u.f = (float) af;  ai = u.i;
   u.f = (float) bf;  bi = u.i;
   return (ai - bi) >> 1;
}
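
/*
 * How the bias trick works (illustrative note, not part of the original
 * header): 3 << 22 = 1.5 * 2^23, so floats near that magnitude have a ulp
 * of exactly 1.0 and the conversion to float discards f's fraction by
 * rounding. Subtracting the two bit patterns cancels the bias, leaving
 * 2*floor(f) or 2*floor(f) + 1, so the arithmetic shift recovers floor(f).
 * E.g. for f = -1.5f:
 *
 *    af rounds to (3 << 22) - 1, bf rounds to (3 << 22) + 2,
 *    ai - bi = -3, and -3 >> 1 = -2 = floor(-1.5).
 */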


/**
 * Round float to nearest int.
 */
static inline int
util_iround(float f)
{
#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86)
   int r;
   __asm__ ("fistpl %0" : "=m" (r) : "t" (f) : "st");
   return r;
#elif defined(PIPE_CC_MSVC) && defined(PIPE_ARCH_X86)
   int r;
   _asm {
      fld f
      fistp r
   }
   return r;
#else
   if (f >= 0.0f)
      return (int) (f + 0.5f);
   else
      return (int) (f - 0.5f);
#endif
}


/**
 * Approximate floating point comparison
 */
static inline boolean
util_is_approx(float a, float b, float tol)
{
   return fabsf(b - a) <= tol;
}


/**
 * util_is_X_inf_or_nan = test if x is NaN or +/- Inf
 * util_is_X_nan        = test if x is NaN
 * util_X_inf_sign      = return +1 for +Inf, -1 for -Inf, or 0 for not Inf
 *
 * NaN can be checked with x != x, however this fails with the fast math
 * flag and isn't portable.
 */

/**
 * Single-float
 */
static inline boolean
util_is_inf_or_nan(float x)
{
   union fi tmp;
   tmp.f = x;
   return (tmp.ui & 0x7f800000) == 0x7f800000;
}


static inline boolean
util_is_nan(float x)
{
   union fi tmp;
   tmp.f = x;
   return (tmp.ui & 0x7fffffff) > 0x7f800000;
}


static inline int
util_inf_sign(float x)
{
   union fi tmp;
   tmp.f = x;
   if ((tmp.ui & 0x7fffffff) != 0x7f800000) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}
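
/*
 * Background for the masks above (illustrative note, not part of the
 * original header): an IEEE float32 is 1 sign bit, 8 exponent bits
 * (mask 0x7f800000) and 23 mantissa bits (mask 0x007fffff). An all-ones
 * exponent encodes Inf (mantissa == 0) or NaN (mantissa != 0), so after
 * clearing the sign bit, NaN bit patterns are exactly those > 0x7f800000.
 * The double and half variants below use the same layout with 11/52 and
 * 5/10 exponent/mantissa bits respectively.
 */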

/**
 * Double-float
 */
static inline boolean
util_is_double_inf_or_nan(double x)
{
   union di tmp;
   tmp.d = x;
   return (tmp.ui & 0x7ff0000000000000ULL) == 0x7ff0000000000000ULL;
}


static inline boolean
util_is_double_nan(double x)
{
   union di tmp;
   tmp.d = x;
   return (tmp.ui & 0x7fffffffffffffffULL) > 0x7ff0000000000000ULL;
}


static inline int
util_double_inf_sign(double x)
{
   union di tmp;
   tmp.d = x;
   if ((tmp.ui & 0x7fffffffffffffffULL) != 0x7ff0000000000000ULL) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}


/**
 * Half-float
 */
static inline boolean
util_is_half_inf_or_nan(int16_t x)
{
   return (x & 0x7c00) == 0x7c00;
}


static inline boolean
util_is_half_nan(int16_t x)
{
   return (x & 0x7fff) > 0x7c00;
}


static inline int
util_half_inf_sign(int16_t x)
{
   if ((x & 0x7fff) != 0x7c00) {
      return 0;
   }

   return (x < 0) ? -1 : 1;
}


/**
 * Return float bits.
 */
static inline unsigned
fui(float f)
{
   union fi fi;
   fi.f = f;
   return fi.ui;
}


/**
 * Convert ubyte to float in [0, 1].
 */
static inline float
ubyte_to_float(ubyte ub)
{
   return (float) ub * (1.0f / 255.0f);
}


/**
 * Convert float in [0,1] to ubyte in [0,255] with clamping.
 */
static inline ubyte
float_to_ubyte(float f)
{
   /* return 0 for NaN too */
   if (!(f > 0.0f)) {
      return (ubyte) 0;
   }
   else if (f >= 1.0f) {
      return (ubyte) 255;
   }
   else {
      union fi tmp;
      tmp.f = f;
      tmp.f = tmp.f * (255.0f/256.0f) + 32768.0f;
      return (ubyte) tmp.i;
   }
}
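
/*
 * Why the bias trick rounds correctly (illustrative note, not part of the
 * original header): floats in [32768.0, 65536.0) have a ulp of 1/256, so
 * the addition quantizes f*(255/256) to multiples of 1/256 and the low
 * eight mantissa bits of tmp.i come out as f*255 rounded to nearest.
 * E.g. float_to_ubyte(0.5f): tmp.f = 32768.0f + 127.5/256, the tie rounds
 * to even, and the result is 128.
 */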

static inline float
byte_to_float_tex(int8_t b)
{
   return (b == -128) ? -1.0F : b * 1.0F / 127.0F;
}

static inline int8_t
float_to_byte_tex(float f)
{
   return (int8_t) (127.0F * f);
}


/**
 * Calc log base 2
 */
static inline unsigned
util_logbase2(unsigned n)
{
#if defined(HAVE___BUILTIN_CLZ)
   return ((sizeof(unsigned) * 8 - 1) - __builtin_clz(n | 1));
#else
   unsigned pos = 0;
   if (n >= 1<<16) { n >>= 16; pos += 16; }
   if (n >= 1<< 8) { n >>=  8; pos +=  8; }
   if (n >= 1<< 4) { n >>=  4; pos +=  4; }
   if (n >= 1<< 2) { n >>=  2; pos +=  2; }
   if (n >= 1<< 1) { pos +=  1; }
   return pos;
#endif
}
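
/*
 * Illustrative note (not part of the original header): this returns
 * floor(log2(n)), e.g. util_logbase2(1000) == 9. The "n | 1" in the
 * builtin path keeps __builtin_clz's argument nonzero (clz(0) is
 * undefined) and makes util_logbase2(0) return 0.
 */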

static inline uint64_t
util_logbase2_64(uint64_t n)
{
#if defined(HAVE___BUILTIN_CLZLL)
   return ((sizeof(uint64_t) * 8 - 1) - __builtin_clzll(n | 1));
#else
   uint64_t pos = 0ull;
   if (n >= 1ull<<32) { n >>= 32; pos += 32; }
   if (n >= 1ull<<16) { n >>= 16; pos += 16; }
   if (n >= 1ull<< 8) { n >>=  8; pos +=  8; }
   if (n >= 1ull<< 4) { n >>=  4; pos +=  4; }
   if (n >= 1ull<< 2) { n >>=  2; pos +=  2; }
   if (n >= 1ull<< 1) { pos +=  1; }
   return pos;
#endif
}

/**
 * Returns the ceiling of log n base 2, and 0 when n == 0. Equivalently,
 * returns the smallest x such that n <= 2**x.
 */
static inline unsigned
util_logbase2_ceil(unsigned n)
{
   if (n <= 1)
      return 0;

   return 1 + util_logbase2(n - 1);
}

static inline uint64_t
util_logbase2_ceil64(uint64_t n)
{
   if (n <= 1)
      return 0;

   return 1ull + util_logbase2_64(n - 1);
}


/**
 * Returns the smallest power of two >= x
 */
static inline unsigned
util_next_power_of_two(unsigned x)
{
#if defined(HAVE___BUILTIN_CLZ)
   if (x <= 1)
      return 1;

   return (1 << ((sizeof(unsigned) * 8) - __builtin_clz(x - 1)));
#else
   unsigned val = x;

   if (x <= 1)
      return 1;

   if (util_is_power_of_two_or_zero(x))
      return x;

   val--;
   val = (val >> 1)  | val;
   val = (val >> 2)  | val;
   val = (val >> 4)  | val;
   val = (val >> 8)  | val;
   val = (val >> 16) | val;
   val++;

   return val;
#endif
}
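
/*
 * How the fallback works (illustrative note, not part of the original
 * header): after val--, each shift-or smears the highest set bit over all
 * lower positions, producing 2^k - 1, and val++ then yields 2^k. E.g. for
 * x = 17: val = 16 = 0b10000 smears to 0b11111 = 31, and 31 + 1 = 32.
 */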

static inline uint64_t
util_next_power_of_two64(uint64_t x)
{
#if defined(HAVE___BUILTIN_CLZLL)
   if (x <= 1)
      return 1;

   return (1ull << ((sizeof(uint64_t) * 8) - __builtin_clzll(x - 1)));
#else
   uint64_t val = x;

   if (x <= 1)
      return 1;

   if (util_is_power_of_two_or_zero64(x))
      return x;

   val--;
   val = (val >> 1)  | val;
   val = (val >> 2)  | val;
   val = (val >> 4)  | val;
   val = (val >> 8)  | val;
   val = (val >> 16) | val;
   val = (val >> 32) | val;
   val++;

   return val;
#endif
}


/**
 * Return number of bits set in n.
 */
static inline unsigned
util_bitcount(unsigned n)
{
#if defined(HAVE___BUILTIN_POPCOUNT)
   return __builtin_popcount(n);
#else
   /* K&R classic bitcount.
    *
    * For each iteration, clear the LSB from the bitfield.
    * Requires only one iteration per set bit, instead of
    * one iteration per bit less than highest set bit.
    */
   unsigned bits;
   for (bits = 0; n; bits++) {
      n &= n - 1;
   }
   return bits;
#endif
}
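
/*
 * Illustrative trace of the fallback (not part of the original header):
 * for n = 0x2c = 0b101100, "n &= n - 1" clears one set bit per iteration:
 * 0b101100 -> 0b101000 -> 0b100000 -> 0, so util_bitcount(0x2c) == 3.
 */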

static inline unsigned
util_bitcount64(uint64_t n)
{
#ifdef HAVE___BUILTIN_POPCOUNTLL
   return __builtin_popcountll(n);
#else
   return util_bitcount(n) + util_bitcount(n >> 32);
#endif
}


/**
 * Algorithm taken from:
 * http://stackoverflow.com/questions/9144800/c-reverse-bits-in-unsigned-integer
 */
static inline unsigned
util_bitreverse(unsigned n)
{
   n = ((n >> 1) & 0x55555555u) | ((n & 0x55555555u) << 1);
   n = ((n >> 2) & 0x33333333u) | ((n & 0x33333333u) << 2);
   n = ((n >> 4) & 0x0f0f0f0fu) | ((n & 0x0f0f0f0fu) << 4);
   n = ((n >> 8) & 0x00ff00ffu) | ((n & 0x00ff00ffu) << 8);
   n = ((n >> 16) & 0xffffu) | ((n & 0xffffu) << 16);
   return n;
}
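
/*
 * Illustrative note (not part of the original header): the five steps swap
 * adjacent bits, then 2-bit pairs, nibbles, bytes, and 16-bit halves, which
 * composes to a full 32-bit reversal in O(log 32) operations. E.g.
 * util_bitreverse(0x00000001) == 0x80000000.
 */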


/**
 * Convert from little endian to CPU byte order.
 */

#ifdef PIPE_ARCH_BIG_ENDIAN
#define util_le64_to_cpu(x) util_bswap64(x)
#define util_le32_to_cpu(x) util_bswap32(x)
#define util_le16_to_cpu(x) util_bswap16(x)
#else
#define util_le64_to_cpu(x) (x)
#define util_le32_to_cpu(x) (x)
#define util_le16_to_cpu(x) (x)
#endif

#define util_cpu_to_le64(x) util_le64_to_cpu(x)
#define util_cpu_to_le32(x) util_le32_to_cpu(x)
#define util_cpu_to_le16(x) util_le16_to_cpu(x)


/**
 * Reverse byte order of a 32 bit word.
 */
static inline uint32_t
util_bswap32(uint32_t n)
{
#if defined(HAVE___BUILTIN_BSWAP32)
   return __builtin_bswap32(n);
#else
   return (n >> 24) |
          ((n >> 8) & 0x0000ff00) |
          ((n << 8) & 0x00ff0000) |
          (n << 24);
#endif
}


/**
 * Reverse byte order of a 64 bit word.
 */
static inline uint64_t
util_bswap64(uint64_t n)
{
#if defined(HAVE___BUILTIN_BSWAP64)
   return __builtin_bswap64(n);
#else
   return ((uint64_t)util_bswap32((uint32_t)n) << 32) |
          util_bswap32((uint32_t)(n >> 32));
#endif
}


/**
 * Reverse byte order of a 16 bit word.
 */
static inline uint16_t
util_bswap16(uint16_t n)
{
   return (n >> 8) |
          (n << 8);
}

/**
 * Copy the 32-bit words of src to dest, converting from CPU to little
 * endian byte order. n is the byte count and must be a multiple of 4.
 */
static inline void*
util_memcpy_cpu_to_le32(void * restrict dest, const void * restrict src, size_t n)
{
#ifdef PIPE_ARCH_BIG_ENDIAN
   size_t i, e;
   assert(n % 4 == 0);

   for (i = 0, e = n / 4; i < e; i++) {
      uint32_t * restrict d = (uint32_t* restrict)dest;
      const uint32_t * restrict s = (const uint32_t* restrict)src;
      d[i] = util_bswap32(s[i]);
   }
   return dest;
#else
   return memcpy(dest, src, n);
#endif
}


/**
 * Clamp X to [MIN, MAX].
 * This is a macro to allow float, int, uint, etc. types.
 * We arbitrarily turn NaN into MIN.
 */
#define CLAMP( X, MIN, MAX )  ( (X)>(MIN) ? ((X)>(MAX) ? (MAX) : (X)) : (MIN) )

#define MIN2( A, B )   ( (A)<(B) ? (A) : (B) )
#define MAX2( A, B )   ( (A)>(B) ? (A) : (B) )

#define MIN3( A, B, C ) ((A) < (B) ? MIN2(A, C) : MIN2(B, C))
#define MAX3( A, B, C ) ((A) > (B) ? MAX2(A, C) : MAX2(B, C))

#define MIN4( A, B, C, D ) ((A) < (B) ? MIN3(A, C, D) : MIN3(B, C, D))
#define MAX4( A, B, C, D ) ((A) > (B) ? MAX3(A, C, D) : MAX3(B, C, D))
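
/*
 * Illustrative note (not part of the original header): the NaN behavior
 * falls out of the comparison order; since NaN > MIN is false, CLAMP(NaN,
 * MIN, MAX) takes the else branch and returns MIN. As with all of these
 * macros, arguments are evaluated more than once, so avoid side effects
 * such as CLAMP(x++, 0, 255).
 */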


/**
 * Align a value; only works for power-of-two (pot) alignments.
 */
static inline int
align(int value, int alignment)
{
   return (value + alignment - 1) & ~(alignment - 1);
}

static inline uint64_t
align64(uint64_t value, unsigned alignment)
{
   return (value + alignment - 1) & ~((uint64_t)alignment - 1);
}
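
/*
 * Worked example (illustrative note, not part of the original header):
 * align(13, 8) computes (13 + 7) & ~7 = 20 & ~7 = 16. The mask trick
 * requires a power-of-two alignment; use util_align_npot() below for
 * arbitrary alignments.
 */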

/**
 * Works like align but on npot (non-power-of-two) alignments.
 */
static inline size_t
util_align_npot(size_t value, size_t alignment)
{
   if (value % alignment)
      return value + (alignment - (value % alignment));
   return value;
}

static inline unsigned
u_minify(unsigned value, unsigned levels)
{
   return MAX2(1, value >> levels);
}
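
/*
 * Illustrative note (not part of the original header): this computes
 * mipmap level sizes, clamped so the chain never reaches zero, e.g. for a
 * 64-texel dimension, u_minify(64, 3) == 8 and u_minify(64, 7) == 1.
 */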


#define COPY_4V( DST, SRC )         \
do {                                \
   (DST)[0] = (SRC)[0];             \
   (DST)[1] = (SRC)[1];             \
   (DST)[2] = (SRC)[2];             \
   (DST)[3] = (SRC)[3];             \
} while (0)


#define COPY_4FV( DST, SRC )  COPY_4V(DST, SRC)


#define ASSIGN_4V( DST, V0, V1, V2, V3 ) \
do {                                     \
   (DST)[0] = (V0);                      \
   (DST)[1] = (V1);                      \
   (DST)[2] = (V2);                      \
   (DST)[3] = (V3);                      \
} while (0)


static inline uint32_t
util_unsigned_fixed(float value, unsigned frac_bits)
{
   return value < 0 ? 0 : (uint32_t)(value * (1<<frac_bits));
}

static inline int32_t
util_signed_fixed(float value, unsigned frac_bits)
{
   return (int32_t)(value * (1<<frac_bits));
}
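
/*
 * Worked example (illustrative note, not part of the original header):
 * with 8 fractional bits, util_unsigned_fixed(1.5f, 8) == 384 (1.5 * 256)
 * and util_signed_fixed(-1.25f, 4) == -20 (-1.25 * 16). Note the cast
 * truncates toward zero rather than rounding.
 */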

unsigned
util_fpstate_get(void);
unsigned
util_fpstate_set_denorms_to_zero(unsigned current_fpstate);
void
util_fpstate_set(unsigned fpstate);


#ifdef __cplusplus
}
#endif

#endif /* U_MATH_H */