/**************************************************************************
 *
 * Copyright 2009-2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * @file
 * Helper arithmetic functions.
 *
 * LLVM IR doesn't support all basic arithmetic operations we care about (most
 * notably min/max and saturated operations), and it is often necessary to
 * resort to machine-specific intrinsics directly. The functions here hide all
 * these implementation details from the other modules.
 *
 * We also do simple expression simplification here, because:
 * - it is very easy given we have all necessary information readily available
 * - LLVM optimization passes fail to simplify several vector expressions
 * - we often know value constraints which the optimization passes have no way
 *   of knowing, such as when source arguments are known to be in [0, 1] range.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */
#include "util/u_memory.h"
#include "util/u_debug.h"
#include "util/u_math.h"
#include "util/u_string.h"
#include "util/u_cpu_detect.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_intr.h"
#include "lp_bld_logic.h"
#include "lp_bld_pack.h"
#include "lp_bld_debug.h"
#include "lp_bld_arit.h"
#define EXP_POLY_DEGREE 3

#define LOG_POLY_DEGREE 5
/**
 * Generate min(a, b)
 * No checks for special case values of a or b = 1 or 0 are done.
 */
static LLVMValueRef
lp_build_min_simple(struct lp_build_context *bld,
                    LLVMValueRef a,
                    LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   const char *intrinsic = NULL;
   LLVMValueRef cond;

   assert(lp_check_value(type, a));
   assert(lp_check_value(type, b));

   /* TODO: optimize the constant case */

   if(type.width * type.length == 128) {
      if(type.floating) {
         if(type.width == 32 && util_cpu_caps.has_sse)
            intrinsic = "llvm.x86.sse.min.ps";
         if(type.width == 64 && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.min.pd";
      }
      else {
         if(type.width == 8 && !type.sign && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.pminu.b";
         if(type.width == 8 && type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pminsb";
         if(type.width == 16 && !type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pminuw";
         if(type.width == 16 && type.sign && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.pmins.w";
         if(type.width == 32 && !type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pminud";
         if(type.width == 32 && type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pminsd";
      }
   }

   if(intrinsic)
      return lp_build_intrinsic_binary(bld->builder, intrinsic,
                                       lp_build_vec_type(bld->type), a, b);

   cond = lp_build_cmp(bld, PIPE_FUNC_LESS, a, b);
   return lp_build_select(bld, cond, a, b);
}
/**
 * Generate max(a, b)
 * No checks for special case values of a or b = 1 or 0 are done.
 */
static LLVMValueRef
lp_build_max_simple(struct lp_build_context *bld,
                    LLVMValueRef a,
                    LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   const char *intrinsic = NULL;
   LLVMValueRef cond;

   assert(lp_check_value(type, a));
   assert(lp_check_value(type, b));

   /* TODO: optimize the constant case */

   if(type.width * type.length == 128) {
      if(type.floating) {
         if(type.width == 32 && util_cpu_caps.has_sse)
            intrinsic = "llvm.x86.sse.max.ps";
         if(type.width == 64 && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.max.pd";
      }
      else {
         if(type.width == 8 && !type.sign && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.pmaxu.b";
         if(type.width == 8 && type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pmaxsb";
         if(type.width == 16 && !type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pmaxuw";
         if(type.width == 16 && type.sign && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.pmaxs.w";
         if(type.width == 32 && !type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pmaxud";
         if(type.width == 32 && type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pmaxsd";
      }
   }

   if(intrinsic)
      return lp_build_intrinsic_binary(bld->builder, intrinsic,
                                       lp_build_vec_type(bld->type), a, b);

   cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, b);
   return lp_build_select(bld, cond, a, b);
}
/**
 * Generate 1 - a, or ~a depending on bld->type.
 */
LLVMValueRef
lp_build_comp(struct lp_build_context *bld,
              LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(lp_check_value(type, a));

   if(type.norm && !type.floating && !type.fixed && !type.sign) {
      if(LLVMIsConstant(a))
         return LLVMConstNot(a);
      else
         return LLVMBuildNot(bld->builder, a, "");
   }

   if(LLVMIsConstant(a))
      if (type.floating)
         return LLVMConstFSub(bld->one, a);
      else
         return LLVMConstSub(bld->one, a);
   else
      if (type.floating)
         return LLVMBuildFSub(bld->builder, bld->one, a, "");
      else
         return LLVMBuildSub(bld->builder, bld->one, a, "");
}
/**
 * Generate a + b
 */
LLVMValueRef
lp_build_add(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   LLVMValueRef res;

   assert(lp_check_value(type, a));
   assert(lp_check_value(type, b));

   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(bld->type.norm) {
      const char *intrinsic = NULL;

      if(a == bld->one || b == bld->one)
         return bld->one;

      if(util_cpu_caps.has_sse2 &&
         type.width * type.length == 128 &&
         !type.floating && !type.fixed) {
         if(type.width == 8)
            intrinsic = type.sign ? "llvm.x86.sse2.padds.b" : "llvm.x86.sse2.paddus.b";
         if(type.width == 16)
            intrinsic = type.sign ? "llvm.x86.sse2.padds.w" : "llvm.x86.sse2.paddus.w";
      }

      if(intrinsic)
         return lp_build_intrinsic_binary(bld->builder, intrinsic,
                                          lp_build_vec_type(bld->type), a, b);
   }

   if(LLVMIsConstant(a) && LLVMIsConstant(b))
      if (type.floating)
         res = LLVMConstFAdd(a, b);
      else
         res = LLVMConstAdd(a, b);
   else
      if (type.floating)
         res = LLVMBuildFAdd(bld->builder, a, b, "");
      else
         res = LLVMBuildAdd(bld->builder, a, b, "");

   /* clamp to ceiling of 1.0 */
   if(bld->type.norm && (bld->type.floating || bld->type.fixed))
      res = lp_build_min_simple(bld, res, bld->one);

   /* XXX clamp to floor of -1 or 0??? */

   return res;
}
/** Return the scalar sum of the elements of a */
LLVMValueRef
lp_build_sum_vector(struct lp_build_context *bld,
                    LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMValueRef index, res;
   unsigned i;

   assert(lp_check_value(type, a));

   if (type.length == 1) {
      return a;
   }

   assert(!bld->type.norm);

   index = LLVMConstInt(LLVMInt32Type(), 0, 0);
   res = LLVMBuildExtractElement(bld->builder, a, index, "");

   for (i = 1; i < type.length; i++) {
      index = LLVMConstInt(LLVMInt32Type(), i, 0);
      if (type.floating)
         res = LLVMBuildFAdd(bld->builder, res,
                             LLVMBuildExtractElement(bld->builder,
                                                     a, index, ""),
                             "");
      else
         res = LLVMBuildAdd(bld->builder, res,
                            LLVMBuildExtractElement(bld->builder,
                                                    a, index, ""),
                            "");
   }

   return res;
}
/**
 * Generate a - b
 */
LLVMValueRef
lp_build_sub(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   LLVMValueRef res;

   assert(lp_check_value(type, a));
   assert(lp_check_value(type, b));

   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(bld->type.norm) {
      const char *intrinsic = NULL;

      if(util_cpu_caps.has_sse2 &&
         type.width * type.length == 128 &&
         !type.floating && !type.fixed) {
         if(type.width == 8)
            intrinsic = type.sign ? "llvm.x86.sse2.psubs.b" : "llvm.x86.sse2.psubus.b";
         if(type.width == 16)
            intrinsic = type.sign ? "llvm.x86.sse2.psubs.w" : "llvm.x86.sse2.psubus.w";
      }

      if(intrinsic)
         return lp_build_intrinsic_binary(bld->builder, intrinsic,
                                          lp_build_vec_type(bld->type), a, b);
   }

   if(LLVMIsConstant(a) && LLVMIsConstant(b))
      if (type.floating)
         res = LLVMConstFSub(a, b);
      else
         res = LLVMConstSub(a, b);
   else
      if (type.floating)
         res = LLVMBuildFSub(bld->builder, a, b, "");
      else
         res = LLVMBuildSub(bld->builder, a, b, "");

   /* clamp to floor of 0.0 */
   if(bld->type.norm && (bld->type.floating || bld->type.fixed))
      res = lp_build_max_simple(bld, res, bld->zero);

   return res;
}
/**
 * Normalized 8bit multiplication.
 *
 * - alpha plus one
 *
 *     makes the following approximation to the division (Sree)
 *
 *       a*b/255 ~= (a*(b + 1)) >> 8
 *
 *     which is the fastest method that satisfies the following OpenGL criteria
 *
 *       0*0 = 0 and 255*255 = 255
 *
 * - geometric series
 *
 *     takes the geometric series approximation to the division
 *
 *       t/255 = (t >> 8) + (t >> 16) + (t >> 24) ..
 *
 *     in this case just the first two terms to fit in 16bit arithmetic
 *
 *       t/255 ~= (t + (t >> 8)) >> 8
 *
 *     note that just by itself it doesn't satisfy the OpenGL criteria, as
 *     255*255 = 254, so the special case b = 255 must be accounted for or
 *     roundoff must be used
 *
 * - geometric series plus rounding
 *
 *     when using a geometric series division instead of truncating the result
 *     use roundoff in the approximation (Jim Blinn)
 *
 *       t/255 ~= (t + (t >> 8) + 0x80) >> 8
 *
 *     achieving exact results
 *
 * @sa Alvy Ray Smith, Image Compositing Fundamentals, Tech Memo 4, Aug 15, 1995,
 *     ftp://ftp.alvyray.com/Acrobat/4_Comp.pdf
 * @sa Michael Herf, The "double blend trick", May 2000,
 *     http://www.stereopsis.com/doubleblend.html
 */
static LLVMValueRef
lp_build_mul_u8n(LLVMBuilderRef builder,
                 struct lp_type i16_type,
                 LLVMValueRef a, LLVMValueRef b)
{
   LLVMValueRef c8;
   LLVMValueRef ab;

   assert(!i16_type.floating);
   assert(lp_check_value(i16_type, a));
   assert(lp_check_value(i16_type, b));

   c8 = lp_build_const_int_vec(i16_type, 8);

#if 0

   /* a*b/255 ~= (a*(b + 1)) >> 8 */
   b = LLVMBuildAdd(builder, b, lp_build_const_int_vec(i16_type, 1), "");
   ab = LLVMBuildMul(builder, a, b, "");

#else

   /* ab/255 ~= (ab + (ab >> 8) + 0x80) >> 8 */
   ab = LLVMBuildMul(builder, a, b, "");
   ab = LLVMBuildAdd(builder, ab, LLVMBuildLShr(builder, ab, c8, ""), "");
   ab = LLVMBuildAdd(builder, ab, lp_build_const_int_vec(i16_type, 0x80), "");

#endif

   ab = LLVMBuildLShr(builder, ab, c8, "");

   return ab;
}
/**
 * Generate a * b
 */
LLVMValueRef
lp_build_mul(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   LLVMValueRef shift;
   LLVMValueRef res;

   assert(lp_check_value(type, a));
   assert(lp_check_value(type, b));

   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(!type.floating && !type.fixed && type.norm) {
      if(type.width == 8) {
         struct lp_type i16_type = lp_wider_type(type);
         LLVMValueRef al, ah, bl, bh, abl, abh, ab;

         lp_build_unpack2(bld->builder, type, i16_type, a, &al, &ah);
         lp_build_unpack2(bld->builder, type, i16_type, b, &bl, &bh);

         /* PMULLW, PSRLW, PADDW */
         abl = lp_build_mul_u8n(bld->builder, i16_type, al, bl);
         abh = lp_build_mul_u8n(bld->builder, i16_type, ah, bh);

         ab = lp_build_pack2(bld->builder, i16_type, type, abl, abh);

         return ab;
      }
   }

   if(type.fixed)
      shift = lp_build_const_int_vec(type, type.width/2);
   else
      shift = NULL;

   if(LLVMIsConstant(a) && LLVMIsConstant(b)) {
      if (type.floating)
         res = LLVMConstFMul(a, b);
      else
         res = LLVMConstMul(a, b);
      if(shift) {
         if(type.sign)
            res = LLVMConstAShr(res, shift);
         else
            res = LLVMConstLShr(res, shift);
      }
   }
   else {
      if (type.floating)
         res = LLVMBuildFMul(bld->builder, a, b, "");
      else
         res = LLVMBuildMul(bld->builder, a, b, "");
      if(shift) {
         if(type.sign)
            res = LLVMBuildAShr(bld->builder, res, shift, "");
         else
            res = LLVMBuildLShr(bld->builder, res, shift, "");
      }
   }

   return res;
}
/**
 * Small vector x scale multiplication optimization.
 */
LLVMValueRef
lp_build_mul_imm(struct lp_build_context *bld,
                 LLVMValueRef a,
                 int b)
{
   LLVMValueRef factor;

   assert(lp_check_value(bld->type, a));

   if(b == -1)
      return lp_build_negate(bld, a);

   if(b == 2 && bld->type.floating)
      return lp_build_add(bld, a, a);

   if(util_is_power_of_two(b)) {
      unsigned shift = ffs(b) - 1;

      if(bld->type.floating) {
         /*
          * Power of two multiplication by directly manipulating the mantissa.
          *
          * XXX: This might not always be faster, it will introduce a small
          * error for multiplication by zero, and it will produce wrong results
          * for Inf and NaN.
          */
         unsigned mantissa = lp_mantissa(bld->type);
         factor = lp_build_const_int_vec(bld->type, (unsigned long long)shift << mantissa);
         a = LLVMBuildBitCast(bld->builder, a, lp_build_int_vec_type(bld->type), "");
         a = LLVMBuildAdd(bld->builder, a, factor, "");
         a = LLVMBuildBitCast(bld->builder, a, lp_build_vec_type(bld->type), "");
         return a;
      }
      else {
         factor = lp_build_const_vec(bld->type, shift);
         return LLVMBuildShl(bld->builder, a, factor, "");
      }
   }

   factor = lp_build_const_vec(bld->type, (double)b);
   return lp_build_mul(bld, a, factor);
}
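
#if 0
/*
 * Scalar sketch of the mantissa trick above (illustrative only): adding
 * shift << 23 to the bits of an IEEE single increments the exponent by
 * `shift`, i.e. multiplies a finite non-zero value by 2**shift.
 */
static float
mul_pot_ref(float x, unsigned shift)
{
   union { float f; unsigned i; } u;
   u.f = x;
   u.i += shift << 23;   /* 23 = number of mantissa bits in a float */
   return u.f;           /* wrong for 0.0, Inf and NaN, as noted above */
}
#endif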
/**
 * Generate a / b
 */
LLVMValueRef
lp_build_div(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;

   assert(lp_check_value(type, a));
   assert(lp_check_value(type, b));

   if(a == bld->one)
      return lp_build_rcp(bld, b);

   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(LLVMIsConstant(a) && LLVMIsConstant(b)) {
      if (type.floating)
         return LLVMConstFDiv(a, b);
      else if (type.sign)
         return LLVMConstSDiv(a, b);
      else
         return LLVMConstUDiv(a, b);
   }

   if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
      return lp_build_mul(bld, a, lp_build_rcp(bld, b));

   if (type.floating)
      return LLVMBuildFDiv(bld->builder, a, b, "");
   else if (type.sign)
      return LLVMBuildSDiv(bld->builder, a, b, "");
   else
      return LLVMBuildUDiv(bld->builder, a, b, "");
}
/**
 * Linear interpolation -- without any checks.
 *
 * @sa http://www.stereopsis.com/doubleblend.html
 */
static INLINE LLVMValueRef
lp_build_lerp_simple(struct lp_build_context *bld,
                     LLVMValueRef x,
                     LLVMValueRef v0,
                     LLVMValueRef v1)
{
   LLVMValueRef delta;
   LLVMValueRef res;

   assert(lp_check_value(bld->type, x));
   assert(lp_check_value(bld->type, v0));
   assert(lp_check_value(bld->type, v1));

   delta = lp_build_sub(bld, v1, v0);

   res = lp_build_mul(bld, x, delta);

   res = lp_build_add(bld, v0, res);

   if (bld->type.fixed) {
      /* XXX: This step is necessary for lerping 8bit colors stored on 16bits,
       * but it will be wrong for other uses. Basically we need a more
       * powerful lp_type, capable of further distinguishing the values
       * interpretation from the value storage. */
      res = LLVMBuildAnd(bld->builder, res,
                         lp_build_const_int_vec(bld->type,
                                                (1 << bld->type.width/2) - 1),
                         "");
   }

   return res;
}
/**
 * Linear interpolation.
 */
LLVMValueRef
lp_build_lerp(struct lp_build_context *bld,
              LLVMValueRef x,
              LLVMValueRef v0,
              LLVMValueRef v1)
{
   const struct lp_type type = bld->type;
   LLVMValueRef res;

   assert(lp_check_value(type, x));
   assert(lp_check_value(type, v0));
   assert(lp_check_value(type, v1));

   if (type.norm) {
      struct lp_type wide_type;
      struct lp_build_context wide_bld;
      LLVMValueRef xl, xh, v0l, v0h, v1l, v1h, resl, resh;
      LLVMValueRef shift;

      assert(type.length >= 2);

      /*
       * Create a wider type, enough to hold the intermediate result of the
       * multiplication.
       */
      memset(&wide_type, 0, sizeof wide_type);
      wide_type.fixed  = TRUE;
      wide_type.width  = type.width*2;
      wide_type.length = type.length/2;

      lp_build_context_init(&wide_bld, bld->builder, wide_type);

      lp_build_unpack2(bld->builder, type, wide_type, x,  &xl,  &xh);
      lp_build_unpack2(bld->builder, type, wide_type, v0, &v0l, &v0h);
      lp_build_unpack2(bld->builder, type, wide_type, v1, &v1l, &v1h);

      /*
       * Scale x from [0, 255] to [0, 256]
       */
      shift = lp_build_const_int_vec(wide_type, type.width - 1);

      xl = lp_build_add(&wide_bld, xl,
                        LLVMBuildAShr(bld->builder, xl, shift, ""));
      xh = lp_build_add(&wide_bld, xh,
                        LLVMBuildAShr(bld->builder, xh, shift, ""));

      /*
       * Lerp both halves.
       */
      resl = lp_build_lerp_simple(&wide_bld, xl, v0l, v1l);
      resh = lp_build_lerp_simple(&wide_bld, xh, v0h, v1h);

      res = lp_build_pack2(bld->builder, wide_type, type, resl, resh);
   }
   else {
      res = lp_build_lerp_simple(bld, x, v0, v1);
   }

   return res;
}
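
#if 0
/*
 * Scalar sketch of the normalized lerp above (illustrative only): x is
 * rescaled from [0, 255] to [0, 256] so that x = 255 yields exactly v1
 * after the fixed-point multiply and shift.
 */
static unsigned char
lerp_unorm8_ref(unsigned char x, unsigned char v0, unsigned char v1)
{
   int xs = x + (x >> 7);    /* [0, 255] -> [0, 256] */
   return (unsigned char)(v0 + ((xs * (v1 - v0)) >> 8));
}
#endif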
LLVMValueRef
lp_build_lerp_2d(struct lp_build_context *bld,
                 LLVMValueRef x,
                 LLVMValueRef y,
                 LLVMValueRef v00,
                 LLVMValueRef v01,
                 LLVMValueRef v10,
                 LLVMValueRef v11)
{
   LLVMValueRef v0 = lp_build_lerp(bld, x, v00, v01);
   LLVMValueRef v1 = lp_build_lerp(bld, x, v10, v11);
   return lp_build_lerp(bld, y, v0, v1);
}
/**
 * Generate min(a, b)
 * Do checks for special cases.
 */
LLVMValueRef
lp_build_min(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   assert(lp_check_value(bld->type, a));
   assert(lp_check_value(bld->type, b));

   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(bld->type.norm) {
      if(a == bld->zero || b == bld->zero)
         return bld->zero;
      if(a == bld->one)
         return b;
      if(b == bld->one)
         return a;
   }

   return lp_build_min_simple(bld, a, b);
}
/**
 * Generate max(a, b)
 * Do checks for special cases.
 */
LLVMValueRef
lp_build_max(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   assert(lp_check_value(bld->type, a));
   assert(lp_check_value(bld->type, b));

   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(bld->type.norm) {
      if(a == bld->one || b == bld->one)
         return bld->one;
      if(a == bld->zero)
         return b;
      if(b == bld->zero)
         return a;
   }

   return lp_build_max_simple(bld, a, b);
}
/**
 * Generate clamp(a, min, max)
 * Do checks for special cases.
 */
LLVMValueRef
lp_build_clamp(struct lp_build_context *bld,
               LLVMValueRef a,
               LLVMValueRef min,
               LLVMValueRef max)
{
   assert(lp_check_value(bld->type, a));
   assert(lp_check_value(bld->type, min));
   assert(lp_check_value(bld->type, max));

   a = lp_build_min(bld, a, max);
   a = lp_build_max(bld, a, min);
   return a;
}
/**
 * Generate abs(a)
 */
LLVMValueRef
lp_build_abs(struct lp_build_context *bld,
             LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);

   assert(lp_check_value(type, a));

   if(!type.sign)
      return a;

   if(type.floating) {
      /* Mask out the sign bit */
      LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
      unsigned long long absMask = ~(1ULL << (type.width - 1));
      LLVMValueRef mask = lp_build_const_int_vec(type, ((unsigned long long) absMask));
      a = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
      a = LLVMBuildAnd(bld->builder, a, mask, "");
      a = LLVMBuildBitCast(bld->builder, a, vec_type, "");
      return a;
   }

   if(type.width*type.length == 128 && util_cpu_caps.has_ssse3) {
      switch(type.width) {
      case 8:
         return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.b.128", vec_type, a);
      case 16:
         return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.w.128", vec_type, a);
      case 32:
         return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.d.128", vec_type, a);
      }
   }

   return lp_build_max(bld, a, LLVMBuildNeg(bld->builder, a, ""));
}
LLVMValueRef
lp_build_negate(struct lp_build_context *bld,
                LLVMValueRef a)
{
   assert(lp_check_value(bld->type, a));

#if HAVE_LLVM >= 0x0207
   if (bld->type.floating)
      a = LLVMBuildFNeg(bld->builder, a, "");
   else
#endif
      a = LLVMBuildNeg(bld->builder, a, "");

   return a;
}
/** Return -1, 0 or +1 depending on the sign of a */
LLVMValueRef
lp_build_sgn(struct lp_build_context *bld,
             LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMValueRef cond;
   LLVMValueRef res;

   assert(lp_check_value(type, a));

   /* Handle non-zero case */
   if(!type.sign) {
      /* if not zero then sign must be positive */
      res = bld->one;
   }
   else if(type.floating) {
      LLVMTypeRef vec_type;
      LLVMTypeRef int_type;
      LLVMValueRef mask;
      LLVMValueRef sign;
      LLVMValueRef one;
      unsigned long long maskBit = (unsigned long long)1 << (type.width - 1);

      int_type = lp_build_int_vec_type(type);
      vec_type = lp_build_vec_type(type);
      mask = lp_build_const_int_vec(type, maskBit);

      /* Take the sign bit and add it to 1 constant */
      sign = LLVMBuildBitCast(bld->builder, a, int_type, "");
      sign = LLVMBuildAnd(bld->builder, sign, mask, "");
      one = LLVMConstBitCast(bld->one, int_type);
      res = LLVMBuildOr(bld->builder, sign, one, "");
      res = LLVMBuildBitCast(bld->builder, res, vec_type, "");
   }
   else
   {
      LLVMValueRef minus_one = lp_build_const_vec(type, -1.0);
      cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, bld->zero);
      res = lp_build_select(bld, cond, bld->one, minus_one);
   }

   /* Handle zero case */
   cond = lp_build_cmp(bld, PIPE_FUNC_EQUAL, a, bld->zero);
   res = lp_build_select(bld, cond, bld->zero, res);

   return res;
}
/**
 * Set the sign of float vector 'a' according to 'sign'.
 * If sign==0, return abs(a).
 * If sign==1, return -abs(a);
 * Other values for sign produce undefined results.
 */
LLVMValueRef
lp_build_set_sign(struct lp_build_context *bld,
                  LLVMValueRef a, LLVMValueRef sign)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   LLVMValueRef shift = lp_build_const_int_vec(type, type.width - 1);
   LLVMValueRef mask = lp_build_const_int_vec(type,
                             ~((unsigned long long) 1 << (type.width - 1)));
   LLVMValueRef val, res;

   assert(type.floating);
   assert(lp_check_value(type, a));

   /* val = reinterpret_cast<int>(a) */
   val = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
   /* val = val & mask */
   val = LLVMBuildAnd(bld->builder, val, mask, "");
   /* sign = sign << shift */
   sign = LLVMBuildShl(bld->builder, sign, shift, "");
   /* res = val | sign */
   res = LLVMBuildOr(bld->builder, val, sign, "");
   /* res = reinterpret_cast<float>(res) */
   res = LLVMBuildBitCast(bld->builder, res, vec_type, "");

   return res;
}
/**
 * Convert vector of (or scalar) int to vector of (or scalar) float.
 */
LLVMValueRef
lp_build_int_to_float(struct lp_build_context *bld,
                      LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);

   assert(type.floating);

   return LLVMBuildSIToFP(bld->builder, a, vec_type, "");
}
enum lp_build_round_sse41_mode
{
   LP_BUILD_ROUND_SSE41_NEAREST = 0,
   LP_BUILD_ROUND_SSE41_FLOOR = 1,
   LP_BUILD_ROUND_SSE41_CEIL = 2,
   LP_BUILD_ROUND_SSE41_TRUNCATE = 3
};


static INLINE LLVMValueRef
lp_build_round_sse41(struct lp_build_context *bld,
                     LLVMValueRef a,
                     enum lp_build_round_sse41_mode mode)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef i32t = LLVMInt32Type();
   const char *intrinsic;
   LLVMValueRef res;

   assert(type.floating);

   assert(lp_check_value(type, a));
   assert(util_cpu_caps.has_sse4_1);

   if (type.length == 1) {
      LLVMTypeRef vec_type;
      LLVMValueRef undef;
      LLVMValueRef args[3];
      LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);

      switch(type.width) {
      case 32:
         intrinsic = "llvm.x86.sse41.round.ss";
         break;
      case 64:
         intrinsic = "llvm.x86.sse41.round.sd";
         break;
      default:
         assert(0);
         return bld->undef;
      }

      vec_type = LLVMVectorType(bld->elem_type, 4);

      undef = LLVMGetUndef(vec_type);

      args[0] = undef;
      args[1] = LLVMBuildInsertElement(bld->builder, undef, a, index0, "");
      args[2] = LLVMConstInt(i32t, mode, 0);

      res = lp_build_intrinsic(bld->builder, intrinsic,
                               vec_type, args, Elements(args));

      res = LLVMBuildExtractElement(bld->builder, res, index0, "");
   }
   else {
      assert(type.width*type.length == 128);

      switch(type.width) {
      case 32:
         intrinsic = "llvm.x86.sse41.round.ps";
         break;
      case 64:
         intrinsic = "llvm.x86.sse41.round.pd";
         break;
      default:
         assert(0);
         return bld->undef;
      }

      res = lp_build_intrinsic_binary(bld->builder, intrinsic,
                                      lp_build_vec_type(type),
                                      a,
                                      LLVMConstInt(i32t, mode, 0));
   }

   return res;
}
/**
 * Return the integer part of a float (vector) value. The returned value is
 * a float (vector).
 * Ex: trunc(-1.5) = -1.0
 */
LLVMValueRef
lp_build_trunc(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if (util_cpu_caps.has_sse4_1 &&
       (type.length == 1 || type.width*type.length == 128)) {
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_TRUNCATE);
   }
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
      LLVMValueRef res;
      res = LLVMBuildFPToSI(bld->builder, a, int_vec_type, "");
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}
/**
 * Return float (vector) rounded to nearest integer (vector). The returned
 * value is a float (vector).
 * Ex: round(0.9) = 1.0
 * Ex: round(-1.5) = -2.0
 */
LLVMValueRef
lp_build_round(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if (util_cpu_caps.has_sse4_1 &&
       (type.length == 1 || type.width*type.length == 128)) {
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_NEAREST);
   }
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMValueRef res;
      res = lp_build_iround(bld, a);
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}
/**
 * Return floor of float (vector), result is a float (vector)
 * Ex: floor(1.1) = 1.0
 * Ex: floor(-1.1) = -2.0
 */
LLVMValueRef
lp_build_floor(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if (util_cpu_caps.has_sse4_1 &&
       (type.length == 1 || type.width*type.length == 128)) {
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
   }
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMValueRef res;
      res = lp_build_ifloor(bld, a);
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}
/**
 * Return ceiling of float (vector), returning float (vector).
 * Ex: ceil( 1.1) = 2.0
 * Ex: ceil(-1.1) = -1.0
 */
LLVMValueRef
lp_build_ceil(struct lp_build_context *bld,
              LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if (util_cpu_caps.has_sse4_1 &&
       (type.length == 1 || type.width*type.length == 128)) {
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_CEIL);
   }
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMValueRef res;
      res = lp_build_iceil(bld, a);
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}
/**
 * Return fractional part of 'a' computed as a - floor(a)
 * Typically used in texture coord arithmetic.
 */
LLVMValueRef
lp_build_fract(struct lp_build_context *bld,
               LLVMValueRef a)
{
   assert(bld->type.floating);
   return lp_build_sub(bld, a, lp_build_floor(bld, a));
}
/**
 * Return the integer part of a float (vector) value. The returned value is
 * an integer (vector).
 * Ex: itrunc(-1.5) = -1
 */
LLVMValueRef
lp_build_itrunc(struct lp_build_context *bld,
                LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);

   assert(type.floating);
   assert(lp_check_value(type, a));

   return LLVMBuildFPToSI(bld->builder, a, int_vec_type, "");
}
/**
 * Return float (vector) rounded to nearest integer (vector). The returned
 * value is an integer (vector).
 * Ex: iround(0.9) = 1
 * Ex: iround(-1.5) = -2
 */
LLVMValueRef
lp_build_iround(struct lp_build_context *bld,
                LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = bld->int_vec_type;
   LLVMValueRef res;

   assert(type.floating);

   assert(lp_check_value(type, a));

   if (util_cpu_caps.has_sse4_1 &&
       (type.length == 1 || type.width*type.length == 128)) {
      res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_NEAREST);
   }
   else {
      LLVMValueRef half;

      half = lp_build_const_vec(type, 0.5);

      if (type.sign) {
         LLVMTypeRef vec_type = bld->vec_type;
         LLVMValueRef mask = lp_build_const_int_vec(type, (unsigned long long)1 << (type.width - 1));
         LLVMValueRef sign;

         /* copy the sign of a onto the 0.5 constant */
         sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
         sign = LLVMBuildAnd(bld->builder, sign, mask, "");

         half = LLVMBuildBitCast(bld->builder, half, int_vec_type, "");
         half = LLVMBuildOr(bld->builder, sign, half, "");
         half = LLVMBuildBitCast(bld->builder, half, vec_type, "");
      }

      res = LLVMBuildFAdd(bld->builder, a, half, "");
   }

   res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "");

   return res;
}
/**
 * Return floor of float (vector), result is an int (vector)
 * Ex: ifloor(1.1) = 1
 * Ex: ifloor(-1.1) = -2
 */
LLVMValueRef
lp_build_ifloor(struct lp_build_context *bld,
                LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = bld->int_vec_type;
   LLVMValueRef res;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if (util_cpu_caps.has_sse4_1 &&
       (type.length == 1 || type.width*type.length == 128)) {
      res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
   }
   else {
      res = a;

      if (type.sign) {
         /* if a is negative, nudge it by almost -1 so that the final
          * truncation toward zero rounds down instead */
         LLVMTypeRef vec_type = bld->vec_type;
         unsigned mantissa = lp_mantissa(type);
         LLVMValueRef mask = lp_build_const_int_vec(type, (unsigned long long)1 << (type.width - 1));
         LLVMValueRef sign;
         LLVMValueRef offset;

         /* sign = a < 0 ? ~0 : 0 */
         sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
         sign = LLVMBuildAnd(bld->builder, sign, mask, "");
         sign = LLVMBuildAShr(bld->builder, sign,
                              lp_build_const_int_vec(type, type.width - 1),
                              "ifloor.sign");

         /* offset = -0.99999(9)f */
         offset = lp_build_const_vec(type, -(double)(((unsigned long long)1 << mantissa) - 10)/((unsigned long long)1 << mantissa));
         offset = LLVMConstBitCast(offset, int_vec_type);

         /* offset = a < 0 ? offset : 0.0f */
         offset = LLVMBuildAnd(bld->builder, offset, sign, "");
         offset = LLVMBuildBitCast(bld->builder, offset, vec_type, "ifloor.offset");

         res = LLVMBuildFAdd(bld->builder, res, offset, "ifloor.res");
      }
   }

   /* round to nearest (toward zero) */
   res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "ifloor.res");

   return res;
}
/**
 * Return ceiling of float (vector), returning int (vector).
 * Ex: iceil( 1.1) = 2
 * Ex: iceil(-1.1) = -1
 */
LLVMValueRef
lp_build_iceil(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = bld->int_vec_type;
   LLVMValueRef res;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if (util_cpu_caps.has_sse4_1 &&
       (type.length == 1 || type.width*type.length == 128)) {
      res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_CEIL);
   }
   else {
      LLVMTypeRef vec_type = bld->vec_type;
      unsigned mantissa = lp_mantissa(type);
      LLVMValueRef offset;

      /* offset = 0.99999(9)f */
      offset = lp_build_const_vec(type, (double)(((unsigned long long)1 << mantissa) - 10)/((unsigned long long)1 << mantissa));

      if (type.sign) {
         LLVMValueRef mask = lp_build_const_int_vec(type, (unsigned long long)1 << (type.width - 1));
         LLVMValueRef sign;

         /* sign = a < 0 ? 0 : ~0 */
         sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
         sign = LLVMBuildAnd(bld->builder, sign, mask, "");
         sign = LLVMBuildAShr(bld->builder, sign,
                              lp_build_const_int_vec(type, type.width - 1),
                              "iceil.sign");
         sign = LLVMBuildNot(bld->builder, sign, "iceil.not");

         /* offset = a < 0 ? 0.0 : offset */
         offset = LLVMConstBitCast(offset, int_vec_type);
         offset = LLVMBuildAnd(bld->builder, offset, sign, "");
         offset = LLVMBuildBitCast(bld->builder, offset, vec_type, "iceil.offset");
      }

      res = LLVMBuildFAdd(bld->builder, a, offset, "iceil.res");
   }

   /* round to nearest (toward zero) */
   res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "iceil.res");

   return res;
}
/**
 * Combined ifloor() & fract().
 *
 * Preferred to calling the functions separately, as it will ensure that the
 * strategy (floor() vs ifloor()) that results in less redundant work is used.
 */
void
lp_build_ifloor_fract(struct lp_build_context *bld,
                      LLVMValueRef a,
                      LLVMValueRef *out_ipart,
                      LLVMValueRef *out_fpart)
{
   const struct lp_type type = bld->type;
   LLVMValueRef ipart;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if (util_cpu_caps.has_sse4_1 &&
       (type.length == 1 || type.width*type.length == 128)) {
      /*
       * floor() is easier.
       */
      ipart = lp_build_floor(bld, a);
      *out_fpart = LLVMBuildFSub(bld->builder, a, ipart, "fpart");
      *out_ipart = LLVMBuildFPToSI(bld->builder, ipart, bld->int_vec_type, "ipart");
   }
   else {
      /*
       * ifloor() is easier.
       */
      *out_ipart = lp_build_ifloor(bld, a);
      ipart = LLVMBuildSIToFP(bld->builder, *out_ipart, bld->vec_type, "ipart");
      *out_fpart = LLVMBuildFSub(bld->builder, a, ipart, "fpart");
   }
}
LLVMValueRef
lp_build_sqrt(struct lp_build_context *bld,
              LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   char intrinsic[32];

   assert(lp_check_value(type, a));

   /* TODO: optimize the constant case */

   assert(type.floating);
   util_snprintf(intrinsic, sizeof intrinsic, "llvm.sqrt.v%uf%u", type.length, type.width);

   return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
}
/**
 * Do one Newton-Raphson step to improve reciprocal precision:
 *
 *   x_{i+1} = x_i * (2 - a * x_i)
 *
 * XXX: Unfortunately this won't give IEEE-754 conformant results for 0 or
 * +/-Inf, giving NaN instead. Certain applications rely on this behavior,
 * such as Google Earth, which does RCP(RSQRT(0.0)) when drawing the Earth's
 * halo. It would be necessary to clamp the argument to prevent this.
 *
 * See also:
 * - http://en.wikipedia.org/wiki/Division_(digital)#Newton.E2.80.93Raphson_division
 * - http://softwarecommunity.intel.com/articles/eng/1818.htm
 */
static INLINE LLVMValueRef
lp_build_rcp_refine(struct lp_build_context *bld,
                    LLVMValueRef a,
                    LLVMValueRef rcp_a)
{
   LLVMValueRef two = lp_build_const_vec(bld->type, 2.0);
   LLVMValueRef res;

   res = LLVMBuildFMul(bld->builder, a, rcp_a, "");
   res = LLVMBuildFSub(bld->builder, two, res, "");
   res = LLVMBuildFMul(bld->builder, rcp_a, res, "");

   return res;
}
/**
 * Generate 1/a
 */
LLVMValueRef
lp_build_rcp(struct lp_build_context *bld,
             LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(lp_check_value(type, a));

   assert(type.floating);

   if(LLVMIsConstant(a))
      return LLVMConstFDiv(bld->one, a);

   /*
    * We don't use RCPPS because:
    * - it only has 10bits of precision
    * - it doesn't even get the reciprocal of 1.0 exactly
    * - doing Newton-Raphson steps yields wrong (NaN) values for 0.0 or Inf
    * - for recent processors the benefit over DIVPS is marginal, a case
    *   by case basis
    *
    * We could still use it on certain processors if benchmarks show that the
    * RCPPS plus necessary workarounds are still preferable to DIVPS; or for
    * particular uses that require fewer workarounds.
    */

   if (FALSE && util_cpu_caps.has_sse && type.width == 32 && type.length == 4) {
      const unsigned num_iterations = 0;
      LLVMValueRef res;
      unsigned i;

      res = lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rcp.ps", bld->vec_type, a);

      for (i = 0; i < num_iterations; ++i) {
         res = lp_build_rcp_refine(bld, a, res);
      }

      return res;
   }

   return LLVMBuildFDiv(bld->builder, bld->one, a, "");
}
/**
 * Do one Newton-Raphson step to improve rsqrt precision:
 *
 *   x_{i+1} = 0.5 * x_i * (3.0 - a * x_i * x_i)
 *
 * See also:
 * - http://softwarecommunity.intel.com/articles/eng/1818.htm
 */
static INLINE LLVMValueRef
lp_build_rsqrt_refine(struct lp_build_context *bld,
                      LLVMValueRef a,
                      LLVMValueRef rsqrt_a)
{
   LLVMValueRef half = lp_build_const_vec(bld->type, 0.5);
   LLVMValueRef three = lp_build_const_vec(bld->type, 3.0);
   LLVMValueRef res;

   res = LLVMBuildFMul(bld->builder, rsqrt_a, rsqrt_a, "");
   res = LLVMBuildFMul(bld->builder, a, res, "");
   res = LLVMBuildFSub(bld->builder, three, res, "");
   res = LLVMBuildFMul(bld->builder, rsqrt_a, res, "");
   res = LLVMBuildFMul(bld->builder, half, res, "");

   return res;
}
/**
 * Generate 1/sqrt(a)
 */
LLVMValueRef
lp_build_rsqrt(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(lp_check_value(type, a));

   assert(type.floating);

   if (util_cpu_caps.has_sse && type.width == 32 && type.length == 4) {
      const unsigned num_iterations = 0;
      LLVMValueRef res;
      unsigned i;

      res = lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rsqrt.ps", bld->vec_type, a);

      for (i = 0; i < num_iterations; ++i) {
         res = lp_build_rsqrt_refine(bld, a, res);
      }

      return res;
   }

   return lp_build_rcp(bld, lp_build_sqrt(bld, a));
}
static inline LLVMValueRef
lp_build_const_v4si(unsigned long value)
{
   LLVMValueRef element = LLVMConstInt(LLVMInt32Type(), value, 0);
   LLVMValueRef elements[4] = { element, element, element, element };
   return LLVMConstVector(elements, 4);
}


static inline LLVMValueRef
lp_build_const_v4sf(float value)
{
   LLVMValueRef element = LLVMConstReal(LLVMFloatType(), value);
   LLVMValueRef elements[4] = { element, element, element, element };
   return LLVMConstVector(elements, 4);
}
/**
 * Generate sin(a) using SSE2
 */
LLVMValueRef
lp_build_sin(struct lp_build_context *bld,
             LLVMValueRef a)
{
   struct lp_type int_type = lp_int_type(bld->type);
   LLVMBuilderRef b = bld->builder;
   LLVMTypeRef v4sf = LLVMVectorType(LLVMFloatType(), 4);
   LLVMTypeRef v4si = LLVMVectorType(LLVMInt32Type(), 4);

   /*
    * take the absolute value,
    * x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
    */

   LLVMValueRef inv_sig_mask = lp_build_const_v4si(~0x80000000);
   LLVMValueRef a_v4si = LLVMBuildBitCast(b, a, v4si, "a_v4si");

   LLVMValueRef absi = LLVMBuildAnd(b, a_v4si, inv_sig_mask, "absi");
   LLVMValueRef x_abs = LLVMBuildBitCast(b, absi, v4sf, "x_abs");

   /*
    * extract the sign bit (upper one)
    * sign_bit = _mm_and_ps(sign_bit, *(v4sf*)_ps_sign_mask);
    */
   LLVMValueRef sig_mask = lp_build_const_v4si(0x80000000);
   LLVMValueRef sign_bit_i = LLVMBuildAnd(b, a_v4si, sig_mask, "sign_bit_i");

   /*
    * scale by 4/Pi
    * y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
    */

   LLVMValueRef FOPi = lp_build_const_v4sf(1.27323954473516);
   LLVMValueRef scale_y = LLVMBuildFMul(b, x_abs, FOPi, "scale_y");

   /*
    * store the integer part of y in mm0
    * emm2 = _mm_cvttps_epi32(y);
    */

   LLVMValueRef emm2_i = LLVMBuildFPToSI(b, scale_y, v4si, "emm2_i");

   /*
    * j=(j+1) & (~1) (see the cephes sources)
    * emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
    */

   LLVMValueRef all_one = lp_build_const_v4si(1);
   LLVMValueRef emm2_add = LLVMBuildAdd(b, emm2_i, all_one, "emm2_add");

   /*
    * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
    */
   LLVMValueRef inv_one = lp_build_const_v4si(~1);
   LLVMValueRef emm2_and = LLVMBuildAnd(b, emm2_add, inv_one, "emm2_and");

   /*
    * y = _mm_cvtepi32_ps(emm2);
    */
   LLVMValueRef y_2 = LLVMBuildSIToFP(b, emm2_and, v4sf, "y_2");

   /* get the swap sign flag
    * emm0 = _mm_and_si128(emm2, *(v4si*)_pi32_4);
    */
   LLVMValueRef pi32_4 = lp_build_const_v4si(4);
   LLVMValueRef emm0_and = LLVMBuildAnd(b, emm2_add, pi32_4, "emm0_and");

   /*
    * emm2 = _mm_slli_epi32(emm0, 29);
    */
   LLVMValueRef const_29 = lp_build_const_v4si(29);
   LLVMValueRef swap_sign_bit = LLVMBuildShl(b, emm0_and, const_29, "swap_sign_bit");

   /*
    * get the polynomial selection mask
    * there is one polynomial for 0 <= x <= Pi/4
    * and another one for Pi/4 < x <= Pi/2
    * Both branches will be computed.
    *
    * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
    * emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
    */

   LLVMValueRef pi32_2 = lp_build_const_v4si(2);
   LLVMValueRef emm2_3 = LLVMBuildAnd(b, emm2_and, pi32_2, "emm2_3");
   LLVMValueRef poly_mask = lp_build_compare(b, int_type, PIPE_FUNC_EQUAL,
                                             emm2_3, lp_build_const_v4si(0));

   /*
    * sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
    */
   LLVMValueRef sign_bit_1 = LLVMBuildXor(b, sign_bit_i, swap_sign_bit, "sign_bit");

   /*
    * _PS_CONST(minus_cephes_DP1, -0.78515625);
    * _PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
    * _PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
    */
   LLVMValueRef DP1 = lp_build_const_v4sf(-0.78515625);
   LLVMValueRef DP2 = lp_build_const_v4sf(-2.4187564849853515625e-4);
   LLVMValueRef DP3 = lp_build_const_v4sf(-3.77489497744594108e-8);

   /*
    * The magic pass: "Extended precision modular arithmetic"
    * x = ((x - y * DP1) - y * DP2) - y * DP3;
    * xmm1 = _mm_mul_ps(y, xmm1);
    * xmm2 = _mm_mul_ps(y, xmm2);
    * xmm3 = _mm_mul_ps(y, xmm3);
    */
   LLVMValueRef xmm1 = LLVMBuildFMul(b, y_2, DP1, "xmm1");
   LLVMValueRef xmm2 = LLVMBuildFMul(b, y_2, DP2, "xmm2");
   LLVMValueRef xmm3 = LLVMBuildFMul(b, y_2, DP3, "xmm3");

   /*
    * x = _mm_add_ps(x, xmm1);
    * x = _mm_add_ps(x, xmm2);
    * x = _mm_add_ps(x, xmm3);
    */

   LLVMValueRef x_1 = LLVMBuildFAdd(b, x_abs, xmm1, "x_1");
   LLVMValueRef x_2 = LLVMBuildFAdd(b, x_1, xmm2, "x_2");
   LLVMValueRef x_3 = LLVMBuildFAdd(b, x_2, xmm3, "x_3");

   /*
    * Evaluate the first polynomial (0 <= x <= Pi/4)
    *
    * z = _mm_mul_ps(x,x);
    */
   LLVMValueRef z = LLVMBuildFMul(b, x_3, x_3, "z");

   /*
    * _PS_CONST(coscof_p0,  2.443315711809948E-005);
    * _PS_CONST(coscof_p1, -1.388731625493765E-003);
    * _PS_CONST(coscof_p2,  4.166664568298827E-002);
    */
   LLVMValueRef coscof_p0 = lp_build_const_v4sf(2.443315711809948E-005);
   LLVMValueRef coscof_p1 = lp_build_const_v4sf(-1.388731625493765E-003);
   LLVMValueRef coscof_p2 = lp_build_const_v4sf(4.166664568298827E-002);

   /*
    * y = *(v4sf*)_ps_coscof_p0;
    * y = _mm_mul_ps(y, z);
    */
   LLVMValueRef y_3 = LLVMBuildFMul(b, z, coscof_p0, "y_3");
   LLVMValueRef y_4 = LLVMBuildFAdd(b, y_3, coscof_p1, "y_4");
   LLVMValueRef y_5 = LLVMBuildFMul(b, y_4, z, "y_5");
   LLVMValueRef y_6 = LLVMBuildFAdd(b, y_5, coscof_p2, "y_6");
   LLVMValueRef y_7 = LLVMBuildFMul(b, y_6, z, "y_7");
   LLVMValueRef y_8 = LLVMBuildFMul(b, y_7, z, "y_8");

   /*
    * tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
    * y = _mm_sub_ps(y, tmp);
    * y = _mm_add_ps(y, *(v4sf*)_ps_1);
    */
   LLVMValueRef half = lp_build_const_v4sf(0.5);
   LLVMValueRef tmp = LLVMBuildFMul(b, z, half, "tmp");
   LLVMValueRef y_9 = LLVMBuildFSub(b, y_8, tmp, "y_8");
   LLVMValueRef one = lp_build_const_v4sf(1.0);
   LLVMValueRef y_10 = LLVMBuildFAdd(b, y_9, one, "y_9");

   /*
    * _PS_CONST(sincof_p0, -1.9515295891E-4);
    * _PS_CONST(sincof_p1,  8.3321608736E-3);
    * _PS_CONST(sincof_p2, -1.6666654611E-1);
    */
   LLVMValueRef sincof_p0 = lp_build_const_v4sf(-1.9515295891E-4);
   LLVMValueRef sincof_p1 = lp_build_const_v4sf(8.3321608736E-3);
   LLVMValueRef sincof_p2 = lp_build_const_v4sf(-1.6666654611E-1);

   /*
    * Evaluate the second polynomial (Pi/4 < x <= Pi/2)
    *
    * y2 = *(v4sf*)_ps_sincof_p0;
    * y2 = _mm_mul_ps(y2, z);
    * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
    * y2 = _mm_mul_ps(y2, z);
    * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
    * y2 = _mm_mul_ps(y2, z);
    * y2 = _mm_mul_ps(y2, x);
    * y2 = _mm_add_ps(y2, x);
    */

   LLVMValueRef y2_3 = LLVMBuildFMul(b, z, sincof_p0, "y2_3");
   LLVMValueRef y2_4 = LLVMBuildFAdd(b, y2_3, sincof_p1, "y2_4");
   LLVMValueRef y2_5 = LLVMBuildFMul(b, y2_4, z, "y2_5");
   LLVMValueRef y2_6 = LLVMBuildFAdd(b, y2_5, sincof_p2, "y2_6");
   LLVMValueRef y2_7 = LLVMBuildFMul(b, y2_6, z, "y2_7");
   LLVMValueRef y2_8 = LLVMBuildFMul(b, y2_7, x_3, "y2_8");
   LLVMValueRef y2_9 = LLVMBuildFAdd(b, y2_8, x_3, "y2_9");

   /*
    * select the correct result from the two polynomials
    *
    * y2 = _mm_and_ps(xmm3, y2); //, xmm3);
    * y = _mm_andnot_ps(xmm3, y);
    * y = _mm_add_ps(y,y2);
    */
   LLVMValueRef y2_i = LLVMBuildBitCast(b, y2_9, v4si, "y2_i");
   LLVMValueRef y_i = LLVMBuildBitCast(b, y_10, v4si, "y_i");
   LLVMValueRef y2_and = LLVMBuildAnd(b, y2_i, poly_mask, "y2_and");
   LLVMValueRef inv = lp_build_const_v4si(~0);
   LLVMValueRef poly_mask_inv = LLVMBuildXor(b, poly_mask, inv, "poly_mask_inv");
   LLVMValueRef y_and = LLVMBuildAnd(b, y_i, poly_mask_inv, "y_and");
   LLVMValueRef y_combine = LLVMBuildAdd(b, y_and, y2_and, "y_combine");

   /*
    * update the sign
    * y = _mm_xor_ps(y, sign_bit);
    */
   LLVMValueRef y_sign = LLVMBuildXor(b, y_combine, sign_bit_1, "y_sin");
   LLVMValueRef y_result = LLVMBuildBitCast(b, y_sign, v4sf, "y_result");

   return y_result;
}
/**
 * Generate cos(a) using SSE2
 */
LLVMValueRef
lp_build_cos(struct lp_build_context *bld,
             LLVMValueRef a)
{
   struct lp_type int_type = lp_int_type(bld->type);
   LLVMBuilderRef b = bld->builder;
   LLVMTypeRef v4sf = LLVMVectorType(LLVMFloatType(), 4);
   LLVMTypeRef v4si = LLVMVectorType(LLVMInt32Type(), 4);

   /*
    * take the absolute value,
    * x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
    */

   LLVMValueRef inv_sig_mask = lp_build_const_v4si(~0x80000000);
   LLVMValueRef a_v4si = LLVMBuildBitCast(b, a, v4si, "a_v4si");

   LLVMValueRef absi = LLVMBuildAnd(b, a_v4si, inv_sig_mask, "absi");
   LLVMValueRef x_abs = LLVMBuildBitCast(b, absi, v4sf, "x_abs");

   /*
    * scale by 4/Pi
    * y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
    */

   LLVMValueRef FOPi = lp_build_const_v4sf(1.27323954473516);
   LLVMValueRef scale_y = LLVMBuildFMul(b, x_abs, FOPi, "scale_y");

   /*
    * store the integer part of y in mm0
    * emm2 = _mm_cvttps_epi32(y);
    */

   LLVMValueRef emm2_i = LLVMBuildFPToSI(b, scale_y, v4si, "emm2_i");

   /*
    * j=(j+1) & (~1) (see the cephes sources)
    * emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
    */

   LLVMValueRef all_one = lp_build_const_v4si(1);
   LLVMValueRef emm2_add = LLVMBuildAdd(b, emm2_i, all_one, "emm2_add");

   /*
    * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
    */
   LLVMValueRef inv_one = lp_build_const_v4si(~1);
   LLVMValueRef emm2_and = LLVMBuildAnd(b, emm2_add, inv_one, "emm2_and");

   /*
    * y = _mm_cvtepi32_ps(emm2);
    */
   LLVMValueRef y_2 = LLVMBuildSIToFP(b, emm2_and, v4sf, "y_2");

   /*
    * emm2 = _mm_sub_epi32(emm2, *(v4si*)_pi32_2);
    */
   LLVMValueRef const_2 = lp_build_const_v4si(2);
   LLVMValueRef emm2_2 = LLVMBuildSub(b, emm2_and, const_2, "emm2_2");

   /* get the swap sign flag
    * emm0 = _mm_andnot_si128(emm2, *(v4si*)_pi32_4);
    */
   LLVMValueRef inv = lp_build_const_v4si(~0);
   LLVMValueRef emm0_not = LLVMBuildXor(b, emm2_2, inv, "emm0_not");
   LLVMValueRef pi32_4 = lp_build_const_v4si(4);
   LLVMValueRef emm0_and = LLVMBuildAnd(b, emm0_not, pi32_4, "emm0_and");

   /*
    * emm2 = _mm_slli_epi32(emm0, 29);
    */
   LLVMValueRef const_29 = lp_build_const_v4si(29);
   LLVMValueRef sign_bit = LLVMBuildShl(b, emm0_and, const_29, "sign_bit");

   /*
    * get the polynomial selection mask
    * there is one polynomial for 0 <= x <= Pi/4
    * and another one for Pi/4 < x <= Pi/2
    * Both branches will be computed.
    *
    * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
    * emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
    */

   LLVMValueRef pi32_2 = lp_build_const_v4si(2);
   LLVMValueRef emm2_3 = LLVMBuildAnd(b, emm2_2, pi32_2, "emm2_3");
   LLVMValueRef poly_mask = lp_build_compare(b, int_type, PIPE_FUNC_EQUAL,
                                             emm2_3, lp_build_const_v4si(0));

   /*
    * _PS_CONST(minus_cephes_DP1, -0.78515625);
    * _PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
    * _PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
    */
   LLVMValueRef DP1 = lp_build_const_v4sf(-0.78515625);
   LLVMValueRef DP2 = lp_build_const_v4sf(-2.4187564849853515625e-4);
   LLVMValueRef DP3 = lp_build_const_v4sf(-3.77489497744594108e-8);

   /*
    * The magic pass: "Extended precision modular arithmetic"
    * x = ((x - y * DP1) - y * DP2) - y * DP3;
    * xmm1 = _mm_mul_ps(y, xmm1);
    * xmm2 = _mm_mul_ps(y, xmm2);
    * xmm3 = _mm_mul_ps(y, xmm3);
    */
   LLVMValueRef xmm1 = LLVMBuildFMul(b, y_2, DP1, "xmm1");
   LLVMValueRef xmm2 = LLVMBuildFMul(b, y_2, DP2, "xmm2");
   LLVMValueRef xmm3 = LLVMBuildFMul(b, y_2, DP3, "xmm3");

   /*
    * x = _mm_add_ps(x, xmm1);
    * x = _mm_add_ps(x, xmm2);
    * x = _mm_add_ps(x, xmm3);
    */

   LLVMValueRef x_1 = LLVMBuildFAdd(b, x_abs, xmm1, "x_1");
   LLVMValueRef x_2 = LLVMBuildFAdd(b, x_1, xmm2, "x_2");
   LLVMValueRef x_3 = LLVMBuildFAdd(b, x_2, xmm3, "x_3");

   /*
    * Evaluate the first polynomial (0 <= x <= Pi/4)
    *
    * z = _mm_mul_ps(x,x);
    */
   LLVMValueRef z = LLVMBuildFMul(b, x_3, x_3, "z");

   /*
    * _PS_CONST(coscof_p0,  2.443315711809948E-005);
    * _PS_CONST(coscof_p1, -1.388731625493765E-003);
    * _PS_CONST(coscof_p2,  4.166664568298827E-002);
    */
   LLVMValueRef coscof_p0 = lp_build_const_v4sf(2.443315711809948E-005);
   LLVMValueRef coscof_p1 = lp_build_const_v4sf(-1.388731625493765E-003);
   LLVMValueRef coscof_p2 = lp_build_const_v4sf(4.166664568298827E-002);

   /*
    * y = *(v4sf*)_ps_coscof_p0;
    * y = _mm_mul_ps(y, z);
    */
   LLVMValueRef y_3 = LLVMBuildFMul(b, z, coscof_p0, "y_3");
   LLVMValueRef y_4 = LLVMBuildFAdd(b, y_3, coscof_p1, "y_4");
   LLVMValueRef y_5 = LLVMBuildFMul(b, y_4, z, "y_5");
   LLVMValueRef y_6 = LLVMBuildFAdd(b, y_5, coscof_p2, "y_6");
   LLVMValueRef y_7 = LLVMBuildFMul(b, y_6, z, "y_7");
   LLVMValueRef y_8 = LLVMBuildFMul(b, y_7, z, "y_8");

   /*
    * tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
    * y = _mm_sub_ps(y, tmp);
    * y = _mm_add_ps(y, *(v4sf*)_ps_1);
    */
   LLVMValueRef half = lp_build_const_v4sf(0.5);
   LLVMValueRef tmp = LLVMBuildFMul(b, z, half, "tmp");
   LLVMValueRef y_9 = LLVMBuildFSub(b, y_8, tmp, "y_8");
   LLVMValueRef one = lp_build_const_v4sf(1.0);
   LLVMValueRef y_10 = LLVMBuildFAdd(b, y_9, one, "y_9");

   /*
    * _PS_CONST(sincof_p0, -1.9515295891E-4);
    * _PS_CONST(sincof_p1,  8.3321608736E-3);
    * _PS_CONST(sincof_p2, -1.6666654611E-1);
    */
   LLVMValueRef sincof_p0 = lp_build_const_v4sf(-1.9515295891E-4);
   LLVMValueRef sincof_p1 = lp_build_const_v4sf(8.3321608736E-3);
   LLVMValueRef sincof_p2 = lp_build_const_v4sf(-1.6666654611E-1);

   /*
    * Evaluate the second polynomial (Pi/4 < x <= Pi/2)
    *
    * y2 = *(v4sf*)_ps_sincof_p0;
    * y2 = _mm_mul_ps(y2, z);
    * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
    * y2 = _mm_mul_ps(y2, z);
    * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
    * y2 = _mm_mul_ps(y2, z);
    * y2 = _mm_mul_ps(y2, x);
    * y2 = _mm_add_ps(y2, x);
    */

   LLVMValueRef y2_3 = LLVMBuildFMul(b, z, sincof_p0, "y2_3");
   LLVMValueRef y2_4 = LLVMBuildFAdd(b, y2_3, sincof_p1, "y2_4");
   LLVMValueRef y2_5 = LLVMBuildFMul(b, y2_4, z, "y2_5");
   LLVMValueRef y2_6 = LLVMBuildFAdd(b, y2_5, sincof_p2, "y2_6");
   LLVMValueRef y2_7 = LLVMBuildFMul(b, y2_6, z, "y2_7");
   LLVMValueRef y2_8 = LLVMBuildFMul(b, y2_7, x_3, "y2_8");
   LLVMValueRef y2_9 = LLVMBuildFAdd(b, y2_8, x_3, "y2_9");

   /*
    * select the correct result from the two polynomials
    *
    * y2 = _mm_and_ps(xmm3, y2); //, xmm3);
    * y = _mm_andnot_ps(xmm3, y);
    * y = _mm_add_ps(y,y2);
    */
   LLVMValueRef y2_i = LLVMBuildBitCast(b, y2_9, v4si, "y2_i");
   LLVMValueRef y_i = LLVMBuildBitCast(b, y_10, v4si, "y_i");
   LLVMValueRef y2_and = LLVMBuildAnd(b, y2_i, poly_mask, "y2_and");
   LLVMValueRef poly_mask_inv = LLVMBuildXor(b, poly_mask, inv, "poly_mask_inv");
   LLVMValueRef y_and = LLVMBuildAnd(b, y_i, poly_mask_inv, "y_and");
   LLVMValueRef y_combine = LLVMBuildAdd(b, y_and, y2_and, "y_combine");

   /*
    * update the sign
    * y = _mm_xor_ps(y, sign_bit);
    */
   LLVMValueRef y_sign = LLVMBuildXor(b, y_combine, sign_bit, "y_sin");
   LLVMValueRef y_result = LLVMBuildBitCast(b, y_sign, v4sf, "y_result");

   return y_result;
}
/**
 * Generate pow(x, y)
 */
LLVMValueRef
lp_build_pow(struct lp_build_context *bld,
             LLVMValueRef x,
             LLVMValueRef y)
{
   /* TODO: optimize the constant case */
   if (gallivm_debug & GALLIVM_DEBUG_PERF &&
       LLVMIsConstant(x) && LLVMIsConstant(y)) {
      debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                   __FUNCTION__);
   }

   return lp_build_exp2(bld, lp_build_mul(bld, lp_build_log2(bld, x), y));
}
/**
 * Generate exp(x)
 */
LLVMValueRef
lp_build_exp(struct lp_build_context *bld,
             LLVMValueRef x)
{
   /* log2(e) = 1/log(2) */
   LLVMValueRef log2e = lp_build_const_vec(bld->type, 1.4426950408889634);

   assert(lp_check_value(bld->type, x));

   return lp_build_exp2(bld, lp_build_mul(bld, log2e, x));
}
/**
 * Generate log(x)
 */
LLVMValueRef
lp_build_log(struct lp_build_context *bld,
             LLVMValueRef x)
{
   /* log(2) */
   LLVMValueRef log2 = lp_build_const_vec(bld->type, 0.69314718055994529);

   assert(lp_check_value(bld->type, x));

   return lp_build_mul(bld, log2, lp_build_log2(bld, x));
}
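
/*
 * Worked example of the identities above (illustrative only):
 *   exp(1) = 2^(1 * log2(e)) = 2^1.442695... = 2.71828...
 *   log(8) = log(2) * log2(8) = 0.693147... * 3 = 2.07944...
 */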
/**
 * Generate polynomial.
 * Ex: coeffs[0] + x * coeffs[1] + x^2 * coeffs[2].
 */
static LLVMValueRef
lp_build_polynomial(struct lp_build_context *bld,
                    LLVMValueRef x,
                    const double *coeffs,
                    unsigned num_coeffs)
{
   const struct lp_type type = bld->type;
   LLVMValueRef res = NULL;
   unsigned i;

   assert(lp_check_value(bld->type, x));

   /* TODO: optimize the constant case */
   if (gallivm_debug & GALLIVM_DEBUG_PERF &&
       LLVMIsConstant(x)) {
      debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                   __FUNCTION__);
   }

   for (i = num_coeffs; i--; ) {
      LLVMValueRef coeff;

      coeff = lp_build_const_vec(type, coeffs[i]);

      if(res)
         res = lp_build_add(bld, coeff, lp_build_mul(bld, x, res));
      else
         res = coeff;
   }

   if(res)
      return res;
   else
      return bld->undef;
}
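
#if 0
/*
 * Scalar sketch of the Horner evaluation above (illustrative only):
 * coeffs[0] + x*(coeffs[1] + x*(coeffs[2] + ...)), accumulated from the
 * highest coefficient down.
 */
static double
polynomial_ref(double x, const double *coeffs, unsigned num_coeffs)
{
   double res = 0.0;
   unsigned i;
   for (i = num_coeffs; i--; )
      res = coeffs[i] + x * res;
   return res;
}
#endif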
/**
 * Minimax polynomial fit of 2**x, in range [0, 1[
 */
const double lp_build_exp2_polynomial[] = {
#if EXP_POLY_DEGREE == 5
   0.999999999690134838155,
   0.583974334321735217258,
   0.164553105719676828492,
   0.0292811063701710962255,
   0.00354944426657875141846,
   0.000296253726543423377365
#elif EXP_POLY_DEGREE == 4
   1.00000001502262084505,
   0.563586057338685991394,
   0.150436017652442413623,
   0.0243220604213317927308,
   0.0025359088446580436489
#elif EXP_POLY_DEGREE == 3
   0.999925218562710312959,
   0.695833540494823811697,
   0.226067155427249155588,
   0.0780245226406372992967
#elif EXP_POLY_DEGREE == 2
   1.00172476321474503578,
   0.657636275736077639316,
   0.33718943461968720704
#else
#error
#endif
};
void
lp_build_exp2_approx(struct lp_build_context *bld,
                     LLVMValueRef x,
                     LLVMValueRef *p_exp2_int_part,
                     LLVMValueRef *p_frac_part,
                     LLVMValueRef *p_exp2)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMValueRef ipart = NULL;
   LLVMValueRef fpart = NULL;
   LLVMValueRef expipart = NULL;
   LLVMValueRef expfpart = NULL;
   LLVMValueRef res = NULL;

   assert(lp_check_value(bld->type, x));

   if(p_exp2_int_part || p_frac_part || p_exp2) {
      /* TODO: optimize the constant case */
      if (gallivm_debug & GALLIVM_DEBUG_PERF &&
          LLVMIsConstant(x)) {
         debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                      __FUNCTION__);
      }

      assert(type.floating && type.width == 32);

      x = lp_build_min(bld, x, lp_build_const_vec(type,  129.0));
      x = lp_build_max(bld, x, lp_build_const_vec(type, -126.99999));

      /* ipart = floor(x) */
      ipart = lp_build_floor(bld, x);

      /* fpart = x - ipart */
      fpart = LLVMBuildFSub(bld->builder, x, ipart, "");
   }

   if(p_exp2_int_part || p_exp2) {
      /* expipart = (float) (1 << ipart) */
      ipart = LLVMBuildFPToSI(bld->builder, ipart, int_vec_type, "");
      expipart = LLVMBuildAdd(bld->builder, ipart,
                              lp_build_const_int_vec(type, 127), "");
      expipart = LLVMBuildShl(bld->builder, expipart,
                              lp_build_const_int_vec(type, 23), "");
      expipart = LLVMBuildBitCast(bld->builder, expipart, vec_type, "");
   }

   if(p_exp2) {
      expfpart = lp_build_polynomial(bld, fpart, lp_build_exp2_polynomial,
                                     Elements(lp_build_exp2_polynomial));

      res = LLVMBuildFMul(bld->builder, expipart, expfpart, "");
   }

   if(p_exp2_int_part)
      *p_exp2_int_part = expipart;

   if(p_frac_part)
      *p_frac_part = fpart;

   if(p_exp2)
      *p_exp2 = res;
}
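
#if 0
/*
 * Scalar sketch of the exp2 approximation above (illustrative only):
 * 2^x = 2^ipart * 2^fpart, where 2^ipart is built by placing ipart + 127
 * in the exponent field of an IEEE single and 2^fpart, fpart in [0, 1[,
 * comes from the minimax polynomial.
 */
static float
exp2_ipart_ref(int ipart)
{
   union { float f; unsigned i; } u;
   u.i = (unsigned)(ipart + 127) << 23;   /* biased exponent, zero mantissa */
   return u.f;                            /* == 2^ipart for -126 <= ipart <= 127 */
}
#endif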
LLVMValueRef
lp_build_exp2(struct lp_build_context *bld,
              LLVMValueRef x)
{
   LLVMValueRef res;
   lp_build_exp2_approx(bld, x, NULL, NULL, &res);
   return res;
}
/**
 * Minimax polynomial fit of log2(x)/(x - 1), for x in range [1, 2[
 * These coefficients can be generated with
 * http://www.boost.org/doc/libs/1_36_0/libs/math/doc/sf_and_dist/html/math_toolkit/toolkit/internals2/minimax.html
 */
const double lp_build_log2_polynomial[] = {
#if LOG_POLY_DEGREE == 6
   3.11578814719469302614,
   -3.32419399085241980044,
   2.59883907202499966007,
   -1.23152682416275988241,
   0.318212422185251071475,
   -0.0344359067839062357313
#elif LOG_POLY_DEGREE == 5
   2.8882704548164776201,
   -2.52074962577807006663,
   1.48116647521213171641,
   -0.465725644288844778798,
   0.0596515482674574969533
#elif LOG_POLY_DEGREE == 4
   2.61761038894603480148,
   -1.75647175389045657003,
   0.688243882994381274313,
   -0.107254423828329604454
#elif LOG_POLY_DEGREE == 3
   2.28330284476918490682,
   -1.04913055217340124191,
   0.204446009836232697516
#else
#error
#endif
};
/**
 * See http://www.devmaster.net/forums/showthread.php?p=43580
 */
void
lp_build_log2_approx(struct lp_build_context *bld,
                     LLVMValueRef x,
                     LLVMValueRef *p_exp,
                     LLVMValueRef *p_floor_log2,
                     LLVMValueRef *p_log2)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);

   LLVMValueRef expmask = lp_build_const_int_vec(type, 0x7f800000);
   LLVMValueRef mantmask = lp_build_const_int_vec(type, 0x007fffff);
   LLVMValueRef one = LLVMConstBitCast(bld->one, int_vec_type);

   LLVMValueRef i = NULL;
   LLVMValueRef exp = NULL;
   LLVMValueRef mant = NULL;
   LLVMValueRef logexp = NULL;
   LLVMValueRef logmant = NULL;
   LLVMValueRef res = NULL;

   assert(lp_check_value(bld->type, x));

   if(p_exp || p_floor_log2 || p_log2) {
      /* TODO: optimize the constant case */
      if (gallivm_debug & GALLIVM_DEBUG_PERF &&
          LLVMIsConstant(x)) {
         debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                      __FUNCTION__);
      }

      assert(type.floating && type.width == 32);

      i = LLVMBuildBitCast(bld->builder, x, int_vec_type, "");

      /* exp = (float) exponent(x) */
      exp = LLVMBuildAnd(bld->builder, i, expmask, "");
   }

   if(p_floor_log2 || p_log2) {
      logexp = LLVMBuildLShr(bld->builder, exp, lp_build_const_int_vec(type, 23), "");
      logexp = LLVMBuildSub(bld->builder, logexp, lp_build_const_int_vec(type, 127), "");
      logexp = LLVMBuildSIToFP(bld->builder, logexp, vec_type, "");
   }

   if(p_log2) {
      /* mant = (float) mantissa(x) */
      mant = LLVMBuildAnd(bld->builder, i, mantmask, "");
      mant = LLVMBuildOr(bld->builder, mant, one, "");
      mant = LLVMBuildBitCast(bld->builder, mant, vec_type, "");

      logmant = lp_build_polynomial(bld, mant, lp_build_log2_polynomial,
                                    Elements(lp_build_log2_polynomial));

      /* This effectively increases the polynomial degree by one, but ensures that log2(1) == 0 */
      logmant = LLVMBuildFMul(bld->builder, logmant,
                              LLVMBuildFSub(bld->builder, mant, bld->one, ""), "");

      res = LLVMBuildFAdd(bld->builder, logmant, logexp, "");
   }

   if(p_exp) {
      exp = LLVMBuildBitCast(bld->builder, exp, vec_type, "");
      *p_exp = exp;
   }

   if(p_floor_log2)
      *p_floor_log2 = logexp;

   if(p_log2)
      *p_log2 = res;
}
LLVMValueRef
lp_build_log2(struct lp_build_context *bld,
              LLVMValueRef x)
{
   LLVMValueRef res;
   lp_build_log2_approx(bld, x, NULL, NULL, &res);
   return res;
}
/**
 * Faster (and less accurate) log2.
 *
 *    log2(x) = floor(log2(x)) + frac(x)
 *
 * See http://www.flipcode.com/archives/Fast_log_Function.shtml
 */
LLVMValueRef
lp_build_fast_log2(struct lp_build_context *bld,
                   LLVMValueRef x)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = bld->vec_type;
   LLVMTypeRef int_vec_type = bld->int_vec_type;

   unsigned mantissa = lp_mantissa(type);
   LLVMValueRef mantmask = lp_build_const_int_vec(type, (1ULL << mantissa) - 1);
   LLVMValueRef one = LLVMConstBitCast(bld->one, int_vec_type);

   LLVMValueRef ipart;
   LLVMValueRef fpart;

   assert(lp_check_value(bld->type, x));

   assert(type.floating);

   x = LLVMBuildBitCast(bld->builder, x, int_vec_type, "");

   /* ipart = floor(log2(x)) - 1 */
   ipart = LLVMBuildLShr(bld->builder, x, lp_build_const_int_vec(type, mantissa), "");
   ipart = LLVMBuildAnd(bld->builder, ipart, lp_build_const_int_vec(type, 255), "");
   ipart = LLVMBuildSub(bld->builder, ipart, lp_build_const_int_vec(type, 128), "");
   ipart = LLVMBuildSIToFP(bld->builder, ipart, vec_type, "");

   /* fpart = 1.0 + frac(x) */
   fpart = LLVMBuildAnd(bld->builder, x, mantmask, "");
   fpart = LLVMBuildOr(bld->builder, fpart, one, "");
   fpart = LLVMBuildBitCast(bld->builder, fpart, vec_type, "");

   /* floor(log2(x)) + frac(x) */
   return LLVMBuildFAdd(bld->builder, ipart, fpart, "");
}
/**
 * Fast implementation of iround(log2(x)).
 *
 * Not an approximation -- it should give accurate results all the time.
 */
LLVMValueRef
lp_build_ilog2(struct lp_build_context *bld,
               LLVMValueRef x)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = bld->int_vec_type;

   unsigned mantissa = lp_mantissa(type);
   LLVMValueRef sqrt2 = lp_build_const_vec(type, 1.4142135623730951);

   LLVMValueRef ipart;

   assert(lp_check_value(bld->type, x));

   assert(type.floating);

   /* x * 2^(0.5)   i.e., add 0.5 to the log2(x) */
   x = LLVMBuildFMul(bld->builder, x, sqrt2, "");

   x = LLVMBuildBitCast(bld->builder, x, int_vec_type, "");

   /* ipart = floor(log2(x) + 0.5) */
   ipart = LLVMBuildLShr(bld->builder, x, lp_build_const_int_vec(type, mantissa), "");
   ipart = LLVMBuildAnd(bld->builder, ipart, lp_build_const_int_vec(type, 255), "");
   ipart = LLVMBuildSub(bld->builder, ipart, lp_build_const_int_vec(type, 127), "");

   return ipart;
}