gallivm: finish implementation of lp_build_iceil()
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_arit.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 /**
30 * @file
31 * Helper arithmetic functions.
32 *
33 * LLVM IR doesn't support all basic arithmetic operations we care about (most
34 * notably min/max and saturated operations), and it is often necessary to
35 * resort to machine-specific intrinsics directly. The functions here hide all
36 * these implementation details from the other modules.
37 *
38 * We also do simple expression simplification here. Reasons are:
39 * - it is very easy given we have all necessary information readily available
40 * - LLVM optimization passes fail to simplify several vector expressions
41 * - We often know value constraints which the optimization passes have no way
42 * of knowing, such as when source arguments are known to be in [0, 1] range.
43 *
44 * @author Jose Fonseca <jfonseca@vmware.com>
45 */
46
47
48 #include "util/u_memory.h"
49 #include "util/u_debug.h"
50 #include "util/u_math.h"
51 #include "util/u_string.h"
52 #include "util/u_cpu_detect.h"
53
54 #include "lp_bld_type.h"
55 #include "lp_bld_const.h"
56 #include "lp_bld_intr.h"
57 #include "lp_bld_logic.h"
58 #include "lp_bld_pack.h"
59 #include "lp_bld_debug.h"
60 #include "lp_bld_arit.h"
61
62
63 /**
64 * Generate min(a, b)
65 * No checks for special-case values of a or b (1 or 0) are done.
66 */
67 static LLVMValueRef
68 lp_build_min_simple(struct lp_build_context *bld,
69 LLVMValueRef a,
70 LLVMValueRef b)
71 {
72 const struct lp_type type = bld->type;
73 const char *intrinsic = NULL;
74 LLVMValueRef cond;
75
76 /* TODO: optimize the constant case */
77
78 if(type.width * type.length == 128) {
79 if(type.floating) {
80 if(type.width == 32 && util_cpu_caps.has_sse)
81 intrinsic = "llvm.x86.sse.min.ps";
82 if(type.width == 64 && util_cpu_caps.has_sse2)
83 intrinsic = "llvm.x86.sse2.min.pd";
84 }
85 else {
86 if(type.width == 8 && !type.sign && util_cpu_caps.has_sse2)
87 intrinsic = "llvm.x86.sse2.pminu.b";
88 if(type.width == 8 && type.sign && util_cpu_caps.has_sse4_1)
89 intrinsic = "llvm.x86.sse41.pminsb";
90 if(type.width == 16 && !type.sign && util_cpu_caps.has_sse4_1)
91 intrinsic = "llvm.x86.sse41.pminuw";
92 if(type.width == 16 && type.sign && util_cpu_caps.has_sse2)
93 intrinsic = "llvm.x86.sse2.pmins.w";
94 if(type.width == 32 && !type.sign && util_cpu_caps.has_sse4_1)
95 intrinsic = "llvm.x86.sse41.pminud";
96 if(type.width == 32 && type.sign && util_cpu_caps.has_sse4_1)
97 intrinsic = "llvm.x86.sse41.pminsd";
98 }
99 }
100
101 if(intrinsic)
102 return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
103
104 cond = lp_build_cmp(bld, PIPE_FUNC_LESS, a, b);
105 return lp_build_select(bld, cond, a, b);
106 }
107
108
109 /**
110 * Generate max(a, b)
111 * No checks for special-case values of a or b (1 or 0) are done.
112 */
113 static LLVMValueRef
114 lp_build_max_simple(struct lp_build_context *bld,
115 LLVMValueRef a,
116 LLVMValueRef b)
117 {
118 const struct lp_type type = bld->type;
119 const char *intrinsic = NULL;
120 LLVMValueRef cond;
121
122 /* TODO: optimize the constant case */
123
124 if(type.width * type.length == 128) {
125 if(type.floating) {
126 if(type.width == 32 && util_cpu_caps.has_sse)
127 intrinsic = "llvm.x86.sse.max.ps";
128 if(type.width == 64 && util_cpu_caps.has_sse2)
129 intrinsic = "llvm.x86.sse2.max.pd";
130 }
131 else {
132 if(type.width == 8 && !type.sign && util_cpu_caps.has_sse2)
133 intrinsic = "llvm.x86.sse2.pmaxu.b";
134 if(type.width == 8 && type.sign && util_cpu_caps.has_sse4_1)
135 intrinsic = "llvm.x86.sse41.pmaxsb";
136 if(type.width == 16 && !type.sign && util_cpu_caps.has_sse4_1)
137 intrinsic = "llvm.x86.sse41.pmaxuw";
138 if(type.width == 16 && type.sign && util_cpu_caps.has_sse2)
139 intrinsic = "llvm.x86.sse2.pmaxs.w";
140 if(type.width == 32 && !type.sign && util_cpu_caps.has_sse4_1)
141 intrinsic = "llvm.x86.sse41.pmaxud";
142 if(type.width == 32 && type.sign && util_cpu_caps.has_sse4_1)
143 intrinsic = "llvm.x86.sse41.pmaxsd";
144 }
145 }
146
147 if(intrinsic)
148 return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
149
150 cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, b);
151 return lp_build_select(bld, cond, a, b);
152 }
153
154
155 /**
156 * Generate 1 - a, or ~a depending on bld->type.
157 */
158 LLVMValueRef
159 lp_build_comp(struct lp_build_context *bld,
160 LLVMValueRef a)
161 {
162 const struct lp_type type = bld->type;
163
164 if(a == bld->one)
165 return bld->zero;
166 if(a == bld->zero)
167 return bld->one;
168
169 if(type.norm && !type.floating && !type.fixed && !type.sign) {
170 if(LLVMIsConstant(a))
171 return LLVMConstNot(a);
172 else
173 return LLVMBuildNot(bld->builder, a, "");
174 }
175
176 if(LLVMIsConstant(a))
177 return LLVMConstSub(bld->one, a);
178 else
179 return LLVMBuildSub(bld->builder, bld->one, a, "");
180 }
181
182
183 /**
184 * Generate a + b
185 */
186 LLVMValueRef
187 lp_build_add(struct lp_build_context *bld,
188 LLVMValueRef a,
189 LLVMValueRef b)
190 {
191 const struct lp_type type = bld->type;
192 LLVMValueRef res;
193
194 if(a == bld->zero)
195 return b;
196 if(b == bld->zero)
197 return a;
198 if(a == bld->undef || b == bld->undef)
199 return bld->undef;
200
201 if(bld->type.norm) {
202 const char *intrinsic = NULL;
203
204 if(a == bld->one || b == bld->one)
205 return bld->one;
206
207 if(util_cpu_caps.has_sse2 &&
208 type.width * type.length == 128 &&
209 !type.floating && !type.fixed) {
210 if(type.width == 8)
211 intrinsic = type.sign ? "llvm.x86.sse2.padds.b" : "llvm.x86.sse2.paddus.b";
212 if(type.width == 16)
213 intrinsic = type.sign ? "llvm.x86.sse2.padds.w" : "llvm.x86.sse2.paddus.w";
214 }
215
216 if(intrinsic)
217 return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
218 }
219
220 if(LLVMIsConstant(a) && LLVMIsConstant(b))
221 res = LLVMConstAdd(a, b);
222 else
223 res = LLVMBuildAdd(bld->builder, a, b, "");
224
225 /* clamp to ceiling of 1.0 */
226 if(bld->type.norm && (bld->type.floating || bld->type.fixed))
227 res = lp_build_min_simple(bld, res, bld->one);
228
229 /* XXX clamp to floor of -1 or 0??? */
230
231 return res;
232 }
233
234
235 /** Return the sum of the elements of a */
236 LLVMValueRef
237 lp_build_sum_vector(struct lp_build_context *bld,
238 LLVMValueRef a)
239 {
240 const struct lp_type type = bld->type;
241 LLVMValueRef index, res;
242 unsigned i;
243
244 if (a == bld->zero)
245 return bld->zero;
246 if (a == bld->undef)
247 return bld->undef;
248 assert(type.length > 1);
249
250 assert(!bld->type.norm);
251
252 index = LLVMConstInt(LLVMInt32Type(), 0, 0);
253 res = LLVMBuildExtractElement(bld->builder, a, index, "");
254
255 for (i = 1; i < type.length; i++) {
256 index = LLVMConstInt(LLVMInt32Type(), i, 0);
257 res = LLVMBuildAdd(bld->builder, res,
258 LLVMBuildExtractElement(bld->builder, a, index, ""),
259 "");
260 }
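   /* For a 4-wide vector this emits the serial chain
    * ((a0 + a1) + a2) + a3 of extractelement/add instructions. */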
261
262 return res;
263 }
264
265
266 /**
267 * Generate a - b
268 */
269 LLVMValueRef
270 lp_build_sub(struct lp_build_context *bld,
271 LLVMValueRef a,
272 LLVMValueRef b)
273 {
274 const struct lp_type type = bld->type;
275 LLVMValueRef res;
276
277 if(b == bld->zero)
278 return a;
279 if(a == bld->undef || b == bld->undef)
280 return bld->undef;
281 if(a == b)
282 return bld->zero;
283
284 if(bld->type.norm) {
285 const char *intrinsic = NULL;
286
287 if(b == bld->one)
288 return bld->zero;
289
290 if(util_cpu_caps.has_sse2 &&
291 type.width * type.length == 128 &&
292 !type.floating && !type.fixed) {
293 if(type.width == 8)
294 intrinsic = type.sign ? "llvm.x86.sse2.psubs.b" : "llvm.x86.sse2.psubus.b";
295 if(type.width == 16)
296 intrinsic = type.sign ? "llvm.x86.sse2.psubs.w" : "llvm.x86.sse2.psubus.w";
297 }
298
299 if(intrinsic)
300 return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
301 }
302
303 if(LLVMIsConstant(a) && LLVMIsConstant(b))
304 res = LLVMConstSub(a, b);
305 else
306 res = LLVMBuildSub(bld->builder, a, b, "");
307
308 if(bld->type.norm && (bld->type.floating || bld->type.fixed))
309 res = lp_build_max_simple(bld, res, bld->zero);
310
311 return res;
312 }
313
314
315 /**
316 * Normalized 8bit multiplication.
317 *
318 * - alpha plus one
319 *
320 * makes the following approximation to the division (Sree)
321 *
322 * a*b/255 ~= (a*(b + 1)) >> 8
323 *
324 * which is the fastest method that satisfies the following OpenGL criteria
325 *
326 * 0*0 = 0 and 255*255 = 255
327 *
328 * - geometric series
329 *
330 * takes the geometric series approximation to the division
331 *
332 * t/255 = (t >> 8) + (t >> 16) + (t >> 24) ..
333 *
334 * in this case just the first two terms to fit in 16bit arithmetic
335 *
336 * t/255 ~= (t + (t >> 8)) >> 8
337 *
338 * note that just by itself it doesn't satisfy the OpenGL criteria, as it
339 * gives 255*255 = 254, so either the special case b = 255 must be
340 * accounted for or roundoff must be used
341 *
342 * - geometric series plus rounding
343 *
344 * when using a geometric series division instead of truncating the result
345 * use roundoff in the approximation (Jim Blinn)
346 *
347 * t/255 ~= (t + (t >> 8) + 0x80) >> 8
348 *
349 * achieving the exact results
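 *
 *     e.g. for the worst case a = b = 255: t = 65025, t >> 8 = 254, and
 *     (65025 + 254 + 0x80) >> 8 = 65407 >> 8 = 255, as required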
350 *
351 * @sa Alvy Ray Smith, Image Compositing Fundamentals, Tech Memo 4, Aug 15, 1995,
352 * ftp://ftp.alvyray.com/Acrobat/4_Comp.pdf
353 * @sa Michael Herf, The "double blend trick", May 2000,
354 * http://www.stereopsis.com/doubleblend.html
355 */
356 static LLVMValueRef
357 lp_build_mul_u8n(LLVMBuilderRef builder,
358 struct lp_type i16_type,
359 LLVMValueRef a, LLVMValueRef b)
360 {
361 LLVMValueRef c8;
362 LLVMValueRef ab;
363
364 c8 = lp_build_const_int_vec(i16_type, 8);
365
366 #if 0
367
368 /* a*b/255 ~= (a*(b + 1)) >> 8 */
369 b = LLVMBuildAdd(builder, b, lp_build_const_int_vec(i16_type, 1), "");
370 ab = LLVMBuildMul(builder, a, b, "");
371
372 #else
373
374 /* ab/255 ~= (ab + (ab >> 8) + 0x80) >> 8 */
375 ab = LLVMBuildMul(builder, a, b, "");
376 ab = LLVMBuildAdd(builder, ab, LLVMBuildLShr(builder, ab, c8, ""), "");
377 ab = LLVMBuildAdd(builder, ab, lp_build_const_int_vec(i16_type, 0x80), "");
378
379 #endif
380
381 ab = LLVMBuildLShr(builder, ab, c8, "");
382
383 return ab;
384 }
385
386
387 /**
388 * Generate a * b
389 */
390 LLVMValueRef
391 lp_build_mul(struct lp_build_context *bld,
392 LLVMValueRef a,
393 LLVMValueRef b)
394 {
395 const struct lp_type type = bld->type;
396 LLVMValueRef shift;
397 LLVMValueRef res;
398
399 if(a == bld->zero)
400 return bld->zero;
401 if(a == bld->one)
402 return b;
403 if(b == bld->zero)
404 return bld->zero;
405 if(b == bld->one)
406 return a;
407 if(a == bld->undef || b == bld->undef)
408 return bld->undef;
409
410 if(!type.floating && !type.fixed && type.norm) {
411 if(type.width == 8) {
412 struct lp_type i16_type = lp_wider_type(type);
413 LLVMValueRef al, ah, bl, bh, abl, abh, ab;
414
415 lp_build_unpack2(bld->builder, type, i16_type, a, &al, &ah);
416 lp_build_unpack2(bld->builder, type, i16_type, b, &bl, &bh);
417
418 /* PMULLW, PSRLW, PADDW */
419 abl = lp_build_mul_u8n(bld->builder, i16_type, al, bl);
420 abh = lp_build_mul_u8n(bld->builder, i16_type, ah, bh);
421
422 ab = lp_build_pack2(bld->builder, i16_type, type, abl, abh);
423
424 return ab;
425 }
426
427 /* FIXME */
428 assert(0);
429 }
430
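   /* For fixed-point types the operands carry width/2 fractional bits, so
    * their product carries width fractional bits and must be shifted back
    * right by width/2. */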
431 if(type.fixed)
432 shift = lp_build_const_int_vec(type, type.width/2);
433 else
434 shift = NULL;
435
436 if(LLVMIsConstant(a) && LLVMIsConstant(b)) {
437 res = LLVMConstMul(a, b);
438 if(shift) {
439 if(type.sign)
440 res = LLVMConstAShr(res, shift);
441 else
442 res = LLVMConstLShr(res, shift);
443 }
444 }
445 else {
446 res = LLVMBuildMul(bld->builder, a, b, "");
447 if(shift) {
448 if(type.sign)
449 res = LLVMBuildAShr(bld->builder, res, shift, "");
450 else
451 res = LLVMBuildLShr(bld->builder, res, shift, "");
452 }
453 }
454
455 return res;
456 }
457
458
459 /**
460 * Small vector x scale multiplication optimization.
461 */
462 LLVMValueRef
463 lp_build_mul_imm(struct lp_build_context *bld,
464 LLVMValueRef a,
465 int b)
466 {
467 LLVMValueRef factor;
468
469 if(b == 0)
470 return bld->zero;
471
472 if(b == 1)
473 return a;
474
475 if(b == -1)
476 return LLVMBuildNeg(bld->builder, a, "");
477
478 if(b == 2 && bld->type.floating)
479 return lp_build_add(bld, a, a);
480
481 if(util_is_pot(b)) {
482 unsigned shift = ffs(b) - 1;
483
484 if(bld->type.floating) {
485 #if 0
486 /*
487 * Power of two multiplication by directly manipulating the exponent.
488 *
489 * XXX: This might not be always faster, it will introduce a small error
490 * for multiplication by zero, and it will produce wrong results
491 * for Inf and NaN.
492 */
493 unsigned mantissa = lp_mantissa(bld->type);
494 factor = lp_build_const_int_vec(bld->type, (unsigned long long)shift << mantissa);
495 a = LLVMBuildBitCast(bld->builder, a, lp_build_int_vec_type(bld->type), "");
496 a = LLVMBuildAdd(bld->builder, a, factor, "");
497 a = LLVMBuildBitCast(bld->builder, a, lp_build_vec_type(bld->type), "");
498 return a;
499 #endif
500 }
501 else {
502 factor = lp_build_const_vec(bld->type, shift);
503 return LLVMBuildShl(bld->builder, a, factor, "");
504 }
505 }
506
507 factor = lp_build_const_vec(bld->type, (double)b);
508 return lp_build_mul(bld, a, factor);
509 }
510
511
512 /**
513 * Generate a / b
514 */
515 LLVMValueRef
516 lp_build_div(struct lp_build_context *bld,
517 LLVMValueRef a,
518 LLVMValueRef b)
519 {
520 const struct lp_type type = bld->type;
521
522 if(a == bld->zero)
523 return bld->zero;
524 if(a == bld->one)
525 return lp_build_rcp(bld, b);
526 if(b == bld->zero)
527 return bld->undef;
528 if(b == bld->one)
529 return a;
530 if(a == bld->undef || b == bld->undef)
531 return bld->undef;
532
533 if(LLVMIsConstant(a) && LLVMIsConstant(b))
534 return LLVMConstFDiv(a, b);
535
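   /* Note: lp_build_rcp maps to RCPPS here, which delivers only ~12 bits
    * of precision, so this trades accuracy for speed. */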
536 if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
537 return lp_build_mul(bld, a, lp_build_rcp(bld, b));
538
539 return LLVMBuildFDiv(bld->builder, a, b, "");
540 }
541
542
543 /**
544 * Linear interpolation.
545 *
546 * This also works for integer values with a few caveats.
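 * Computed as v0 + x * (v1 - v0), which needs only a single multiply.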
547 *
548 * @sa http://www.stereopsis.com/doubleblend.html
549 */
550 LLVMValueRef
551 lp_build_lerp(struct lp_build_context *bld,
552 LLVMValueRef x,
553 LLVMValueRef v0,
554 LLVMValueRef v1)
555 {
556 LLVMValueRef delta;
557 LLVMValueRef res;
558
559 delta = lp_build_sub(bld, v1, v0);
560
561 res = lp_build_mul(bld, x, delta);
562
563 res = lp_build_add(bld, v0, res);
564
565 if(bld->type.fixed)
566 /* XXX: This step is necessary for lerping 8bit colors stored on 16bits,
567 * but it will be wrong for other uses. Basically we need a more
568 * powerful lp_type, capable of further distinguishing the values
569 * interpretation from the value storage. */
570 res = LLVMBuildAnd(bld->builder, res, lp_build_const_int_vec(bld->type, (1 << bld->type.width/2) - 1), "");
571
572 return res;
573 }
574
575
576 LLVMValueRef
577 lp_build_lerp_2d(struct lp_build_context *bld,
578 LLVMValueRef x,
579 LLVMValueRef y,
580 LLVMValueRef v00,
581 LLVMValueRef v01,
582 LLVMValueRef v10,
583 LLVMValueRef v11)
584 {
585 LLVMValueRef v0 = lp_build_lerp(bld, x, v00, v01);
586 LLVMValueRef v1 = lp_build_lerp(bld, x, v10, v11);
587 return lp_build_lerp(bld, y, v0, v1);
588 }
589
590
591 /**
592 * Generate min(a, b)
593 * Do checks for special cases.
594 */
595 LLVMValueRef
596 lp_build_min(struct lp_build_context *bld,
597 LLVMValueRef a,
598 LLVMValueRef b)
599 {
600 if(a == bld->undef || b == bld->undef)
601 return bld->undef;
602
603 if(a == b)
604 return a;
605
606 if(bld->type.norm) {
607 if(a == bld->zero || b == bld->zero)
608 return bld->zero;
609 if(a == bld->one)
610 return b;
611 if(b == bld->one)
612 return a;
613 }
614
615 return lp_build_min_simple(bld, a, b);
616 }
617
618
619 /**
620 * Generate max(a, b)
621 * Do checks for special cases.
622 */
623 LLVMValueRef
624 lp_build_max(struct lp_build_context *bld,
625 LLVMValueRef a,
626 LLVMValueRef b)
627 {
628 if(a == bld->undef || b == bld->undef)
629 return bld->undef;
630
631 if(a == b)
632 return a;
633
634 if(bld->type.norm) {
635 if(a == bld->one || b == bld->one)
636 return bld->one;
637 if(a == bld->zero)
638 return b;
639 if(b == bld->zero)
640 return a;
641 }
642
643 return lp_build_max_simple(bld, a, b);
644 }
645
646
647 /**
648 * Generate clamp(a, min, max)
649 * Do checks for special cases.
650 */
651 LLVMValueRef
652 lp_build_clamp(struct lp_build_context *bld,
653 LLVMValueRef a,
654 LLVMValueRef min,
655 LLVMValueRef max)
656 {
657 a = lp_build_min(bld, a, max);
658 a = lp_build_max(bld, a, min);
659 return a;
660 }
661
662
663 /**
664 * Generate abs(a)
665 */
666 LLVMValueRef
667 lp_build_abs(struct lp_build_context *bld,
668 LLVMValueRef a)
669 {
670 const struct lp_type type = bld->type;
671 LLVMTypeRef vec_type = lp_build_vec_type(type);
672
673 if(!type.sign)
674 return a;
675
676 if(type.floating) {
677 /* Mask out the sign bit */
678 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
679 unsigned long long absMask = ~(1ULL << (type.width - 1));
680 LLVMValueRef mask = lp_build_const_int_vec(type, ((unsigned long long) absMask));
681 a = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
682 a = LLVMBuildAnd(bld->builder, a, mask, "");
683 a = LLVMBuildBitCast(bld->builder, a, vec_type, "");
684 return a;
685 }
686
687 if(type.width*type.length == 128 && util_cpu_caps.has_ssse3) {
688 switch(type.width) {
689 case 8:
690 return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.b.128", vec_type, a);
691 case 16:
692 return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.w.128", vec_type, a);
693 case 32:
694 return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.d.128", vec_type, a);
695 }
696 }
697
698 return lp_build_max(bld, a, LLVMBuildNeg(bld->builder, a, ""));
699 }
700
701
702 LLVMValueRef
703 lp_build_negate(struct lp_build_context *bld,
704 LLVMValueRef a)
705 {
706 return LLVMBuildNeg(bld->builder, a, "");
707 }
708
709
710 /** Return -1, 0 or +1 depending on the sign of a */
711 LLVMValueRef
712 lp_build_sgn(struct lp_build_context *bld,
713 LLVMValueRef a)
714 {
715 const struct lp_type type = bld->type;
716 LLVMValueRef cond;
717 LLVMValueRef res;
718
719 /* Handle non-zero case */
720 if(!type.sign) {
721 /* if not zero then sign must be positive */
722 res = bld->one;
723 }
724 else if(type.floating) {
725 LLVMTypeRef vec_type;
726 LLVMTypeRef int_type;
727 LLVMValueRef mask;
728 LLVMValueRef sign;
729 LLVMValueRef one;
730 unsigned long long maskBit = (unsigned long long)1 << (type.width - 1);
731
732 int_type = lp_build_int_vec_type(type);
733 vec_type = lp_build_vec_type(type);
734 mask = lp_build_const_int_vec(type, maskBit);
735
736 /* Take the sign bit and OR it into the constant 1.0 */
737 sign = LLVMBuildBitCast(bld->builder, a, int_type, "");
738 sign = LLVMBuildAnd(bld->builder, sign, mask, "");
739 one = LLVMConstBitCast(bld->one, int_type);
740 res = LLVMBuildOr(bld->builder, sign, one, "");
741 res = LLVMBuildBitCast(bld->builder, res, vec_type, "");
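      /* e.g. for a = -3.5f the isolated sign word is 0x80000000; OR'd with
       * 1.0f (0x3f800000) it gives 0xbf800000, i.e. -1.0f. */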
742 }
743 else
744 {
745 LLVMValueRef minus_one = lp_build_const_vec(type, -1.0);
746 cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, bld->zero);
747 res = lp_build_select(bld, cond, bld->one, minus_one);
748 }
749
750 /* Handle zero */
751 cond = lp_build_cmp(bld, PIPE_FUNC_EQUAL, a, bld->zero);
752 res = lp_build_select(bld, cond, bld->zero, res);
753
754 return res;
755 }
756
757
758 /**
759 * Set the sign of float vector 'a' according to 'sign'.
760 * If sign==0, return abs(a).
761 * If sign==1, return -abs(a);
762 * Other values for sign produce undefined results.
763 */
764 LLVMValueRef
765 lp_build_set_sign(struct lp_build_context *bld,
766 LLVMValueRef a, LLVMValueRef sign)
767 {
768 const struct lp_type type = bld->type;
769 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
770 LLVMTypeRef vec_type = lp_build_vec_type(type);
771 LLVMValueRef shift = lp_build_const_int_vec(type, type.width - 1);
772 LLVMValueRef mask = lp_build_const_int_vec(type,
773 ~((unsigned long long) 1 << (type.width - 1)));
774 LLVMValueRef val, res;
775
776 assert(type.floating);
777
778 /* val = reinterpret_cast<int>(a) */
779 val = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
780 /* val = val & mask */
781 val = LLVMBuildAnd(bld->builder, val, mask, "");
782 /* sign = sign << shift */
783 sign = LLVMBuildShl(bld->builder, sign, shift, "");
784 /* res = val | sign */
785 res = LLVMBuildOr(bld->builder, val, sign, "");
786 /* res = reinterpret_cast<float>(res) */
787 res = LLVMBuildBitCast(bld->builder, res, vec_type, "");
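   /* e.g. a = 2.5f, sign = 1: the old sign bit is cleared and 1 << 31 is
    * OR'd in, yielding -2.5f. */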
788
789 return res;
790 }
791
792
793 /**
794 * Convert vector of (or scalar) int to vector of (or scalar) float.
795 */
796 LLVMValueRef
797 lp_build_int_to_float(struct lp_build_context *bld,
798 LLVMValueRef a)
799 {
800 const struct lp_type type = bld->type;
801 LLVMTypeRef vec_type = lp_build_vec_type(type);
802
803 assert(type.floating);
804
805 return LLVMBuildSIToFP(bld->builder, a, vec_type, "");
806 }
807
808
809
810 enum lp_build_round_sse41_mode
811 {
812 LP_BUILD_ROUND_SSE41_NEAREST = 0,
813 LP_BUILD_ROUND_SSE41_FLOOR = 1,
814 LP_BUILD_ROUND_SSE41_CEIL = 2,
815 LP_BUILD_ROUND_SSE41_TRUNCATE = 3
816 };
817
818
819 static INLINE LLVMValueRef
820 lp_build_round_sse41(struct lp_build_context *bld,
821 LLVMValueRef a,
822 enum lp_build_round_sse41_mode mode)
823 {
824 const struct lp_type type = bld->type;
825 LLVMTypeRef vec_type = lp_build_vec_type(type);
826 const char *intrinsic;
827
828 assert(type.floating);
829 assert(type.width*type.length == 128);
830 assert(lp_check_value(type, a));
831 assert(util_cpu_caps.has_sse4_1);
832
833 switch(type.width) {
834 case 32:
835 intrinsic = "llvm.x86.sse41.round.ps";
836 break;
837 case 64:
838 intrinsic = "llvm.x86.sse41.round.pd";
839 break;
840 default:
841 assert(0);
842 return bld->undef;
843 }
844
845 return lp_build_intrinsic_binary(bld->builder, intrinsic, vec_type, a,
846 LLVMConstInt(LLVMInt32Type(), mode, 0));
847 }
848
849
850 /**
851 * Return the integer part of a float (vector) value. The returned value is
852 * a float (vector).
853 * Ex: trunc(-1.5) = -1.0
854 */
855 LLVMValueRef
856 lp_build_trunc(struct lp_build_context *bld,
857 LLVMValueRef a)
858 {
859 const struct lp_type type = bld->type;
860
861 assert(type.floating);
862 assert(lp_check_value(type, a));
863
864 if (util_cpu_caps.has_sse4_1 && type.width*type.length == 128)
865 return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_TRUNCATE);
866 else {
867 LLVMTypeRef vec_type = lp_build_vec_type(type);
868 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
869 LLVMValueRef res;
870 res = LLVMBuildFPToSI(bld->builder, a, int_vec_type, "");
871 res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
872 return res;
873 }
874 }
875
876
877 /**
878 * Return float (vector) rounded to nearest integer (vector). The returned
879 * value is a float (vector).
880 * Ex: round(0.9) = 1.0
881 * Ex: round(-1.5) = -2.0
882 */
883 LLVMValueRef
884 lp_build_round(struct lp_build_context *bld,
885 LLVMValueRef a)
886 {
887 const struct lp_type type = bld->type;
888
889 assert(type.floating);
890 assert(lp_check_value(type, a));
891
892 if (util_cpu_caps.has_sse4_1 && type.width*type.length == 128)
893 return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_NEAREST);
894 else {
895 LLVMTypeRef vec_type = lp_build_vec_type(type);
896 LLVMValueRef res;
897 res = lp_build_iround(bld, a);
898 res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
899 return res;
900 }
901 }
902
903
904 /**
905 * Return floor of float (vector), result is a float (vector)
906 * Ex: floor(1.1) = 1.0
907 * Ex: floor(-1.1) = -2.0
908 */
909 LLVMValueRef
910 lp_build_floor(struct lp_build_context *bld,
911 LLVMValueRef a)
912 {
913 const struct lp_type type = bld->type;
914
915 assert(type.floating);
916 assert(lp_check_value(type, a));
917
918 if (util_cpu_caps.has_sse4_1 && type.width*type.length == 128)
919 return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
920 else {
921 LLVMTypeRef vec_type = lp_build_vec_type(type);
922 LLVMValueRef res;
923 res = lp_build_ifloor(bld, a);
924 res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
925 return res;
926 }
927 }
928
929
930 /**
931 * Return ceiling of float (vector), returning float (vector).
932 * Ex: ceil( 1.1) = 2.0
933 * Ex: ceil(-1.1) = -1.0
934 */
935 LLVMValueRef
936 lp_build_ceil(struct lp_build_context *bld,
937 LLVMValueRef a)
938 {
939 const struct lp_type type = bld->type;
940
941 assert(type.floating);
942 assert(lp_check_value(type, a));
943
944 if (util_cpu_caps.has_sse4_1 && type.width*type.length == 128)
945 return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_CEIL);
946 else {
947 LLVMTypeRef vec_type = lp_build_vec_type(type);
948 LLVMValueRef res;
949 res = lp_build_iceil(bld, a);
950 res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
951 return res;
952 }
953 }
954
955
956 /**
957 * Return fractional part of 'a' computed as a - floor(a)
958 * Typically used in texture coord arithmetic.
959 */
960 LLVMValueRef
961 lp_build_fract(struct lp_build_context *bld,
962 LLVMValueRef a)
963 {
964 assert(bld->type.floating);
965 return lp_build_sub(bld, a, lp_build_floor(bld, a));
966 }
967
968
969 /**
970 * Return the integer part of a float (vector) value. The returned value is
971 * an integer (vector).
972 * Ex: itrunc(-1.5) = -1
973 */
974 LLVMValueRef
975 lp_build_itrunc(struct lp_build_context *bld,
976 LLVMValueRef a)
977 {
978 const struct lp_type type = bld->type;
979 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
980
981 assert(type.floating);
982 assert(lp_check_value(type, a));
983
984 return LLVMBuildFPToSI(bld->builder, a, int_vec_type, "");
985 }
986
987
988 /**
989 * Return float (vector) rounded to nearest integer (vector). The returned
990 * value is an integer (vector).
991 * Ex: iround(0.9) = 1
992 * Ex: iround(-1.5) = -2
993 */
994 LLVMValueRef
995 lp_build_iround(struct lp_build_context *bld,
996 LLVMValueRef a)
997 {
998 const struct lp_type type = bld->type;
999 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
1000 LLVMValueRef res;
1001
1002 assert(type.floating);
1003
1004 assert(lp_check_value(type, a));
1005
1006 if (util_cpu_caps.has_sse4_1 && type.width*type.length == 128) {
1007 res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_NEAREST);
1008 }
1009 else {
1010 LLVMTypeRef vec_type = lp_build_vec_type(type);
1011 LLVMValueRef mask = lp_build_const_int_vec(type, (unsigned long long)1 << (type.width - 1));
1012 LLVMValueRef sign;
1013 LLVMValueRef half;
1014
1015 /* get sign bit */
1016 sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
1017 sign = LLVMBuildAnd(bld->builder, sign, mask, "");
1018
1019 /* half = copysign(0.5, a) */
1020 half = lp_build_const_vec(type, 0.5);
1021 half = LLVMBuildBitCast(bld->builder, half, int_vec_type, "");
1022 half = LLVMBuildOr(bld->builder, sign, half, "");
1023 half = LLVMBuildBitCast(bld->builder, half, vec_type, "");
1024
1025 res = LLVMBuildAdd(bld->builder, a, half, "");
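      /* e.g. a = -1.5: half picks up a's sign bit, so a + half =
       * -1.5 + -0.5 = -2.0, which truncates to -2; a = 0.9 gives
       * 0.9 + 0.5 = 1.4, which truncates to 1. */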
1026 }
1027
1028 res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "");
1029
1030 return res;
1031 }
1032
1033
1034 /**
1035 * Return floor of float (vector), result is an int (vector)
1036 * Ex: ifloor(1.1) = 1
1037 * Ex: ifloor(-1.1) = -2
1038 */
1039 LLVMValueRef
1040 lp_build_ifloor(struct lp_build_context *bld,
1041 LLVMValueRef a)
1042 {
1043 const struct lp_type type = bld->type;
1044 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
1045 LLVMValueRef res;
1046
1047 assert(type.floating);
1048 assert(lp_check_value(type, a));
1049
1050 if (util_cpu_caps.has_sse4_1 && type.width*type.length == 128) {
1051 res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
1052 }
1053 else {
1054 /* Use the sign bit of a to select a -0.999...9 offset for negative values */
1055 LLVMTypeRef vec_type = lp_build_vec_type(type);
1056 unsigned mantissa = lp_mantissa(type);
1057 LLVMValueRef mask = lp_build_const_int_vec(type, (unsigned long long)1 << (type.width - 1));
1058 LLVMValueRef sign;
1059 LLVMValueRef offset;
1060
1061 /* sign = a < 0 ? ~0 : 0 */
1062 sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
1063 sign = LLVMBuildAnd(bld->builder, sign, mask, "");
1064 sign = LLVMBuildAShr(bld->builder, sign, lp_build_const_int_vec(type, type.width - 1), "ifloor.sign");
1065
1066 /* offset = -0.99999(9)f */
1067 offset = lp_build_const_vec(type, -(double)(((unsigned long long)1 << mantissa) - 10)/((unsigned long long)1 << mantissa));
1068 offset = LLVMConstBitCast(offset, int_vec_type);
1069
1070 /* offset = a < 0 ? offset : 0.0f */
1071 offset = LLVMBuildAnd(bld->builder, offset, sign, "");
1072 offset = LLVMBuildBitCast(bld->builder, offset, vec_type, "ifloor.offset");
1073
1074 res = LLVMBuildAdd(bld->builder, a, offset, "ifloor.res");
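      /* e.g. a = -1.1: a + offset = -2.0999..., which the truncation below
       * rounds to -2; for a >= 0 the offset is zero, so e.g. 1.1 simply
       * truncates to 1. */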
1075 }
1076
1077 /* truncate (round toward zero) */
1078 res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "ifloor.res");
1079
1080 return res;
1081 }
1082
1083
1084 /**
1085 * Return ceiling of float (vector), returning int (vector).
1086 * Ex: iceil( 1.1) = 2
1087 * Ex: iceil(-1.1) = -1
1088 */
1089 LLVMValueRef
1090 lp_build_iceil(struct lp_build_context *bld,
1091 LLVMValueRef a)
1092 {
1093 const struct lp_type type = bld->type;
1094 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
1095 LLVMValueRef res;
1096
1097 assert(type.floating);
1098 assert(lp_check_value(type, a));
1099
1100 if (util_cpu_caps.has_sse4_1 && type.width*type.length == 128) {
1101 res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_CEIL);
1102 }
1103 else {
1104 LLVMTypeRef vec_type = lp_build_vec_type(type);
1105 unsigned mantissa = lp_mantissa(type);
1106 LLVMValueRef mask = lp_build_const_int_vec(type, (unsigned long long)1 << (type.width - 1));
1107 LLVMValueRef sign;
1108 LLVMValueRef offset;
1109
1110 /* sign = a < 0 ? 0 : ~0 */
1111 sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
1112 sign = LLVMBuildAnd(bld->builder, sign, mask, "");
1113 sign = LLVMBuildAShr(bld->builder, sign, lp_build_const_int_vec(type, type.width - 1), "iceil.sign");
1114 sign = LLVMBuildNot(bld->builder, sign, "iceil.not");
1115
1116 /* offset = 0.99999(9)f */
1117 offset = lp_build_const_vec(type, (double)(((unsigned long long)1 << mantissa) - 10)/((unsigned long long)1 << mantissa));
1118 offset = LLVMConstBitCast(offset, int_vec_type);
1119
1120 /* offset = a < 0 ? 0.0 : offset */
1121 offset = LLVMBuildAnd(bld->builder, offset, sign, "");
1122 offset = LLVMBuildBitCast(bld->builder, offset, vec_type, "iceil.offset");
1123
1124 res = LLVMBuildAdd(bld->builder, a, offset, "iceil.res");
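      /* e.g. a = 1.1: a + offset = 2.0999..., which the truncation below
       * rounds to 2; for a < 0 the offset is zero, so e.g. -1.1 simply
       * truncates to -1. */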
1125 }
1126
1127 /* truncate (round toward zero) */
1128 res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "iceil.res");
1129
1130 return res;
1131 }
1132
1133
1134 LLVMValueRef
1135 lp_build_sqrt(struct lp_build_context *bld,
1136 LLVMValueRef a)
1137 {
1138 const struct lp_type type = bld->type;
1139 LLVMTypeRef vec_type = lp_build_vec_type(type);
1140 char intrinsic[32];
1141
1142 /* TODO: optimize the constant case */
1144
1145 assert(type.floating);
1146 util_snprintf(intrinsic, sizeof intrinsic, "llvm.sqrt.v%uf%u", type.length, type.width);
1147
1148 return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
1149 }
1150
1151
1152 LLVMValueRef
1153 lp_build_rcp(struct lp_build_context *bld,
1154 LLVMValueRef a)
1155 {
1156 const struct lp_type type = bld->type;
1157
1158 if(a == bld->zero)
1159 return bld->undef;
1160 if(a == bld->one)
1161 return bld->one;
1162 if(a == bld->undef)
1163 return bld->undef;
1164
1165 assert(type.floating);
1166
1167 if(LLVMIsConstant(a))
1168 return LLVMConstFDiv(bld->one, a);
1169
1170 if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4) {
1171 /*
1172 * XXX: Added precision is not always necessary, so only enable this
1173 * when we have a better system in place to track minimum precision.
1174 */
1175
1176 #if 0
1177 /*
1178 * Do one Newton-Raphson step to improve precision:
1179 *
1180 * x1 = (2 - a * rcp(a)) * rcp(a)
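 *
 * which roughly doubles the ~12 bits of precision of the raw RCPPS
 * approximation.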
1181 */
1182
1183 LLVMValueRef two = lp_build_const_vec(bld->type, 2.0);
1184 LLVMValueRef rcp_a;
1185 LLVMValueRef res;
1186
1187 rcp_a = lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rcp.ps", lp_build_vec_type(type), a);
1188
1189 res = LLVMBuildMul(bld->builder, a, rcp_a, "");
1190 res = LLVMBuildSub(bld->builder, two, res, "");
1191 res = LLVMBuildMul(bld->builder, res, rcp_a, "");
1192
1193 return res;
1194 #else
1195 return lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rcp.ps", lp_build_vec_type(type), a);
1196 #endif
1197 }
1198
1199 return LLVMBuildFDiv(bld->builder, bld->one, a, "");
1200 }
1201
1202
1203 /**
1204 * Generate 1/sqrt(a)
1205 */
1206 LLVMValueRef
1207 lp_build_rsqrt(struct lp_build_context *bld,
1208 LLVMValueRef a)
1209 {
1210 const struct lp_type type = bld->type;
1211
1212 assert(type.floating);
1213
1214 if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
1215 return lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rsqrt.ps", lp_build_vec_type(type), a);
1216
1217 return lp_build_rcp(bld, lp_build_sqrt(bld, a));
1218 }
1219
1220
1221 static inline LLVMValueRef
1222 lp_build_const_v4si(unsigned long value)
1223 {
1224 LLVMValueRef element = LLVMConstInt(LLVMInt32Type(), value, 0);
1225 LLVMValueRef elements[4] = { element, element, element, element };
1226 return LLVMConstVector(elements, 4);
1227 }
1228
1229 static inline LLVMValueRef
1230 lp_build_const_v4sf(float value)
1231 {
1232 LLVMValueRef element = LLVMConstReal(LLVMFloatType(), value);
1233 LLVMValueRef elements[4] = { element, element, element, element };
1234 return LLVMConstVector(elements, 4);
1235 }
1236
1237
1238 /**
1239 * Generate sin(a) using SSE2
1240 */
1241 LLVMValueRef
1242 lp_build_sin(struct lp_build_context *bld,
1243 LLVMValueRef a)
1244 {
1245 struct lp_type int_type = lp_int_type(bld->type);
1246 LLVMBuilderRef b = bld->builder;
1247 LLVMTypeRef v4sf = LLVMVectorType(LLVMFloatType(), 4);
1248 LLVMTypeRef v4si = LLVMVectorType(LLVMInt32Type(), 4);
1249
1250 /*
1251 * take the absolute value,
1252 * x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
1253 */
1254
1255 LLVMValueRef inv_sig_mask = lp_build_const_v4si(~0x80000000);
1256 LLVMValueRef a_v4si = LLVMBuildBitCast(b, a, v4si, "a_v4si");
1257
1258 LLVMValueRef absi = LLVMBuildAnd(b, a_v4si, inv_sig_mask, "absi");
1259 LLVMValueRef x_abs = LLVMBuildBitCast(b, absi, v4sf, "x_abs");
1260
1261 /*
1262 * extract the sign bit (upper one)
1263 * sign_bit = _mm_and_ps(sign_bit, *(v4sf*)_ps_sign_mask);
1264 */
1265 LLVMValueRef sig_mask = lp_build_const_v4si(0x80000000);
1266 LLVMValueRef sign_bit_i = LLVMBuildAnd(b, a_v4si, sig_mask, "sign_bit_i");
1267
1268 /*
1269 * scale by 4/Pi
1270 * y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
1271 */
1272
1273 LLVMValueRef FOPi = lp_build_const_v4sf(1.27323954473516);
1274 LLVMValueRef scale_y = LLVMBuildMul(b, x_abs, FOPi, "scale_y");
1275
1276 /*
1277 * store the integer part of y in mm0
1278 * emm2 = _mm_cvttps_epi32(y);
1279 */
1280
1281 LLVMValueRef emm2_i = LLVMBuildFPToSI(b, scale_y, v4si, "emm2_i");
1282
1283 /*
1284 * j=(j+1) & (~1) (see the cephes sources)
1285 * emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
1286 */
1287
1288 LLVMValueRef all_one = lp_build_const_v4si(1);
1289 LLVMValueRef emm2_add = LLVMBuildAdd(b, emm2_i, all_one, "emm2_add");
1290 /*
1291 * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
1292 */
1293 LLVMValueRef inv_one = lp_build_const_v4si(~1);
1294 LLVMValueRef emm2_and = LLVMBuildAnd(b, emm2_add, inv_one, "emm2_and");
1295
1296 /*
1297 * y = _mm_cvtepi32_ps(emm2);
1298 */
1299 LLVMValueRef y_2 = LLVMBuildSIToFP(b, emm2_and, v4sf, "y_2");
1300
1301 /* get the swap sign flag
1302 * emm0 = _mm_and_si128(emm2, *(v4si*)_pi32_4);
1303 */
1304 LLVMValueRef pi32_4 = lp_build_const_v4si(4);
1305 LLVMValueRef emm0_and = LLVMBuildAnd(b, emm2_add, pi32_4, "emm0_and");
1306
1307 /*
1308 * emm2 = _mm_slli_epi32(emm0, 29);
1309 */
1310 LLVMValueRef const_29 = lp_build_const_v4si(29);
1311 LLVMValueRef swap_sign_bit = LLVMBuildShl(b, emm0_and, const_29, "swap_sign_bit");
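   /* The shift by 29 moves bit 2 (value 4) of the octant index into the
    * float sign position (bit 31); XORing it in later flips the sign of
    * the result for the half of the circle where sin is negative. */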
1312
1313 /*
1314 * get the polynom selection mask
1315 * there is one polynom for 0 <= x <= Pi/4
1316 * and another one for Pi/4<x<=Pi/2
1317 * Both branches will be computed.
1318 *
1319 * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
1320 * emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
1321 */
1322
1323 LLVMValueRef pi32_2 = lp_build_const_v4si(2);
1324 LLVMValueRef emm2_3 = LLVMBuildAnd(b, emm2_and, pi32_2, "emm2_3");
1325 LLVMValueRef poly_mask = lp_build_compare(b, int_type, PIPE_FUNC_EQUAL,
1326 emm2_3, lp_build_const_v4si(0));
1327 /*
1328 * sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
1329 */
1330 LLVMValueRef sign_bit_1 = LLVMBuildXor(b, sign_bit_i, swap_sign_bit, "sign_bit");
1331
1332 /*
1333 * _PS_CONST(minus_cephes_DP1, -0.78515625);
1334 * _PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
1335 * _PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
1336 */
1337 LLVMValueRef DP1 = lp_build_const_v4sf(-0.78515625);
1338 LLVMValueRef DP2 = lp_build_const_v4sf(-2.4187564849853515625e-4);
1339 LLVMValueRef DP3 = lp_build_const_v4sf(-3.77489497744594108e-8);
1340
1341 /*
1342 * The magic pass: "Extended precision modular arithmetic"
1343 * x = ((x - y * DP1) - y * DP2) - y * DP3;
1344 * xmm1 = _mm_mul_ps(y, xmm1);
1345 * xmm2 = _mm_mul_ps(y, xmm2);
1346 * xmm3 = _mm_mul_ps(y, xmm3);
1347 */
1348 LLVMValueRef xmm1 = LLVMBuildMul(b, y_2, DP1, "xmm1");
1349 LLVMValueRef xmm2 = LLVMBuildMul(b, y_2, DP2, "xmm2");
1350 LLVMValueRef xmm3 = LLVMBuildMul(b, y_2, DP3, "xmm3");
1351
1352 /*
1353 * x = _mm_add_ps(x, xmm1);
1354 * x = _mm_add_ps(x, xmm2);
1355 * x = _mm_add_ps(x, xmm3);
1356 */
1357
1358 LLVMValueRef x_1 = LLVMBuildAdd(b, x_abs, xmm1, "x_1");
1359 LLVMValueRef x_2 = LLVMBuildAdd(b, x_1, xmm2, "x_2");
1360 LLVMValueRef x_3 = LLVMBuildAdd(b, x_2, xmm3, "x_3");
1361
1362 /*
1363 * Evaluate the first polynom (0 <= x <= Pi/4)
1364 *
1365 * z = _mm_mul_ps(x,x);
1366 */
1367 LLVMValueRef z = LLVMBuildMul(b, x_3, x_3, "z");
1368
1369 /*
1370 * _PS_CONST(coscof_p0, 2.443315711809948E-005);
1371 * _PS_CONST(coscof_p1, -1.388731625493765E-003);
1372 * _PS_CONST(coscof_p2, 4.166664568298827E-002);
1373 */
1374 LLVMValueRef coscof_p0 = lp_build_const_v4sf(2.443315711809948E-005);
1375 LLVMValueRef coscof_p1 = lp_build_const_v4sf(-1.388731625493765E-003);
1376 LLVMValueRef coscof_p2 = lp_build_const_v4sf(4.166664568298827E-002);
1377
1378 /*
1379 * y = *(v4sf*)_ps_coscof_p0;
1380 * y = _mm_mul_ps(y, z);
1381 */
1382 LLVMValueRef y_3 = LLVMBuildMul(b, z, coscof_p0, "y_3");
1383 LLVMValueRef y_4 = LLVMBuildAdd(b, y_3, coscof_p1, "y_4");
1384 LLVMValueRef y_5 = LLVMBuildMul(b, y_4, z, "y_5");
1385 LLVMValueRef y_6 = LLVMBuildAdd(b, y_5, coscof_p2, "y_6");
1386 LLVMValueRef y_7 = LLVMBuildMul(b, y_6, z, "y_7");
1387 LLVMValueRef y_8 = LLVMBuildMul(b, y_7, z, "y_8");
1388
1389
1390 /*
1391 * tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
1392 * y = _mm_sub_ps(y, tmp);
1393 * y = _mm_add_ps(y, *(v4sf*)_ps_1);
1394 */
1395 LLVMValueRef half = lp_build_const_v4sf(0.5);
1396 LLVMValueRef tmp = LLVMBuildMul(b, z, half, "tmp");
1397 LLVMValueRef y_9 = LLVMBuildSub(b, y_8, tmp, "y_8");
1398 LLVMValueRef one = lp_build_const_v4sf(1.0);
1399 LLVMValueRef y_10 = LLVMBuildAdd(b, y_9, one, "y_9");
1400
1401 /*
1402 * _PS_CONST(sincof_p0, -1.9515295891E-4);
1403 * _PS_CONST(sincof_p1, 8.3321608736E-3);
1404 * _PS_CONST(sincof_p2, -1.6666654611E-1);
1405 */
1406 LLVMValueRef sincof_p0 = lp_build_const_v4sf(-1.9515295891E-4);
1407 LLVMValueRef sincof_p1 = lp_build_const_v4sf(8.3321608736E-3);
1408 LLVMValueRef sincof_p2 = lp_build_const_v4sf(-1.6666654611E-1);
1409
1410 /*
1411 * Evaluate the second polynom (0 <= x <= Pi/4)
1412 *
1413 * y2 = *(v4sf*)_ps_sincof_p0;
1414 * y2 = _mm_mul_ps(y2, z);
1415 * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
1416 * y2 = _mm_mul_ps(y2, z);
1417 * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
1418 * y2 = _mm_mul_ps(y2, z);
1419 * y2 = _mm_mul_ps(y2, x);
1420 * y2 = _mm_add_ps(y2, x);
1421 */
1422
1423 LLVMValueRef y2_3 = LLVMBuildMul(b, z, sincof_p0, "y2_3");
1424 LLVMValueRef y2_4 = LLVMBuildAdd(b, y2_3, sincof_p1, "y2_4");
1425 LLVMValueRef y2_5 = LLVMBuildMul(b, y2_4, z, "y2_5");
1426 LLVMValueRef y2_6 = LLVMBuildAdd(b, y2_5, sincof_p2, "y2_6");
1427 LLVMValueRef y2_7 = LLVMBuildMul(b, y2_6, z, "y2_7");
1428 LLVMValueRef y2_8 = LLVMBuildMul(b, y2_7, x_3, "y2_8");
1429 LLVMValueRef y2_9 = LLVMBuildAdd(b, y2_8, x_3, "y2_9");
1430
1431 /*
1432 * select the correct result from the two polynoms
1433 * xmm3 = poly_mask;
1434 * y2 = _mm_and_ps(xmm3, y2); //, xmm3);
1435 * y = _mm_andnot_ps(xmm3, y);
1436 * y = _mm_add_ps(y,y2);
1437 */
1438 LLVMValueRef y2_i = LLVMBuildBitCast(b, y2_9, v4si, "y2_i");
1439 LLVMValueRef y_i = LLVMBuildBitCast(b, y_10, v4si, "y_i");
1440 LLVMValueRef y2_and = LLVMBuildAnd(b, y2_i, poly_mask, "y2_and");
1441 LLVMValueRef inv = lp_build_const_v4si(~0);
1442 LLVMValueRef poly_mask_inv = LLVMBuildXor(b, poly_mask, inv, "poly_mask_inv");
1443 LLVMValueRef y_and = LLVMBuildAnd(b, y_i, poly_mask_inv, "y_and");
1444 LLVMValueRef y_combine = LLVMBuildAdd(b, y_and, y2_and, "y_combine");
1445
1446 /*
1447 * update the sign
1448 * y = _mm_xor_ps(y, sign_bit);
1449 */
1450 LLVMValueRef y_sign = LLVMBuildXor(b, y_combine, sign_bit_1, "y_sin");
1451 LLVMValueRef y_result = LLVMBuildBitCast(b, y_sign, v4sf, "y_result");
1452 return y_result;
1453 }
1454
1455
1456 /**
1457 * Generate cos(a) using SSE2
1458 */
1459 LLVMValueRef
1460 lp_build_cos(struct lp_build_context *bld,
1461 LLVMValueRef a)
1462 {
1463 struct lp_type int_type = lp_int_type(bld->type);
1464 LLVMBuilderRef b = bld->builder;
1465 LLVMTypeRef v4sf = LLVMVectorType(LLVMFloatType(), 4);
1466 LLVMTypeRef v4si = LLVMVectorType(LLVMInt32Type(), 4);
1467
1468 /*
1469 * take the absolute value,
1470 * x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
1471 */
1472
1473 LLVMValueRef inv_sig_mask = lp_build_const_v4si(~0x80000000);
1474 LLVMValueRef a_v4si = LLVMBuildBitCast(b, a, v4si, "a_v4si");
1475
1476 LLVMValueRef absi = LLVMBuildAnd(b, a_v4si, inv_sig_mask, "absi");
1477 LLVMValueRef x_abs = LLVMBuildBitCast(b, absi, v4sf, "x_abs");
1478
1479 /*
1480 * scale by 4/Pi
1481 * y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
1482 */
1483
1484 LLVMValueRef FOPi = lp_build_const_v4sf(1.27323954473516);
1485 LLVMValueRef scale_y = LLVMBuildMul(b, x_abs, FOPi, "scale_y");
1486
1487 /*
1488 * store the integer part of y in mm0
1489 * emm2 = _mm_cvttps_epi32(y);
1490 */
1491
1492 LLVMValueRef emm2_i = LLVMBuildFPToSI(b, scale_y, v4si, "emm2_i");
1493
1494 /*
1495 * j=(j+1) & (~1) (see the cephes sources)
1496 * emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
1497 */
1498
1499 LLVMValueRef all_one = lp_build_const_v4si(1);
1500 LLVMValueRef emm2_add = LLVMBuildAdd(b, emm2_i, all_one, "emm2_add");
1501 /*
1502 * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
1503 */
1504 LLVMValueRef inv_one = lp_build_const_v4si(~1);
1505 LLVMValueRef emm2_and = LLVMBuildAnd(b, emm2_add, inv_one, "emm2_and");
1506
1507 /*
1508 * y = _mm_cvtepi32_ps(emm2);
1509 */
1510 LLVMValueRef y_2 = LLVMBuildSIToFP(b, emm2_and, v4sf, "y_2");
1511
1512
1513 /*
1514 * emm2 = _mm_sub_epi32(emm2, *(v4si*)_pi32_2);
1515 */
1516 LLVMValueRef const_2 = lp_build_const_v4si(2);
1517 LLVMValueRef emm2_2 = LLVMBuildSub(b, emm2_and, const_2, "emm2_2");
1518
1519
1520 /* get the swap sign flag
1521 * emm0 = _mm_andnot_si128(emm2, *(v4si*)_pi32_4);
1522 */
1523 LLVMValueRef inv = lp_build_const_v4si(~0);
1524 LLVMValueRef emm0_not = LLVMBuildXor(b, emm2_2, inv, "emm0_not");
1525 LLVMValueRef pi32_4 = lp_build_const_v4si(4);
1526 LLVMValueRef emm0_and = LLVMBuildAnd(b, emm0_not, pi32_4, "emm0_and");
1527
1528 /*
1529 * emm2 = _mm_slli_epi32(emm0, 29);
1530 */
1531 LLVMValueRef const_29 = lp_build_const_v4si(29);
1532 LLVMValueRef sign_bit = LLVMBuildShl(b, emm0_and, const_29, "sign_bit");
1533
1534 /*
1535 * get the polynom selection mask
1536 * there is one polynom for 0 <= x <= Pi/4
1537 * and another one for Pi/4<x<=Pi/2
1538 * Both branches will be computed.
1539 *
1540 * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
1541 * emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
1542 */
1543
1544 LLVMValueRef pi32_2 = lp_build_const_v4si(2);
1545 LLVMValueRef emm2_3 = LLVMBuildAnd(b, emm2_2, pi32_2, "emm2_3");
1546 LLVMValueRef poly_mask = lp_build_compare(b, int_type, PIPE_FUNC_EQUAL,
1547 emm2_3, lp_build_const_v4si(0));
1548
1549 /*
1550 * _PS_CONST(minus_cephes_DP1, -0.78515625);
1551 * _PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
1552 * _PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
1553 */
1554 LLVMValueRef DP1 = lp_build_const_v4sf(-0.78515625);
1555 LLVMValueRef DP2 = lp_build_const_v4sf(-2.4187564849853515625e-4);
1556 LLVMValueRef DP3 = lp_build_const_v4sf(-3.77489497744594108e-8);
1557
1558 /*
1559 * The magic pass: "Extended precision modular arithmetic"
1560 * x = ((x - y * DP1) - y * DP2) - y * DP3;
1561 * xmm1 = _mm_mul_ps(y, xmm1);
1562 * xmm2 = _mm_mul_ps(y, xmm2);
1563 * xmm3 = _mm_mul_ps(y, xmm3);
1564 */
1565 LLVMValueRef xmm1 = LLVMBuildMul(b, y_2, DP1, "xmm1");
1566 LLVMValueRef xmm2 = LLVMBuildMul(b, y_2, DP2, "xmm2");
1567 LLVMValueRef xmm3 = LLVMBuildMul(b, y_2, DP3, "xmm3");
1568
1569 /*
1570 * x = _mm_add_ps(x, xmm1);
1571 * x = _mm_add_ps(x, xmm2);
1572 * x = _mm_add_ps(x, xmm3);
1573 */
1574
1575 LLVMValueRef x_1 = LLVMBuildAdd(b, x_abs, xmm1, "x_1");
1576 LLVMValueRef x_2 = LLVMBuildAdd(b, x_1, xmm2, "x_2");
1577 LLVMValueRef x_3 = LLVMBuildAdd(b, x_2, xmm3, "x_3");
1578
1579 /*
1580 * Evaluate the first polynom (0 <= x <= Pi/4)
1581 *
1582 * z = _mm_mul_ps(x,x);
1583 */
1584 LLVMValueRef z = LLVMBuildMul(b, x_3, x_3, "z");
1585
1586 /*
1587 * _PS_CONST(coscof_p0, 2.443315711809948E-005);
1588 * _PS_CONST(coscof_p1, -1.388731625493765E-003);
1589 * _PS_CONST(coscof_p2, 4.166664568298827E-002);
1590 */
1591 LLVMValueRef coscof_p0 = lp_build_const_v4sf(2.443315711809948E-005);
1592 LLVMValueRef coscof_p1 = lp_build_const_v4sf(-1.388731625493765E-003);
1593 LLVMValueRef coscof_p2 = lp_build_const_v4sf(4.166664568298827E-002);
1594
1595 /*
1596 * y = *(v4sf*)_ps_coscof_p0;
1597 * y = _mm_mul_ps(y, z);
1598 */
1599 LLVMValueRef y_3 = LLVMBuildMul(b, z, coscof_p0, "y_3");
1600 LLVMValueRef y_4 = LLVMBuildAdd(b, y_3, coscof_p1, "y_4");
1601 LLVMValueRef y_5 = LLVMBuildMul(b, y_4, z, "y_5");
1602 LLVMValueRef y_6 = LLVMBuildAdd(b, y_5, coscof_p2, "y_6");
1603 LLVMValueRef y_7 = LLVMBuildMul(b, y_6, z, "y_7");
1604 LLVMValueRef y_8 = LLVMBuildMul(b, y_7, z, "y_8");
1605
1606
1607 /*
1608 * tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
1609 * y = _mm_sub_ps(y, tmp);
1610 * y = _mm_add_ps(y, *(v4sf*)_ps_1);
1611 */
1612 LLVMValueRef half = lp_build_const_v4sf(0.5);
1613 LLVMValueRef tmp = LLVMBuildMul(b, z, half, "tmp");
1614 LLVMValueRef y_9 = LLVMBuildSub(b, y_8, tmp, "y_8");
1615 LLVMValueRef one = lp_build_const_v4sf(1.0);
1616 LLVMValueRef y_10 = LLVMBuildAdd(b, y_9, one, "y_9");
1617
1618 /*
1619 * _PS_CONST(sincof_p0, -1.9515295891E-4);
1620 * _PS_CONST(sincof_p1, 8.3321608736E-3);
1621 * _PS_CONST(sincof_p2, -1.6666654611E-1);
1622 */
1623 LLVMValueRef sincof_p0 = lp_build_const_v4sf(-1.9515295891E-4);
1624 LLVMValueRef sincof_p1 = lp_build_const_v4sf(8.3321608736E-3);
1625 LLVMValueRef sincof_p2 = lp_build_const_v4sf(-1.6666654611E-1);
1626
1627 /*
1628 * Evaluate the second polynom (0 <= x <= Pi/4)
1629 *
1630 * y2 = *(v4sf*)_ps_sincof_p0;
1631 * y2 = _mm_mul_ps(y2, z);
1632 * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
1633 * y2 = _mm_mul_ps(y2, z);
1634 * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
1635 * y2 = _mm_mul_ps(y2, z);
1636 * y2 = _mm_mul_ps(y2, x);
1637 * y2 = _mm_add_ps(y2, x);
1638 */
1639
1640 LLVMValueRef y2_3 = LLVMBuildMul(b, z, sincof_p0, "y2_3");
1641 LLVMValueRef y2_4 = LLVMBuildAdd(b, y2_3, sincof_p1, "y2_4");
1642 LLVMValueRef y2_5 = LLVMBuildMul(b, y2_4, z, "y2_5");
1643 LLVMValueRef y2_6 = LLVMBuildAdd(b, y2_5, sincof_p2, "y2_6");
1644 LLVMValueRef y2_7 = LLVMBuildMul(b, y2_6, z, "y2_7");
1645 LLVMValueRef y2_8 = LLVMBuildMul(b, y2_7, x_3, "y2_8");
1646 LLVMValueRef y2_9 = LLVMBuildAdd(b, y2_8, x_3, "y2_9");
1647
1648 /*
1649 * select the correct result from the two polynoms
1650 * xmm3 = poly_mask;
1651 * y2 = _mm_and_ps(xmm3, y2); //, xmm3);
1652 * y = _mm_andnot_ps(xmm3, y);
1653 * y = _mm_add_ps(y,y2);
1654 */
1655 LLVMValueRef y2_i = LLVMBuildBitCast(b, y2_9, v4si, "y2_i");
1656 LLVMValueRef y_i = LLVMBuildBitCast(b, y_10, v4si, "y_i");
1657 LLVMValueRef y2_and = LLVMBuildAnd(b, y2_i, poly_mask, "y2_and");
1658 LLVMValueRef poly_mask_inv = LLVMBuildXor(b, poly_mask, inv, "poly_mask_inv");
1659 LLVMValueRef y_and = LLVMBuildAnd(b, y_i, poly_mask_inv, "y_and");
1660 LLVMValueRef y_combine = LLVMBuildAdd(b, y_and, y2_and, "y_combine");
1661
1662 /*
1663 * update the sign
1664 * y = _mm_xor_ps(y, sign_bit);
1665 */
1666 LLVMValueRef y_sign = LLVMBuildXor(b, y_combine, sign_bit, "y_sin");
1667 LLVMValueRef y_result = LLVMBuildBitCast(b, y_sign, v4sf, "y_result");
1668 return y_result;
1669 }
1670
1671
1672 /**
1673 * Generate pow(x, y)
1674 */
1675 LLVMValueRef
1676 lp_build_pow(struct lp_build_context *bld,
1677 LLVMValueRef x,
1678 LLVMValueRef y)
1679 {
1680 /* TODO: optimize the constant case */
1681 if(LLVMIsConstant(x) && LLVMIsConstant(y))
1682 debug_printf("%s: inefficient/imprecise constant arithmetic\n",
1683 __FUNCTION__);
1684
1685 return lp_build_exp2(bld, lp_build_mul(bld, lp_build_log2(bld, x), y));
1686 }
1687
1688
1689 /**
1690 * Generate exp(x)
1691 */
1692 LLVMValueRef
1693 lp_build_exp(struct lp_build_context *bld,
1694 LLVMValueRef x)
1695 {
1696 /* log2(e) = 1/log(2) */
1697 LLVMValueRef log2e = lp_build_const_vec(bld->type, 1.4426950408889634);
1698
1699 return lp_build_exp2(bld, lp_build_mul(bld, log2e, x));
1700 }
1701
1702
1703 /**
1704 * Generate log(x)
1705 */
1706 LLVMValueRef
1707 lp_build_log(struct lp_build_context *bld,
1708 LLVMValueRef x)
1709 {
1710 /* log(2) */
1711 LLVMValueRef log2 = lp_build_const_vec(bld->type, 0.69314718055994529);
1712
1713 return lp_build_mul(bld, log2, lp_build_log2(bld, x));
1714 }
1715
1716
1717 #define EXP_POLY_DEGREE 3
1718 #define LOG_POLY_DEGREE 5
1719
1720
1721 /**
1722 * Generate polynomial.
1723 * Ex: coeffs[0] + x * coeffs[1] + x^2 * coeffs[2].
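 * Evaluated in Horner form, coeffs[0] + x*(coeffs[1] + x*coeffs[2]), built
 * by the loop below from the highest-degree coefficient down.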
1724 */
1725 static LLVMValueRef
1726 lp_build_polynomial(struct lp_build_context *bld,
1727 LLVMValueRef x,
1728 const double *coeffs,
1729 unsigned num_coeffs)
1730 {
1731 const struct lp_type type = bld->type;
1732 LLVMValueRef res = NULL;
1733 unsigned i;
1734
1735 /* TODO: optimize the constant case */
1736 if(LLVMIsConstant(x))
1737 debug_printf("%s: inefficient/imprecise constant arithmetic\n",
1738 __FUNCTION__);
1739
1740 for (i = num_coeffs; i--; ) {
1741 LLVMValueRef coeff;
1742
1743 coeff = lp_build_const_vec(type, coeffs[i]);
1744
1745 if(res)
1746 res = lp_build_add(bld, coeff, lp_build_mul(bld, x, res));
1747 else
1748 res = coeff;
1749 }
1750
1751 if(res)
1752 return res;
1753 else
1754 return bld->undef;
1755 }
1756
1757
1758 /**
1759 * Minimax polynomial fit of 2**x, in range [0, 1[
1760 */
1761 const double lp_build_exp2_polynomial[] = {
1762 #if EXP_POLY_DEGREE == 5
1763 0.999999999690134838155,
1764 0.583974334321735217258,
1765 0.164553105719676828492,
1766 0.0292811063701710962255,
1767 0.00354944426657875141846,
1768 0.000296253726543423377365
1769 #elif EXP_POLY_DEGREE == 4
1770 1.00000001502262084505,
1771 0.563586057338685991394,
1772 0.150436017652442413623,
1773 0.0243220604213317927308,
1774 0.0025359088446580436489
1775 #elif EXP_POLY_DEGREE == 3
1776 0.999925218562710312959,
1777 0.695833540494823811697,
1778 0.226067155427249155588,
1779 0.0780245226406372992967
1780 #elif EXP_POLY_DEGREE == 2
1781 1.00172476321474503578,
1782 0.657636275736077639316,
1783 0.33718943461968720704
1784 #else
1785 #error
1786 #endif
1787 };
1788
1789
1790 void
1791 lp_build_exp2_approx(struct lp_build_context *bld,
1792 LLVMValueRef x,
1793 LLVMValueRef *p_exp2_int_part,
1794 LLVMValueRef *p_frac_part,
1795 LLVMValueRef *p_exp2)
1796 {
1797 const struct lp_type type = bld->type;
1798 LLVMTypeRef vec_type = lp_build_vec_type(type);
1799 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
1800 LLVMValueRef ipart = NULL;
1801 LLVMValueRef fpart = NULL;
1802 LLVMValueRef expipart = NULL;
1803 LLVMValueRef expfpart = NULL;
1804 LLVMValueRef res = NULL;
1805
1806 if(p_exp2_int_part || p_frac_part || p_exp2) {
1807 /* TODO: optimize the constant case */
1808 if(LLVMIsConstant(x))
1809 debug_printf("%s: inefficient/imprecise constant arithmetic\n",
1810 __FUNCTION__);
1811
1812 assert(type.floating && type.width == 32);
1813
1814 x = lp_build_min(bld, x, lp_build_const_vec(type, 129.0));
1815 x = lp_build_max(bld, x, lp_build_const_vec(type, -126.99999));
1816
1817 /* ipart = floor(x) */
1818 ipart = lp_build_floor(bld, x);
1819
1820 /* fpart = x - ipart */
1821 fpart = LLVMBuildSub(bld->builder, x, ipart, "");
1822 }
1823
1824 if(p_exp2_int_part || p_exp2) {
1825 /* expipart = (float) (1 << ipart) */
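      /* An IEEE-754 single encodes 2^(exponent - 127), so writing
       * ipart + 127 into the exponent field (bits 23..30) of an otherwise
       * zero float yields exactly 2^ipart; e.g. ipart = 3 gives
       * (3 + 127) << 23 = 0x41000000 = 8.0f. */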
1826 ipart = LLVMBuildFPToSI(bld->builder, ipart, int_vec_type, "");
1827 expipart = LLVMBuildAdd(bld->builder, ipart, lp_build_const_int_vec(type, 127), "");
1828 expipart = LLVMBuildShl(bld->builder, expipart, lp_build_const_int_vec(type, 23), "");
1829 expipart = LLVMBuildBitCast(bld->builder, expipart, vec_type, "");
1830 }
1831
1832 if(p_exp2) {
1833 expfpart = lp_build_polynomial(bld, fpart, lp_build_exp2_polynomial,
1834 Elements(lp_build_exp2_polynomial));
1835
1836 res = LLVMBuildMul(bld->builder, expipart, expfpart, "");
1837 }
1838
1839 if(p_exp2_int_part)
1840 *p_exp2_int_part = expipart;
1841
1842 if(p_frac_part)
1843 *p_frac_part = fpart;
1844
1845 if(p_exp2)
1846 *p_exp2 = res;
1847 }
1848
1849
1850 LLVMValueRef
1851 lp_build_exp2(struct lp_build_context *bld,
1852 LLVMValueRef x)
1853 {
1854 LLVMValueRef res;
1855 lp_build_exp2_approx(bld, x, NULL, NULL, &res);
1856 return res;
1857 }
1858
1859
1860 /**
1861 * Minimax polynomial fit of log2(x)/(x - 1), for x in range [1, 2[
1862 * These coefficients can be generated with
1863 * http://www.boost.org/doc/libs/1_36_0/libs/math/doc/sf_and_dist/html/math_toolkit/toolkit/internals2/minimax.html
1864 */
1865 const double lp_build_log2_polynomial[] = {
1866 #if LOG_POLY_DEGREE == 6
1867 3.11578814719469302614,
1868 -3.32419399085241980044,
1869 2.59883907202499966007,
1870 -1.23152682416275988241,
1871 0.318212422185251071475,
1872 -0.0344359067839062357313
1873 #elif LOG_POLY_DEGREE == 5
1874 2.8882704548164776201,
1875 -2.52074962577807006663,
1876 1.48116647521213171641,
1877 -0.465725644288844778798,
1878 0.0596515482674574969533
1879 #elif LOG_POLY_DEGREE == 4
1880 2.61761038894603480148,
1881 -1.75647175389045657003,
1882 0.688243882994381274313,
1883 -0.107254423828329604454
1884 #elif LOG_POLY_DEGREE == 3
1885 2.28330284476918490682,
1886 -1.04913055217340124191,
1887 0.204446009836232697516
1888 #else
1889 #error
1890 #endif
1891 };
1892
1893
1894 /**
1895 * See http://www.devmaster.net/forums/showthread.php?p=43580
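 *
 * Decomposes x = 2^exp * mant with mant in [1, 2[, so that
 * log2(x) = exp + log2(mant); log2(mant) is evaluated below as
 * (mant - 1) * P(mant), with P the minimax fit of log2(x)/(x - 1) above.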
1896 */
1897 void
1898 lp_build_log2_approx(struct lp_build_context *bld,
1899 LLVMValueRef x,
1900 LLVMValueRef *p_exp,
1901 LLVMValueRef *p_floor_log2,
1902 LLVMValueRef *p_log2)
1903 {
1904 const struct lp_type type = bld->type;
1905 LLVMTypeRef vec_type = lp_build_vec_type(type);
1906 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
1907
1908 LLVMValueRef expmask = lp_build_const_int_vec(type, 0x7f800000);
1909 LLVMValueRef mantmask = lp_build_const_int_vec(type, 0x007fffff);
1910 LLVMValueRef one = LLVMConstBitCast(bld->one, int_vec_type);
1911
1912 LLVMValueRef i = NULL;
1913 LLVMValueRef exp = NULL;
1914 LLVMValueRef mant = NULL;
1915 LLVMValueRef logexp = NULL;
1916 LLVMValueRef logmant = NULL;
1917 LLVMValueRef res = NULL;
1918
1919 if(p_exp || p_floor_log2 || p_log2) {
1920 /* TODO: optimize the constant case */
1921 if(LLVMIsConstant(x))
1922 debug_printf("%s: inefficient/imprecise constant arithmetic\n",
1923 __FUNCTION__);
1924
1925 assert(type.floating && type.width == 32);
1926
1927 i = LLVMBuildBitCast(bld->builder, x, int_vec_type, "");
1928
1929 /* exp = (float) exponent(x) */
1930 exp = LLVMBuildAnd(bld->builder, i, expmask, "");
1931 }
1932
1933 if(p_floor_log2 || p_log2) {
1934 logexp = LLVMBuildLShr(bld->builder, exp, lp_build_const_int_vec(type, 23), "");
1935 logexp = LLVMBuildSub(bld->builder, logexp, lp_build_const_int_vec(type, 127), "");
1936 logexp = LLVMBuildSIToFP(bld->builder, logexp, vec_type, "");
1937 }
1938
1939 if(p_log2) {
1940 /* mant = (float) mantissa(x) */
1941 mant = LLVMBuildAnd(bld->builder, i, mantmask, "");
1942 mant = LLVMBuildOr(bld->builder, mant, one, "");
1943 mant = LLVMBuildBitCast(bld->builder, mant, vec_type, "");
1944
1945 logmant = lp_build_polynomial(bld, mant, lp_build_log2_polynomial,
1946 Elements(lp_build_log2_polynomial));
1947
1948 /* This effectively increases the polynomial degree by one, but ensures that log2(1) == 0 */
1949 logmant = LLVMBuildMul(bld->builder, logmant, LLVMBuildSub(bld->builder, mant, bld->one, ""), "");
1950
1951 res = LLVMBuildAdd(bld->builder, logmant, logexp, "");
1952 }
1953
1954 if(p_exp) {
1955 exp = LLVMBuildBitCast(bld->builder, exp, vec_type, "");
1956 *p_exp = exp;
1957 }
1958
1959 if(p_floor_log2)
1960 *p_floor_log2 = logexp;
1961
1962 if(p_log2)
1963 *p_log2 = res;
1964 }
1965
1966
1967 LLVMValueRef
1968 lp_build_log2(struct lp_build_context *bld,
1969 LLVMValueRef x)
1970 {
1971 LLVMValueRef res;
1972 lp_build_log2_approx(bld, x, NULL, NULL, &res);
1973 return res;
1974 }