/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * @file
 * Helper arithmetic functions.
 *
 * LLVM IR doesn't support all the basic arithmetic operations we care about
 * (most notably min/max and saturated operations), and it is often necessary
 * to resort to machine-specific intrinsics directly. The functions here hide
 * all these implementation details from the other modules.
 *
 * We also do simple expression simplification here, because:
 * - it is very easy, given that we have all the necessary information readily
 *   available
 * - LLVM's optimization passes fail to simplify several vector expressions
 * - we often know value constraints which the optimization passes have no way
 *   of knowing, such as when source arguments are known to be in [0, 1] range.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */


#include "util/u_memory.h"
#include "util/u_debug.h"
#include "util/u_math.h"
#include "util/u_string.h"
#include "util/u_cpu_detect.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_intr.h"
#include "lp_bld_logic.h"
#include "lp_bld_pack.h"
#include "lp_bld_debug.h"
#include "lp_bld_arit.h"

/**
 * Generate min(a, b)
 * No checks for special-case values of a or b (such as 0 or 1) are done.
 */
static LLVMValueRef
lp_build_min_simple(struct lp_build_context *bld,
                    LLVMValueRef a,
                    LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   const char *intrinsic = NULL;
   LLVMValueRef cond;

   /* TODO: optimize the constant case */

   if(type.width * type.length == 128) {
      if(type.floating) {
         if(type.width == 32 && util_cpu_caps.has_sse)
            intrinsic = "llvm.x86.sse.min.ps";
         if(type.width == 64 && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.min.pd";
      }
      else {
         if(type.width == 8 && !type.sign && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.pminu.b";
         if(type.width == 8 && type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pminsb";
         if(type.width == 16 && !type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pminuw";
         if(type.width == 16 && type.sign && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.pmins.w";
         if(type.width == 32 && !type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pminud";
         if(type.width == 32 && type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pminsd";
      }
   }

   if(intrinsic)
      return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);

   cond = lp_build_cmp(bld, PIPE_FUNC_LESS, a, b);
   return lp_build_select(bld, cond, a, b);
}


/**
 * Generate max(a, b)
 * No checks for special-case values of a or b (such as 0 or 1) are done.
 */
static LLVMValueRef
lp_build_max_simple(struct lp_build_context *bld,
                    LLVMValueRef a,
                    LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   const char *intrinsic = NULL;
   LLVMValueRef cond;

   /* TODO: optimize the constant case */

   if(type.width * type.length == 128) {
      if(type.floating) {
         if(type.width == 32 && util_cpu_caps.has_sse)
            intrinsic = "llvm.x86.sse.max.ps";
         if(type.width == 64 && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.max.pd";
      }
      else {
         if(type.width == 8 && !type.sign && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.pmaxu.b";
         if(type.width == 8 && type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pmaxsb";
         if(type.width == 16 && !type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pmaxuw";
         if(type.width == 16 && type.sign && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.pmaxs.w";
         if(type.width == 32 && !type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pmaxud";
         if(type.width == 32 && type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pmaxsd";
      }
   }

   if(intrinsic)
      return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);

   cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, b);
   return lp_build_select(bld, cond, a, b);
}


/**
 * Generate 1 - a, or ~a depending on bld->type.
 */
LLVMValueRef
lp_build_comp(struct lp_build_context *bld,
              LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   if(a == bld->one)
      return bld->zero;
   if(a == bld->zero)
      return bld->one;

   if(type.norm && !type.floating && !type.fixed && !type.sign) {
      if(LLVMIsConstant(a))
         return LLVMConstNot(a);
      else
         return LLVMBuildNot(bld->builder, a, "");
   }

   if(LLVMIsConstant(a))
      return LLVMConstSub(bld->one, a);
   else
      return LLVMBuildSub(bld->builder, bld->one, a, "");
}


/**
 * Generate a + b
 */
LLVMValueRef
lp_build_add(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   LLVMValueRef res;

   if(a == bld->zero)
      return b;
   if(b == bld->zero)
      return a;
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(bld->type.norm) {
      const char *intrinsic = NULL;

      if(a == bld->one || b == bld->one)
         return bld->one;

      if(util_cpu_caps.has_sse2 &&
         type.width * type.length == 128 &&
         !type.floating && !type.fixed) {
         if(type.width == 8)
            intrinsic = type.sign ? "llvm.x86.sse2.padds.b" : "llvm.x86.sse2.paddus.b";
         if(type.width == 16)
            intrinsic = type.sign ? "llvm.x86.sse2.padds.w" : "llvm.x86.sse2.paddus.w";
      }

      if(intrinsic)
         return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
   }

   if(LLVMIsConstant(a) && LLVMIsConstant(b))
      res = LLVMConstAdd(a, b);
   else
      res = LLVMBuildAdd(bld->builder, a, b, "");

   /* clamp to ceiling of 1.0 */
   if(bld->type.norm && (bld->type.floating || bld->type.fixed))
      res = lp_build_min_simple(bld, res, bld->one);

   /* XXX clamp to floor of -1 or 0??? */

   return res;
}


/** Return the sum of the elements of a */
LLVMValueRef
lp_build_sum_vector(struct lp_build_context *bld,
                    LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMValueRef index, res;
   unsigned i;

   if (a == bld->zero)
      return bld->zero;
   if (a == bld->undef)
      return bld->undef;
   assert(type.length > 1);

   assert(!bld->type.norm);

   index = LLVMConstInt(LLVMInt32Type(), 0, 0);
   res = LLVMBuildExtractElement(bld->builder, a, index, "");

   for (i = 1; i < type.length; i++) {
      index = LLVMConstInt(LLVMInt32Type(), i, 0);
      res = LLVMBuildAdd(bld->builder, res,
                         LLVMBuildExtractElement(bld->builder, a, index, ""),
                         "");
   }

   return res;
}


/**
 * Generate a - b
 */
LLVMValueRef
lp_build_sub(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   LLVMValueRef res;

   if(b == bld->zero)
      return a;
   if(a == bld->undef || b == bld->undef)
      return bld->undef;
   if(a == b)
      return bld->zero;

   if(bld->type.norm) {
      const char *intrinsic = NULL;

      if(b == bld->one)
         return bld->zero;

      if(util_cpu_caps.has_sse2 &&
         type.width * type.length == 128 &&
         !type.floating && !type.fixed) {
         if(type.width == 8)
            intrinsic = type.sign ? "llvm.x86.sse2.psubs.b" : "llvm.x86.sse2.psubus.b";
         if(type.width == 16)
            intrinsic = type.sign ? "llvm.x86.sse2.psubs.w" : "llvm.x86.sse2.psubus.w";
      }

      if(intrinsic)
         return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
   }

   if(LLVMIsConstant(a) && LLVMIsConstant(b))
      res = LLVMConstSub(a, b);
   else
      res = LLVMBuildSub(bld->builder, a, b, "");

   if(bld->type.norm && (bld->type.floating || bld->type.fixed))
      res = lp_build_max_simple(bld, res, bld->zero);

   return res;
}


/**
 * Normalized 8bit multiplication.
 *
 * - alpha plus one
 *
 *     makes the following approximation to the division (Sree)
 *
 *       a*b/255 ~= (a*(b + 1)) >> 8
 *
 *     which is the fastest method that satisfies the following OpenGL
 *     criteria:
 *
 *       0*0 = 0 and 255*255 = 255
 *
 * - geometric series
 *
 *     takes the geometric series approximation to the division
 *
 *       t/255 = (t >> 8) + (t >> 16) + (t >> 24) ...
 *
 *     in this case just the first two terms, so it fits in 16bit arithmetic:
 *
 *       t/255 ~= (t + (t >> 8)) >> 8
 *
 *     note that by itself it doesn't satisfy the OpenGL criteria, as it
 *     yields 255*255 = 254, so either the special case b = 255 must be
 *     accounted for, or roundoff must be used
 *
 * - geometric series plus rounding
 *
 *     when using the geometric series division instead of truncating the
 *     result, use roundoff in the approximation (Jim Blinn)
 *
 *       t/255 ~= (t + (t >> 8) + 0x80) >> 8
 *
 *     achieving exact results
 *
 * @sa Alvy Ray Smith, Image Compositing Fundamentals, Tech Memo 4, Aug 15, 1995,
 *     ftp://ftp.alvyray.com/Acrobat/4_Comp.pdf
 * @sa Michael Herf, The "double blend trick", May 2000,
 *     http://www.stereopsis.com/doubleblend.html
 */
static LLVMValueRef
lp_build_mul_u8n(LLVMBuilderRef builder,
                 struct lp_type i16_type,
                 LLVMValueRef a, LLVMValueRef b)
{
   LLVMValueRef c8;
   LLVMValueRef ab;

   c8 = lp_build_const_int_vec(i16_type, 8);

#if 0

   /* a*b/255 ~= (a*(b + 1)) >> 8 */
   b = LLVMBuildAdd(builder, b, lp_build_const_int_vec(i16_type, 1), "");
   ab = LLVMBuildMul(builder, a, b, "");

#else

   /* ab/255 ~= (ab + (ab >> 8) + 0x80) >> 8 */
   ab = LLVMBuildMul(builder, a, b, "");
   ab = LLVMBuildAdd(builder, ab, LLVMBuildLShr(builder, ab, c8, ""), "");
   ab = LLVMBuildAdd(builder, ab, lp_build_const_int_vec(i16_type, 0x80), "");

#endif

   ab = LLVMBuildLShr(builder, ab, c8, "");

   return ab;
}
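
/*
 * For reference, a scalar sketch of the "geometric series plus rounding"
 * variant used above (illustrative only, not part of the JIT path; assumes
 * <stdint.h>).  E.g. a = b = 255: ab = 65025, 65025 + (65025 >> 8) + 0x80 =
 * 65407, and 65407 >> 8 = 255; a = b = 0 yields 0, so both OpenGL criteria
 * hold exactly.
 */
#if 0
static uint8_t
mul_u8n_ref(uint8_t a, uint8_t b)
{
   uint32_t ab = (uint32_t)a * b;  /* fits in 16 bits */
   ab += ab >> 8;                  /* first two terms of the t/255 series */
   ab += 0x80;                     /* Blinn's roundoff */
   return (uint8_t)(ab >> 8);
}
#endif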


/**
 * Generate a * b
 */
LLVMValueRef
lp_build_mul(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   LLVMValueRef shift;
   LLVMValueRef res;

   if(a == bld->zero)
      return bld->zero;
   if(a == bld->one)
      return b;
   if(b == bld->zero)
      return bld->zero;
   if(b == bld->one)
      return a;
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(!type.floating && !type.fixed && type.norm) {
      if(type.width == 8) {
         struct lp_type i16_type = lp_wider_type(type);
         LLVMValueRef al, ah, bl, bh, abl, abh, ab;

         lp_build_unpack2(bld->builder, type, i16_type, a, &al, &ah);
         lp_build_unpack2(bld->builder, type, i16_type, b, &bl, &bh);

         /* PMULLW, PSRLW, PADDW */
         abl = lp_build_mul_u8n(bld->builder, i16_type, al, bl);
         abh = lp_build_mul_u8n(bld->builder, i16_type, ah, bh);

         ab = lp_build_pack2(bld->builder, i16_type, type, abl, abh);

         return ab;
      }

      /* FIXME */
      assert(0);
   }

   if(type.fixed)
      shift = lp_build_const_int_vec(type, type.width/2);
   else
      shift = NULL;

   if(LLVMIsConstant(a) && LLVMIsConstant(b)) {
      res = LLVMConstMul(a, b);
      if(shift) {
         if(type.sign)
            res = LLVMConstAShr(res, shift);
         else
            res = LLVMConstLShr(res, shift);
      }
   }
   else {
      res = LLVMBuildMul(bld->builder, a, b, "");
      if(shift) {
         if(type.sign)
            res = LLVMBuildAShr(bld->builder, res, shift, "");
         else
            res = LLVMBuildLShr(bld->builder, res, shift, "");
      }
   }

   return res;
}


/**
 * Small vector x scale multiplication optimization.
 */
LLVMValueRef
lp_build_mul_imm(struct lp_build_context *bld,
                 LLVMValueRef a,
                 int b)
{
   LLVMValueRef factor;

   if(b == 0)
      return bld->zero;

   if(b == 1)
      return a;

   if(b == -1)
      return LLVMBuildNeg(bld->builder, a, "");

   if(b == 2 && bld->type.floating)
      return lp_build_add(bld, a, a);

   if(util_is_pot(b)) {
      unsigned shift = ffs(b) - 1;

      if(bld->type.floating) {
#if 0
         /*
          * Power of two multiplication by directly manipulating the exponent.
          *
          * XXX: This might not always be faster, it will introduce a small
          * error for multiplication by zero, and it will produce wrong results
          * for Inf and NaN.
          */
         unsigned mantissa = lp_mantissa(bld->type);
         factor = lp_build_const_int_vec(bld->type, (unsigned long long)shift << mantissa);
         a = LLVMBuildBitCast(bld->builder, a, lp_build_int_vec_type(bld->type), "");
         a = LLVMBuildAdd(bld->builder, a, factor, "");
         a = LLVMBuildBitCast(bld->builder, a, lp_build_vec_type(bld->type), "");
         return a;
#endif
      }
      else {
         factor = lp_build_const_vec(bld->type, shift);
         return LLVMBuildShl(bld->builder, a, factor, "");
      }
   }

   factor = lp_build_const_vec(bld->type, (double)b);
   return lp_build_mul(bld, a, factor);
}


/**
 * Generate a / b
 */
LLVMValueRef
lp_build_div(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;

   if(a == bld->zero)
      return bld->zero;
   if(a == bld->one)
      return lp_build_rcp(bld, b);
   if(b == bld->zero)
      return bld->undef;
   if(b == bld->one)
      return a;
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(LLVMIsConstant(a) && LLVMIsConstant(b))
      return LLVMConstFDiv(a, b);

   if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
      return lp_build_mul(bld, a, lp_build_rcp(bld, b));

   return LLVMBuildFDiv(bld->builder, a, b, "");
}


/**
 * Linear interpolation.
 *
 * This also works for integer values with a few caveats.
 *
 * @sa http://www.stereopsis.com/doubleblend.html
 */
LLVMValueRef
lp_build_lerp(struct lp_build_context *bld,
              LLVMValueRef x,
              LLVMValueRef v0,
              LLVMValueRef v1)
{
   LLVMValueRef delta;
   LLVMValueRef res;

   delta = lp_build_sub(bld, v1, v0);

   res = lp_build_mul(bld, x, delta);

   res = lp_build_add(bld, v0, res);

   if(bld->type.fixed)
      /* XXX: This step is necessary for lerping 8bit colors stored in 16 bits,
       * but it will be wrong for other uses. Basically we need a more
       * powerful lp_type, capable of further distinguishing how values are
       * interpreted from how they are stored. */
      res = LLVMBuildAnd(bld->builder, res, lp_build_const_int_vec(bld->type, (1 << bld->type.width/2) - 1), "");

   return res;
}
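
/*
 * Scalar sketch of the fixed-point caveat above (illustrative only, assumes
 * <stdint.h>): 8-bit values lerped in 16-bit lanes with an 8.8 fixed-point
 * weight; the final mask keeps only the low half that actually holds the
 * color, which is what the AND above does for bld->type.width == 16.
 */
#if 0
static uint8_t
lerp_u8_ref(uint8_t x, uint8_t v0, uint8_t v1)
{
   int32_t delta = (int32_t)v1 - (int32_t)v0;
   int32_t res = (int32_t)v0 + (((int32_t)x * delta) >> 8);  /* x/256 weight */
   return (uint8_t)(res & 0xff);   /* the masking step the XXX refers to */
}
#endif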


LLVMValueRef
lp_build_lerp_2d(struct lp_build_context *bld,
                 LLVMValueRef x,
                 LLVMValueRef y,
                 LLVMValueRef v00,
                 LLVMValueRef v01,
                 LLVMValueRef v10,
                 LLVMValueRef v11)
{
   LLVMValueRef v0 = lp_build_lerp(bld, x, v00, v01);
   LLVMValueRef v1 = lp_build_lerp(bld, x, v10, v11);
   return lp_build_lerp(bld, y, v0, v1);
}


/**
 * Generate min(a, b)
 * Do checks for special cases.
 */
LLVMValueRef
lp_build_min(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(a == b)
      return a;

   if(bld->type.norm) {
      if(a == bld->zero || b == bld->zero)
         return bld->zero;
      if(a == bld->one)
         return b;
      if(b == bld->one)
         return a;
   }

   return lp_build_min_simple(bld, a, b);
}


/**
 * Generate max(a, b)
 * Do checks for special cases.
 */
LLVMValueRef
lp_build_max(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(a == b)
      return a;

   if(bld->type.norm) {
      if(a == bld->one || b == bld->one)
         return bld->one;
      if(a == bld->zero)
         return b;
      if(b == bld->zero)
         return a;
   }

   return lp_build_max_simple(bld, a, b);
}


/**
 * Generate clamp(a, min, max)
 * Do checks for special cases.
 */
LLVMValueRef
lp_build_clamp(struct lp_build_context *bld,
               LLVMValueRef a,
               LLVMValueRef min,
               LLVMValueRef max)
{
   a = lp_build_min(bld, a, max);
   a = lp_build_max(bld, a, min);
   return a;
}


/**
 * Generate abs(a)
 */
LLVMValueRef
lp_build_abs(struct lp_build_context *bld,
             LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);

   if(!type.sign)
      return a;

   if(type.floating) {
      /* Mask out the sign bit */
      LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
      unsigned long long absMask = ~(1ULL << (type.width - 1));
      LLVMValueRef mask = lp_build_const_int_vec(type, ((unsigned long long) absMask));
      a = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
      a = LLVMBuildAnd(bld->builder, a, mask, "");
      a = LLVMBuildBitCast(bld->builder, a, vec_type, "");
      return a;
   }

   if(type.width*type.length == 128 && util_cpu_caps.has_ssse3) {
      switch(type.width) {
      case 8:
         return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.b.128", vec_type, a);
      case 16:
         return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.w.128", vec_type, a);
      case 32:
         return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.d.128", vec_type, a);
      }
   }

   return lp_build_max(bld, a, LLVMBuildNeg(bld->builder, a, ""));
}


LLVMValueRef
lp_build_negate(struct lp_build_context *bld,
                LLVMValueRef a)
{
   return LLVMBuildNeg(bld->builder, a, "");
}


/** Return -1, 0 or +1 depending on the sign of a */
LLVMValueRef
lp_build_sgn(struct lp_build_context *bld,
             LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMValueRef cond;
   LLVMValueRef res;

   /* Handle non-zero case */
   if(!type.sign) {
      /* if not zero then sign must be positive */
      res = bld->one;
   }
   else if(type.floating) {
      LLVMTypeRef vec_type;
      LLVMTypeRef int_type;
      LLVMValueRef mask;
      LLVMValueRef sign;
      LLVMValueRef one;
      unsigned long long maskBit = (unsigned long long)1 << (type.width - 1);

      int_type = lp_build_int_vec_type(type);
      vec_type = lp_build_vec_type(type);
      mask = lp_build_const_int_vec(type, maskBit);

      /* Take the sign bit and OR it into the constant 1.0 */
      sign = LLVMBuildBitCast(bld->builder, a, int_type, "");
      sign = LLVMBuildAnd(bld->builder, sign, mask, "");
      one = LLVMConstBitCast(bld->one, int_type);
      res = LLVMBuildOr(bld->builder, sign, one, "");
      res = LLVMBuildBitCast(bld->builder, res, vec_type, "");
   }
   else
   {
      LLVMValueRef minus_one = lp_build_const_vec(type, -1.0);
      cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, bld->zero);
      res = lp_build_select(bld, cond, bld->one, minus_one);
   }

   /* Handle zero */
   cond = lp_build_cmp(bld, PIPE_FUNC_EQUAL, a, bld->zero);
   res = lp_build_select(bld, cond, bld->zero, res);

   return res;
}
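
/*
 * Scalar sketch of the floating-point branch above (illustrative only,
 * assumes 32-bit IEEE floats, <stdint.h> and <string.h>): copying the sign
 * bit of 'a' onto the constant 1.0 yields +1.0f or -1.0f without branches;
 * zero is handled by the select that follows.
 */
#if 0
static float
sgn_nonzero_ref(float a)
{
   uint32_t ai, bits;
   float res;
   memcpy(&ai, &a, sizeof ai);      /* reinterpret float as bits */
   bits = 0x3f800000;               /* 1.0f */
   bits |= ai & 0x80000000;         /* inherit a's sign bit */
   memcpy(&res, &bits, sizeof res);
   return res;
}
#endif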


/**
 * Set the sign of float vector 'a' according to 'sign'.
 * If sign==0, return abs(a).
 * If sign==1, return -abs(a).
 * Other values for sign produce undefined results.
 */
LLVMValueRef
lp_build_set_sign(struct lp_build_context *bld,
                  LLVMValueRef a, LLVMValueRef sign)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   LLVMValueRef shift = lp_build_const_int_vec(type, type.width - 1);
   LLVMValueRef mask = lp_build_const_int_vec(type,
                             ~((unsigned long long) 1 << (type.width - 1)));
   LLVMValueRef val, res;

   assert(type.floating);

   /* val = reinterpret_cast<int>(a) */
   val = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
   /* val = val & mask */
   val = LLVMBuildAnd(bld->builder, val, mask, "");
   /* sign = sign << shift */
   sign = LLVMBuildShl(bld->builder, sign, shift, "");
   /* res = val | sign */
   res = LLVMBuildOr(bld->builder, val, sign, "");
   /* res = reinterpret_cast<float>(res) */
   res = LLVMBuildBitCast(bld->builder, res, vec_type, "");

   return res;
}


/**
 * Convert vector of (or scalar) int to vector of (or scalar) float.
 */
LLVMValueRef
lp_build_int_to_float(struct lp_build_context *bld,
                      LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);

   assert(type.floating);

   return LLVMBuildSIToFP(bld->builder, a, vec_type, "");
}



enum lp_build_round_sse41_mode
{
   LP_BUILD_ROUND_SSE41_NEAREST = 0,
   LP_BUILD_ROUND_SSE41_FLOOR = 1,
   LP_BUILD_ROUND_SSE41_CEIL = 2,
   LP_BUILD_ROUND_SSE41_TRUNCATE = 3
};


static INLINE LLVMValueRef
lp_build_round_sse41(struct lp_build_context *bld,
                     LLVMValueRef a,
                     enum lp_build_round_sse41_mode mode)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   const char *intrinsic;

   assert(type.floating);
   assert(type.width*type.length == 128);
   assert(lp_check_value(type, a));
   assert(util_cpu_caps.has_sse4_1);

   switch(type.width) {
   case 32:
      intrinsic = "llvm.x86.sse41.round.ps";
      break;
   case 64:
      intrinsic = "llvm.x86.sse41.round.pd";
      break;
   default:
      assert(0);
      return bld->undef;
   }

   return lp_build_intrinsic_binary(bld->builder, intrinsic, vec_type, a,
                                    LLVMConstInt(LLVMInt32Type(), mode, 0));
}


LLVMValueRef
lp_build_trunc(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if (util_cpu_caps.has_sse4_1 && type.width*type.length == 128)
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_TRUNCATE);
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
      LLVMValueRef res;
      res = LLVMBuildFPToSI(bld->builder, a, int_vec_type, "");
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}


LLVMValueRef
lp_build_round(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if (util_cpu_caps.has_sse4_1 && type.width*type.length == 128)
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_NEAREST);
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMValueRef res;
      res = lp_build_iround(bld, a);
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}


LLVMValueRef
lp_build_floor(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if (util_cpu_caps.has_sse4_1 && type.width*type.length == 128)
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMValueRef res;
      res = lp_build_ifloor(bld, a);
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}


LLVMValueRef
lp_build_ceil(struct lp_build_context *bld,
              LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if (util_cpu_caps.has_sse4_1 && type.width*type.length == 128)
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_CEIL);
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMValueRef res;
      res = lp_build_iceil(bld, a);
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}


/**
 * Return fractional part of 'a' computed as a - floor(a).
 * Typically used in texture coord arithmetic.
 */
LLVMValueRef
lp_build_fract(struct lp_build_context *bld,
               LLVMValueRef a)
{
   assert(bld->type.floating);
   return lp_build_sub(bld, a, lp_build_floor(bld, a));
}


/**
 * Convert to integer, through whichever rounding method that's fastest,
 * typically truncating toward zero.
 */
LLVMValueRef
lp_build_itrunc(struct lp_build_context *bld,
                LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);

   assert(type.floating);
   assert(lp_check_value(type, a));

   return LLVMBuildFPToSI(bld->builder, a, int_vec_type, "");
}


/**
 * Convert float[] to int[] with round().
 */
LLVMValueRef
lp_build_iround(struct lp_build_context *bld,
                LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMValueRef res;

   assert(type.floating);

   assert(lp_check_value(type, a));

   if (util_cpu_caps.has_sse4_1 && type.width*type.length == 128) {
      res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_NEAREST);
   }
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMValueRef mask = lp_build_const_int_vec(type, (unsigned long long)1 << (type.width - 1));
      LLVMValueRef sign;
      LLVMValueRef half;

      /* get sign bit */
      sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
      sign = LLVMBuildAnd(bld->builder, sign, mask, "");

      /* half = 0.5 with the sign of a */
      half = lp_build_const_vec(type, 0.5);
      half = LLVMBuildBitCast(bld->builder, half, int_vec_type, "");
      half = LLVMBuildOr(bld->builder, sign, half, "");
      half = LLVMBuildBitCast(bld->builder, half, vec_type, "");

      res = LLVMBuildAdd(bld->builder, a, half, "");
   }

   res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "");

   return res;
}
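
/*
 * Scalar sketch of the non-SSE4.1 path above (illustrative only): round half
 * away from zero by adding +/-0.5 with the sign of the input, then truncate.
 */
#if 0
static int
iround_ref(float a)
{
   float half = a < 0.0f ? -0.5f : 0.5f;  /* sign copy done with bit ops above */
   return (int)(a + half);                /* FPToSI truncates toward zero */
}
#endif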


/**
 * Convert float[] to int[] with floor().
 */
LLVMValueRef
lp_build_ifloor(struct lp_build_context *bld,
                LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMValueRef res;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if (util_cpu_caps.has_sse4_1 && type.width*type.length == 128) {
      res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
   }
   else {
      /* Add an offset just below 1 in magnitude to negative values, so that
       * the truncation done by FPToSI below behaves like floor() */
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      unsigned mantissa = lp_mantissa(type);
      LLVMValueRef mask = lp_build_const_int_vec(type, (unsigned long long)1 << (type.width - 1));
      LLVMValueRef sign;
      LLVMValueRef offset;

      /* sign = a < 0 ? ~0 : 0 */
      sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
      sign = LLVMBuildAnd(bld->builder, sign, mask, "");
      sign = LLVMBuildAShr(bld->builder, sign, lp_build_const_int_vec(type, type.width - 1), "");
      lp_build_name(sign, "floor.sign");

      /* offset = -0.99999(9)f */
      offset = lp_build_const_vec(type, -(double)(((unsigned long long)1 << mantissa) - 1)/((unsigned long long)1 << mantissa));
      offset = LLVMConstBitCast(offset, int_vec_type);

      /* offset = a < 0 ? -0.99999(9)f : 0.0f */
      offset = LLVMBuildAnd(bld->builder, offset, sign, "");
      offset = LLVMBuildBitCast(bld->builder, offset, vec_type, "");
      lp_build_name(offset, "floor.offset");

      res = LLVMBuildAdd(bld->builder, a, offset, "");
      lp_build_name(res, "floor.res");
   }

   res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "");
   lp_build_name(res, "floor");

   return res;
}
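
/*
 * Scalar sketch of the offset trick above (illustrative only): for negative
 * inputs add an offset just below 1 in magnitude so that the subsequent
 * truncation floors, e.g. -1.25 + -0.99999988 = -2.24999988 -> -2.
 */
#if 0
static int
ifloor_ref(float a)
{
   float offset = a < 0.0f ? -0.99999988f : 0.0f;  /* -(2^23 - 1)/2^23 */
   return (int)(a + offset);                       /* truncation now floors */
}
#endif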


LLVMValueRef
lp_build_iceil(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMValueRef res;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if (util_cpu_caps.has_sse4_1 && type.width*type.length == 128) {
      res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_CEIL);
   }
   else {
      /* TODO: mimic lp_build_ifloor() here */
      assert(0);
      res = bld->undef;
   }

   res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "");

   return res;
}


LLVMValueRef
lp_build_sqrt(struct lp_build_context *bld,
              LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   char intrinsic[32];

   /* TODO: optimize the constant case */

   assert(type.floating);
   util_snprintf(intrinsic, sizeof intrinsic, "llvm.sqrt.v%uf%u", type.length, type.width);

   return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
}


LLVMValueRef
lp_build_rcp(struct lp_build_context *bld,
             LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   if(a == bld->zero)
      return bld->undef;
   if(a == bld->one)
      return bld->one;
   if(a == bld->undef)
      return bld->undef;

   assert(type.floating);

   if(LLVMIsConstant(a))
      return LLVMConstFDiv(bld->one, a);

   if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4) {
      /*
       * XXX: Added precision is not always necessary, so only enable this
       * when we have a better system in place to track minimum precision.
       */

#if 0
      /*
       * Do one Newton-Raphson step to improve precision:
       *
       *   x1 = (2 - a * rcp(a)) * rcp(a)
       */

      LLVMValueRef two = lp_build_const_vec(bld->type, 2.0);
      LLVMValueRef rcp_a;
      LLVMValueRef res;

      rcp_a = lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rcp.ps", lp_build_vec_type(type), a);

      res = LLVMBuildMul(bld->builder, a, rcp_a, "");
      res = LLVMBuildSub(bld->builder, two, res, "");
      res = LLVMBuildMul(bld->builder, res, rcp_a, "");

      return res;
#else
      return lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rcp.ps", lp_build_vec_type(type), a);
#endif
   }

   return LLVMBuildFDiv(bld->builder, bld->one, a, "");
}


/**
 * Generate 1/sqrt(a)
 */
LLVMValueRef
lp_build_rsqrt(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);

   if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
      return lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rsqrt.ps", lp_build_vec_type(type), a);

   return lp_build_rcp(bld, lp_build_sqrt(bld, a));
}


static inline LLVMValueRef
lp_build_const_v4si(unsigned long value)
{
   LLVMValueRef element = LLVMConstInt(LLVMInt32Type(), value, 0);
   LLVMValueRef elements[4] = { element, element, element, element };
   return LLVMConstVector(elements, 4);
}

static inline LLVMValueRef
lp_build_const_v4sf(float value)
{
   LLVMValueRef element = LLVMConstReal(LLVMFloatType(), value);
   LLVMValueRef elements[4] = { element, element, element, element };
   return LLVMConstVector(elements, 4);
}


/**
 * Generate sin(a) using SSE2
 */
LLVMValueRef
lp_build_sin(struct lp_build_context *bld,
             LLVMValueRef a)
{
   struct lp_type int_type = lp_int_type(bld->type);
   LLVMBuilderRef b = bld->builder;
   LLVMTypeRef v4sf = LLVMVectorType(LLVMFloatType(), 4);
   LLVMTypeRef v4si = LLVMVectorType(LLVMInt32Type(), 4);

   /*
    * take the absolute value,
    * x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
    */

   LLVMValueRef inv_sig_mask = lp_build_const_v4si(~0x80000000);
   LLVMValueRef a_v4si = LLVMBuildBitCast(b, a, v4si, "a_v4si");

   LLVMValueRef absi = LLVMBuildAnd(b, a_v4si, inv_sig_mask, "absi");
   LLVMValueRef x_abs = LLVMBuildBitCast(b, absi, v4sf, "x_abs");

   /*
    * extract the sign bit (upper one)
    * sign_bit = _mm_and_ps(sign_bit, *(v4sf*)_ps_sign_mask);
    */
   LLVMValueRef sig_mask = lp_build_const_v4si(0x80000000);
   LLVMValueRef sign_bit_i = LLVMBuildAnd(b, a_v4si, sig_mask, "sign_bit_i");

   /*
    * scale by 4/Pi
    * y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
    */

   LLVMValueRef FOPi = lp_build_const_v4sf(1.27323954473516);
   LLVMValueRef scale_y = LLVMBuildMul(b, x_abs, FOPi, "scale_y");

   /*
    * store the integer part of y in mm0
    * emm2 = _mm_cvttps_epi32(y);
    */

   LLVMValueRef emm2_i = LLVMBuildFPToSI(b, scale_y, v4si, "emm2_i");

   /*
    * j=(j+1) & (~1) (see the cephes sources)
    * emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
    */

   LLVMValueRef all_one = lp_build_const_v4si(1);
   LLVMValueRef emm2_add = LLVMBuildAdd(b, emm2_i, all_one, "emm2_add");
   /*
    * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
    */
   LLVMValueRef inv_one = lp_build_const_v4si(~1);
   LLVMValueRef emm2_and = LLVMBuildAnd(b, emm2_add, inv_one, "emm2_and");

   /*
    * y = _mm_cvtepi32_ps(emm2);
    */
   LLVMValueRef y_2 = LLVMBuildSIToFP(b, emm2_and, v4sf, "y_2");

   /* get the swap sign flag
    * emm0 = _mm_and_si128(emm2, *(v4si*)_pi32_4);
    */
   LLVMValueRef pi32_4 = lp_build_const_v4si(4);
   LLVMValueRef emm0_and = LLVMBuildAnd(b, emm2_add, pi32_4, "emm0_and");

   /*
    * emm2 = _mm_slli_epi32(emm0, 29);
    */
   LLVMValueRef const_29 = lp_build_const_v4si(29);
   LLVMValueRef swap_sign_bit = LLVMBuildShl(b, emm0_and, const_29, "swap_sign_bit");

   /*
    * get the polynomial selection mask
    * there is one polynomial for 0 <= x <= Pi/4
    * and another one for Pi/4 < x <= Pi/2
    * Both branches will be computed.
    *
    * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
    * emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
    */

   LLVMValueRef pi32_2 = lp_build_const_v4si(2);
   LLVMValueRef emm2_3 = LLVMBuildAnd(b, emm2_and, pi32_2, "emm2_3");
   LLVMValueRef poly_mask = lp_build_compare(b, int_type, PIPE_FUNC_EQUAL,
                                             emm2_3, lp_build_const_v4si(0));
   /*
    * sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
    */
   LLVMValueRef sign_bit_1 = LLVMBuildXor(b, sign_bit_i, swap_sign_bit, "sign_bit");

   /*
    * _PS_CONST(minus_cephes_DP1, -0.78515625);
    * _PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
    * _PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
    */
   LLVMValueRef DP1 = lp_build_const_v4sf(-0.78515625);
   LLVMValueRef DP2 = lp_build_const_v4sf(-2.4187564849853515625e-4);
   LLVMValueRef DP3 = lp_build_const_v4sf(-3.77489497744594108e-8);

   /*
    * The magic pass: "Extended precision modular arithmetic"
    * x = ((x - y * DP1) - y * DP2) - y * DP3;
    * xmm1 = _mm_mul_ps(y, xmm1);
    * xmm2 = _mm_mul_ps(y, xmm2);
    * xmm3 = _mm_mul_ps(y, xmm3);
    */
   LLVMValueRef xmm1 = LLVMBuildMul(b, y_2, DP1, "xmm1");
   LLVMValueRef xmm2 = LLVMBuildMul(b, y_2, DP2, "xmm2");
   LLVMValueRef xmm3 = LLVMBuildMul(b, y_2, DP3, "xmm3");

   /*
    * x = _mm_add_ps(x, xmm1);
    * x = _mm_add_ps(x, xmm2);
    * x = _mm_add_ps(x, xmm3);
    */

   LLVMValueRef x_1 = LLVMBuildAdd(b, x_abs, xmm1, "x_1");
   LLVMValueRef x_2 = LLVMBuildAdd(b, x_1, xmm2, "x_2");
   LLVMValueRef x_3 = LLVMBuildAdd(b, x_2, xmm3, "x_3");

   /*
    * Evaluate the first polynomial (0 <= x <= Pi/4)
    *
    * z = _mm_mul_ps(x,x);
    */
   LLVMValueRef z = LLVMBuildMul(b, x_3, x_3, "z");

   /*
    * _PS_CONST(coscof_p0,  2.443315711809948E-005);
    * _PS_CONST(coscof_p1, -1.388731625493765E-003);
    * _PS_CONST(coscof_p2,  4.166664568298827E-002);
    */
   LLVMValueRef coscof_p0 = lp_build_const_v4sf(2.443315711809948E-005);
   LLVMValueRef coscof_p1 = lp_build_const_v4sf(-1.388731625493765E-003);
   LLVMValueRef coscof_p2 = lp_build_const_v4sf(4.166664568298827E-002);

   /*
    * y = *(v4sf*)_ps_coscof_p0;
    * y = _mm_mul_ps(y, z);
    */
   LLVMValueRef y_3 = LLVMBuildMul(b, z, coscof_p0, "y_3");
   LLVMValueRef y_4 = LLVMBuildAdd(b, y_3, coscof_p1, "y_4");
   LLVMValueRef y_5 = LLVMBuildMul(b, y_4, z, "y_5");
   LLVMValueRef y_6 = LLVMBuildAdd(b, y_5, coscof_p2, "y_6");
   LLVMValueRef y_7 = LLVMBuildMul(b, y_6, z, "y_7");
   LLVMValueRef y_8 = LLVMBuildMul(b, y_7, z, "y_8");


   /*
    * tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
    * y = _mm_sub_ps(y, tmp);
    * y = _mm_add_ps(y, *(v4sf*)_ps_1);
    */
   LLVMValueRef half = lp_build_const_v4sf(0.5);
   LLVMValueRef tmp = LLVMBuildMul(b, z, half, "tmp");
   LLVMValueRef y_9 = LLVMBuildSub(b, y_8, tmp, "y_9");
   LLVMValueRef one = lp_build_const_v4sf(1.0);
   LLVMValueRef y_10 = LLVMBuildAdd(b, y_9, one, "y_10");

   /*
    * _PS_CONST(sincof_p0, -1.9515295891E-4);
    * _PS_CONST(sincof_p1,  8.3321608736E-3);
    * _PS_CONST(sincof_p2, -1.6666654611E-1);
    */
   LLVMValueRef sincof_p0 = lp_build_const_v4sf(-1.9515295891E-4);
   LLVMValueRef sincof_p1 = lp_build_const_v4sf(8.3321608736E-3);
   LLVMValueRef sincof_p2 = lp_build_const_v4sf(-1.6666654611E-1);

   /*
    * Evaluate the second polynomial (Pi/4 < x <= Pi/2)
    *
    * y2 = *(v4sf*)_ps_sincof_p0;
    * y2 = _mm_mul_ps(y2, z);
    * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
    * y2 = _mm_mul_ps(y2, z);
    * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
    * y2 = _mm_mul_ps(y2, z);
    * y2 = _mm_mul_ps(y2, x);
    * y2 = _mm_add_ps(y2, x);
    */

   LLVMValueRef y2_3 = LLVMBuildMul(b, z, sincof_p0, "y2_3");
   LLVMValueRef y2_4 = LLVMBuildAdd(b, y2_3, sincof_p1, "y2_4");
   LLVMValueRef y2_5 = LLVMBuildMul(b, y2_4, z, "y2_5");
   LLVMValueRef y2_6 = LLVMBuildAdd(b, y2_5, sincof_p2, "y2_6");
   LLVMValueRef y2_7 = LLVMBuildMul(b, y2_6, z, "y2_7");
   LLVMValueRef y2_8 = LLVMBuildMul(b, y2_7, x_3, "y2_8");
   LLVMValueRef y2_9 = LLVMBuildAdd(b, y2_8, x_3, "y2_9");

   /*
    * select the correct result from the two polynomials
    * xmm3 = poly_mask;
    * y2 = _mm_and_ps(xmm3, y2); //, xmm3);
    * y = _mm_andnot_ps(xmm3, y);
    * y = _mm_add_ps(y,y2);
    */
   LLVMValueRef y2_i = LLVMBuildBitCast(b, y2_9, v4si, "y2_i");
   LLVMValueRef y_i = LLVMBuildBitCast(b, y_10, v4si, "y_i");
   LLVMValueRef y2_and = LLVMBuildAnd(b, y2_i, poly_mask, "y2_and");
   LLVMValueRef inv = lp_build_const_v4si(~0);
   LLVMValueRef poly_mask_inv = LLVMBuildXor(b, poly_mask, inv, "poly_mask_inv");
   LLVMValueRef y_and = LLVMBuildAnd(b, y_i, poly_mask_inv, "y_and");
   LLVMValueRef y_combine = LLVMBuildAdd(b, y_and, y2_and, "y_combine");

   /*
    * update the sign
    * y = _mm_xor_ps(y, sign_bit);
    */
   LLVMValueRef y_sign = LLVMBuildXor(b, y_combine, sign_bit_1, "y_sign");
   LLVMValueRef y_result = LLVMBuildBitCast(b, y_sign, v4sf, "y_result");
   return y_result;
}
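
/*
 * Scalar sketch of the cephes-style evaluation above (illustrative only,
 * assumes <math.h>); the vectorized code performs exactly these steps with
 * SSE bit operations instead of branches.
 */
#if 0
static float
sin_ref(float x)
{
   float sign = x < 0.0f ? -1.0f : 1.0f;
   float y, z, poly;
   int j;

   x = fabsf(x);
   j = (int)(x * 1.27323954473516f);   /* j = x * 4/Pi */
   j = (j + 1) & ~1;                   /* round up to an even quadrant index */
   y = (float)j;
   if (j & 4)
      sign = -sign;                    /* the swap sign flag */

   /* extended precision modular arithmetic: x -= y * Pi/4 */
   x = ((x - y * 0.78515625f) - y * 2.4187564849853515625e-4f)
     - y * 3.77489497744594108e-8f;

   z = x * x;
   if (j & 2)  /* cosine polynomial for the second half of the range */
      poly = ((2.443315711809948e-5f * z - 1.388731625493765e-3f) * z
              + 4.166664568298827e-2f) * z * z - 0.5f * z + 1.0f;
   else        /* sine polynomial for 0 <= x <= Pi/4 */
      poly = ((-1.9515295891e-4f * z + 8.3321608736e-3f) * z
              - 1.6666654611e-1f) * z * x + x;

   return sign * poly;
}
#endif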


/**
 * Generate cos(a) using SSE2
 */
LLVMValueRef
lp_build_cos(struct lp_build_context *bld,
             LLVMValueRef a)
{
   struct lp_type int_type = lp_int_type(bld->type);
   LLVMBuilderRef b = bld->builder;
   LLVMTypeRef v4sf = LLVMVectorType(LLVMFloatType(), 4);
   LLVMTypeRef v4si = LLVMVectorType(LLVMInt32Type(), 4);

   /*
    * take the absolute value,
    * x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
    */

   LLVMValueRef inv_sig_mask = lp_build_const_v4si(~0x80000000);
   LLVMValueRef a_v4si = LLVMBuildBitCast(b, a, v4si, "a_v4si");

   LLVMValueRef absi = LLVMBuildAnd(b, a_v4si, inv_sig_mask, "absi");
   LLVMValueRef x_abs = LLVMBuildBitCast(b, absi, v4sf, "x_abs");

   /*
    * scale by 4/Pi
    * y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
    */

   LLVMValueRef FOPi = lp_build_const_v4sf(1.27323954473516);
   LLVMValueRef scale_y = LLVMBuildMul(b, x_abs, FOPi, "scale_y");

   /*
    * store the integer part of y in mm0
    * emm2 = _mm_cvttps_epi32(y);
    */

   LLVMValueRef emm2_i = LLVMBuildFPToSI(b, scale_y, v4si, "emm2_i");

   /*
    * j=(j+1) & (~1) (see the cephes sources)
    * emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
    */

   LLVMValueRef all_one = lp_build_const_v4si(1);
   LLVMValueRef emm2_add = LLVMBuildAdd(b, emm2_i, all_one, "emm2_add");
   /*
    * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
    */
   LLVMValueRef inv_one = lp_build_const_v4si(~1);
   LLVMValueRef emm2_and = LLVMBuildAnd(b, emm2_add, inv_one, "emm2_and");

   /*
    * y = _mm_cvtepi32_ps(emm2);
    */
   LLVMValueRef y_2 = LLVMBuildSIToFP(b, emm2_and, v4sf, "y_2");


   /*
    * emm2 = _mm_sub_epi32(emm2, *(v4si*)_pi32_2);
    */
   LLVMValueRef const_2 = lp_build_const_v4si(2);
   LLVMValueRef emm2_2 = LLVMBuildSub(b, emm2_and, const_2, "emm2_2");


   /* get the swap sign flag
    * emm0 = _mm_andnot_si128(emm2, *(v4si*)_pi32_4);
    */
   LLVMValueRef inv = lp_build_const_v4si(~0);
   LLVMValueRef emm0_not = LLVMBuildXor(b, emm2_2, inv, "emm0_not");
   LLVMValueRef pi32_4 = lp_build_const_v4si(4);
   LLVMValueRef emm0_and = LLVMBuildAnd(b, emm0_not, pi32_4, "emm0_and");

   /*
    * emm2 = _mm_slli_epi32(emm0, 29);
    */
   LLVMValueRef const_29 = lp_build_const_v4si(29);
   LLVMValueRef sign_bit = LLVMBuildShl(b, emm0_and, const_29, "sign_bit");

   /*
    * get the polynomial selection mask
    * there is one polynomial for 0 <= x <= Pi/4
    * and another one for Pi/4 < x <= Pi/2
    * Both branches will be computed.
    *
    * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
    * emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
    */

   LLVMValueRef pi32_2 = lp_build_const_v4si(2);
   LLVMValueRef emm2_3 = LLVMBuildAnd(b, emm2_2, pi32_2, "emm2_3");
   LLVMValueRef poly_mask = lp_build_compare(b, int_type, PIPE_FUNC_EQUAL,
                                             emm2_3, lp_build_const_v4si(0));

   /*
    * _PS_CONST(minus_cephes_DP1, -0.78515625);
    * _PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
    * _PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
    */
   LLVMValueRef DP1 = lp_build_const_v4sf(-0.78515625);
   LLVMValueRef DP2 = lp_build_const_v4sf(-2.4187564849853515625e-4);
   LLVMValueRef DP3 = lp_build_const_v4sf(-3.77489497744594108e-8);

   /*
    * The magic pass: "Extended precision modular arithmetic"
    * x = ((x - y * DP1) - y * DP2) - y * DP3;
    * xmm1 = _mm_mul_ps(y, xmm1);
    * xmm2 = _mm_mul_ps(y, xmm2);
    * xmm3 = _mm_mul_ps(y, xmm3);
    */
   LLVMValueRef xmm1 = LLVMBuildMul(b, y_2, DP1, "xmm1");
   LLVMValueRef xmm2 = LLVMBuildMul(b, y_2, DP2, "xmm2");
   LLVMValueRef xmm3 = LLVMBuildMul(b, y_2, DP3, "xmm3");

   /*
    * x = _mm_add_ps(x, xmm1);
    * x = _mm_add_ps(x, xmm2);
    * x = _mm_add_ps(x, xmm3);
    */

   LLVMValueRef x_1 = LLVMBuildAdd(b, x_abs, xmm1, "x_1");
   LLVMValueRef x_2 = LLVMBuildAdd(b, x_1, xmm2, "x_2");
   LLVMValueRef x_3 = LLVMBuildAdd(b, x_2, xmm3, "x_3");

   /*
    * Evaluate the first polynomial (0 <= x <= Pi/4)
    *
    * z = _mm_mul_ps(x,x);
    */
   LLVMValueRef z = LLVMBuildMul(b, x_3, x_3, "z");

   /*
    * _PS_CONST(coscof_p0,  2.443315711809948E-005);
    * _PS_CONST(coscof_p1, -1.388731625493765E-003);
    * _PS_CONST(coscof_p2,  4.166664568298827E-002);
    */
   LLVMValueRef coscof_p0 = lp_build_const_v4sf(2.443315711809948E-005);
   LLVMValueRef coscof_p1 = lp_build_const_v4sf(-1.388731625493765E-003);
   LLVMValueRef coscof_p2 = lp_build_const_v4sf(4.166664568298827E-002);

   /*
    * y = *(v4sf*)_ps_coscof_p0;
    * y = _mm_mul_ps(y, z);
    */
   LLVMValueRef y_3 = LLVMBuildMul(b, z, coscof_p0, "y_3");
   LLVMValueRef y_4 = LLVMBuildAdd(b, y_3, coscof_p1, "y_4");
   LLVMValueRef y_5 = LLVMBuildMul(b, y_4, z, "y_5");
   LLVMValueRef y_6 = LLVMBuildAdd(b, y_5, coscof_p2, "y_6");
   LLVMValueRef y_7 = LLVMBuildMul(b, y_6, z, "y_7");
   LLVMValueRef y_8 = LLVMBuildMul(b, y_7, z, "y_8");


   /*
    * tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
    * y = _mm_sub_ps(y, tmp);
    * y = _mm_add_ps(y, *(v4sf*)_ps_1);
    */
   LLVMValueRef half = lp_build_const_v4sf(0.5);
   LLVMValueRef tmp = LLVMBuildMul(b, z, half, "tmp");
   LLVMValueRef y_9 = LLVMBuildSub(b, y_8, tmp, "y_9");
   LLVMValueRef one = lp_build_const_v4sf(1.0);
   LLVMValueRef y_10 = LLVMBuildAdd(b, y_9, one, "y_10");

   /*
    * _PS_CONST(sincof_p0, -1.9515295891E-4);
    * _PS_CONST(sincof_p1,  8.3321608736E-3);
    * _PS_CONST(sincof_p2, -1.6666654611E-1);
    */
   LLVMValueRef sincof_p0 = lp_build_const_v4sf(-1.9515295891E-4);
   LLVMValueRef sincof_p1 = lp_build_const_v4sf(8.3321608736E-3);
   LLVMValueRef sincof_p2 = lp_build_const_v4sf(-1.6666654611E-1);

   /*
    * Evaluate the second polynomial (Pi/4 < x <= Pi/2)
    *
    * y2 = *(v4sf*)_ps_sincof_p0;
    * y2 = _mm_mul_ps(y2, z);
    * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
    * y2 = _mm_mul_ps(y2, z);
    * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
    * y2 = _mm_mul_ps(y2, z);
    * y2 = _mm_mul_ps(y2, x);
    * y2 = _mm_add_ps(y2, x);
    */

   LLVMValueRef y2_3 = LLVMBuildMul(b, z, sincof_p0, "y2_3");
   LLVMValueRef y2_4 = LLVMBuildAdd(b, y2_3, sincof_p1, "y2_4");
   LLVMValueRef y2_5 = LLVMBuildMul(b, y2_4, z, "y2_5");
   LLVMValueRef y2_6 = LLVMBuildAdd(b, y2_5, sincof_p2, "y2_6");
   LLVMValueRef y2_7 = LLVMBuildMul(b, y2_6, z, "y2_7");
   LLVMValueRef y2_8 = LLVMBuildMul(b, y2_7, x_3, "y2_8");
   LLVMValueRef y2_9 = LLVMBuildAdd(b, y2_8, x_3, "y2_9");

   /*
    * select the correct result from the two polynomials
    * xmm3 = poly_mask;
    * y2 = _mm_and_ps(xmm3, y2); //, xmm3);
    * y = _mm_andnot_ps(xmm3, y);
    * y = _mm_add_ps(y,y2);
    */
   LLVMValueRef y2_i = LLVMBuildBitCast(b, y2_9, v4si, "y2_i");
   LLVMValueRef y_i = LLVMBuildBitCast(b, y_10, v4si, "y_i");
   LLVMValueRef y2_and = LLVMBuildAnd(b, y2_i, poly_mask, "y2_and");
   LLVMValueRef poly_mask_inv = LLVMBuildXor(b, poly_mask, inv, "poly_mask_inv");
   LLVMValueRef y_and = LLVMBuildAnd(b, y_i, poly_mask_inv, "y_and");
   LLVMValueRef y_combine = LLVMBuildAdd(b, y_and, y2_and, "y_combine");

   /*
    * update the sign
    * y = _mm_xor_ps(y, sign_bit);
    */
   LLVMValueRef y_sign = LLVMBuildXor(b, y_combine, sign_bit, "y_sign");
   LLVMValueRef y_result = LLVMBuildBitCast(b, y_sign, v4sf, "y_result");
   return y_result;
}


/**
 * Generate pow(x, y)
 */
LLVMValueRef
lp_build_pow(struct lp_build_context *bld,
             LLVMValueRef x,
             LLVMValueRef y)
{
   /* TODO: optimize the constant case */
   if(LLVMIsConstant(x) && LLVMIsConstant(y))
      debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                   __FUNCTION__);

   return lp_build_exp2(bld, lp_build_mul(bld, lp_build_log2(bld, x), y));
}


/**
 * Generate exp(x)
 */
LLVMValueRef
lp_build_exp(struct lp_build_context *bld,
             LLVMValueRef x)
{
   /* log2(e) = 1/log(2) */
   LLVMValueRef log2e = lp_build_const_vec(bld->type, 1.4426950408889634);

   /* exp(x) = 2^(x * log2(e)) */
   return lp_build_exp2(bld, lp_build_mul(bld, log2e, x));
}


/**
 * Generate log(x)
 */
LLVMValueRef
lp_build_log(struct lp_build_context *bld,
             LLVMValueRef x)
{
   /* log(2) */
   LLVMValueRef log2 = lp_build_const_vec(bld->type, 0.69314718055994529);

   /* log(x) = log2(x) * log(2) */
   return lp_build_mul(bld, log2, lp_build_log2(bld, x));
}


#define EXP_POLY_DEGREE 3
#define LOG_POLY_DEGREE 5


/**
 * Generate polynomial.
 * Ex:  coeffs[0] + x * coeffs[1] + x^2 * coeffs[2].
 */
static LLVMValueRef
lp_build_polynomial(struct lp_build_context *bld,
                    LLVMValueRef x,
                    const double *coeffs,
                    unsigned num_coeffs)
{
   const struct lp_type type = bld->type;
   LLVMValueRef res = NULL;
   unsigned i;

   /* TODO: optimize the constant case */
   if(LLVMIsConstant(x))
      debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                   __FUNCTION__);

   /* Evaluate with Horner's scheme, highest coefficient first */
   for (i = num_coeffs; i--; ) {
      LLVMValueRef coeff;

      coeff = lp_build_const_vec(type, coeffs[i]);

      if(res)
         res = lp_build_add(bld, coeff, lp_build_mul(bld, x, res));
      else
         res = coeff;
   }

   if(res)
      return res;
   else
      return bld->undef;
}
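
/*
 * Scalar sketch of the evaluation order above (illustrative only): for
 * coeffs = {c0, c1, c2} this computes c0 + x*(c1 + x*c2), i.e. Horner's
 * scheme, one multiply and one add per coefficient.
 */
#if 0
static double
polynomial_ref(double x, const double *coeffs, unsigned num_coeffs)
{
   double res = 0.0;
   unsigned i;
   for (i = num_coeffs; i--; )
      res = coeffs[i] + x * res;
   return res;
}
#endif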


/**
 * Minimax polynomial fit of 2**x, in range [0, 1[
 */
const double lp_build_exp2_polynomial[] = {
#if EXP_POLY_DEGREE == 5
   0.999999999690134838155,
   0.583974334321735217258,
   0.164553105719676828492,
   0.0292811063701710962255,
   0.00354944426657875141846,
   0.000296253726543423377365
#elif EXP_POLY_DEGREE == 4
   1.00000001502262084505,
   0.563586057338685991394,
   0.150436017652442413623,
   0.0243220604213317927308,
   0.0025359088446580436489
#elif EXP_POLY_DEGREE == 3
   0.999925218562710312959,
   0.695833540494823811697,
   0.226067155427249155588,
   0.0780245226406372992967
#elif EXP_POLY_DEGREE == 2
   1.00172476321474503578,
   0.657636275736077639316,
   0.33718943461968720704
#else
#error
#endif
};


void
lp_build_exp2_approx(struct lp_build_context *bld,
                     LLVMValueRef x,
                     LLVMValueRef *p_exp2_int_part,
                     LLVMValueRef *p_frac_part,
                     LLVMValueRef *p_exp2)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMValueRef ipart = NULL;
   LLVMValueRef fpart = NULL;
   LLVMValueRef expipart = NULL;
   LLVMValueRef expfpart = NULL;
   LLVMValueRef res = NULL;

   if(p_exp2_int_part || p_frac_part || p_exp2) {
      /* TODO: optimize the constant case */
      if(LLVMIsConstant(x))
         debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                      __FUNCTION__);

      assert(type.floating && type.width == 32);

      x = lp_build_min(bld, x, lp_build_const_vec(type,  129.0));
      x = lp_build_max(bld, x, lp_build_const_vec(type, -126.99999));

      /* ipart = floor(x) */
      ipart = lp_build_floor(bld, x);

      /* fpart = x - ipart */
      fpart = LLVMBuildSub(bld->builder, x, ipart, "");
   }

   if(p_exp2_int_part || p_exp2) {
      /* expipart = (float) (1 << ipart) */
      ipart = LLVMBuildFPToSI(bld->builder, ipart, int_vec_type, "");
      expipart = LLVMBuildAdd(bld->builder, ipart, lp_build_const_int_vec(type, 127), "");
      expipart = LLVMBuildShl(bld->builder, expipart, lp_build_const_int_vec(type, 23), "");
      expipart = LLVMBuildBitCast(bld->builder, expipart, vec_type, "");
   }

   if(p_exp2) {
      expfpart = lp_build_polynomial(bld, fpart, lp_build_exp2_polynomial,
                                     Elements(lp_build_exp2_polynomial));

      res = LLVMBuildMul(bld->builder, expipart, expfpart, "");
   }

   if(p_exp2_int_part)
      *p_exp2_int_part = expipart;

   if(p_frac_part)
      *p_frac_part = fpart;

   if(p_exp2)
      *p_exp2 = res;
}
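
/*
 * Scalar sketch of the technique above (illustrative only, assumes 32-bit
 * IEEE floats, <math.h>, <string.h> and <stdint.h>): the integer part of x
 * goes straight into the exponent bits; the fractional part goes through the
 * minimax polynomial (degree-3 coefficient set shown, per EXP_POLY_DEGREE).
 */
#if 0
static float
exp2_ref(float x)
{
   int ipart = (int)floorf(x);
   float fpart = x - (float)ipart;
   uint32_t bits = (uint32_t)(ipart + 127) << 23;  /* 2^ipart as float bits */
   float expipart, expfpart;
   memcpy(&expipart, &bits, sizeof expipart);
   expfpart = 0.999925218562710312959f
            + fpart * (0.695833540494823811697f
            + fpart * (0.226067155427249155588f
            + fpart * 0.0780245226406372992967f));
   return expipart * expfpart;                     /* ~2^ipart * 2^fpart */
}
#endif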


LLVMValueRef
lp_build_exp2(struct lp_build_context *bld,
              LLVMValueRef x)
{
   LLVMValueRef res;
   lp_build_exp2_approx(bld, x, NULL, NULL, &res);
   return res;
}


/**
 * Minimax polynomial fit of log2(x)/(x - 1), for x in range [1, 2[
 * These coefficients can be generated with
 * http://www.boost.org/doc/libs/1_36_0/libs/math/doc/sf_and_dist/html/math_toolkit/toolkit/internals2/minimax.html
 */
const double lp_build_log2_polynomial[] = {
#if LOG_POLY_DEGREE == 6
   3.11578814719469302614,
   -3.32419399085241980044,
   2.59883907202499966007,
   -1.23152682416275988241,
   0.318212422185251071475,
   -0.0344359067839062357313
#elif LOG_POLY_DEGREE == 5
   2.8882704548164776201,
   -2.52074962577807006663,
   1.48116647521213171641,
   -0.465725644288844778798,
   0.0596515482674574969533
#elif LOG_POLY_DEGREE == 4
   2.61761038894603480148,
   -1.75647175389045657003,
   0.688243882994381274313,
   -0.107254423828329604454
#elif LOG_POLY_DEGREE == 3
   2.28330284476918490682,
   -1.04913055217340124191,
   0.204446009836232697516
#else
#error
#endif
};


/**
 * See http://www.devmaster.net/forums/showthread.php?p=43580
 */
void
lp_build_log2_approx(struct lp_build_context *bld,
                     LLVMValueRef x,
                     LLVMValueRef *p_exp,
                     LLVMValueRef *p_floor_log2,
                     LLVMValueRef *p_log2)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);

   LLVMValueRef expmask = lp_build_const_int_vec(type, 0x7f800000);
   LLVMValueRef mantmask = lp_build_const_int_vec(type, 0x007fffff);
   LLVMValueRef one = LLVMConstBitCast(bld->one, int_vec_type);

   LLVMValueRef i = NULL;
   LLVMValueRef exp = NULL;
   LLVMValueRef mant = NULL;
   LLVMValueRef logexp = NULL;
   LLVMValueRef logmant = NULL;
   LLVMValueRef res = NULL;

   if(p_exp || p_floor_log2 || p_log2) {
      /* TODO: optimize the constant case */
      if(LLVMIsConstant(x))
         debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                      __FUNCTION__);

      assert(type.floating && type.width == 32);

      i = LLVMBuildBitCast(bld->builder, x, int_vec_type, "");

      /* exp = (float) exponent(x) */
      exp = LLVMBuildAnd(bld->builder, i, expmask, "");
   }

   if(p_floor_log2 || p_log2) {
      logexp = LLVMBuildLShr(bld->builder, exp, lp_build_const_int_vec(type, 23), "");
      logexp = LLVMBuildSub(bld->builder, logexp, lp_build_const_int_vec(type, 127), "");
      logexp = LLVMBuildSIToFP(bld->builder, logexp, vec_type, "");
   }

   if(p_log2) {
      /* mant = (float) mantissa(x) */
      mant = LLVMBuildAnd(bld->builder, i, mantmask, "");
      mant = LLVMBuildOr(bld->builder, mant, one, "");
      mant = LLVMBuildBitCast(bld->builder, mant, vec_type, "");

      logmant = lp_build_polynomial(bld, mant, lp_build_log2_polynomial,
                                    Elements(lp_build_log2_polynomial));

      /* This effectively increases the polynomial degree by one, but ensures that log2(1) == 0 */
      logmant = LLVMBuildMul(bld->builder, logmant, LLVMBuildSub(bld->builder, mant, bld->one, ""), "");

      res = LLVMBuildAdd(bld->builder, logmant, logexp, "");
   }

   if(p_exp) {
      exp = LLVMBuildBitCast(bld->builder, exp, vec_type, "");
      *p_exp = exp;
   }

   if(p_floor_log2)
      *p_floor_log2 = logexp;

   if(p_log2)
      *p_log2 = res;
}
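
/*
 * Scalar sketch of the decomposition above (illustrative only, assumes
 * 32-bit IEEE floats, <string.h> and <stdint.h>): log2(x) = exponent(x) +
 * log2(mant(x)), with the mantissa term approximated by the minimax fit of
 * log2(m)/(m - 1) times (m - 1), which pins log2(1) == 0 (degree-3
 * coefficient set shown for brevity).
 */
#if 0
static float
log2_ref(float x)
{
   uint32_t i, mi;
   int logexp;
   float mant, poly;
   memcpy(&i, &x, sizeof i);
   logexp = (int)((i & 0x7f800000) >> 23) - 127;  /* unbiased exponent */
   mi = (i & 0x007fffff) | 0x3f800000;            /* mantissa in [1, 2[ */
   memcpy(&mant, &mi, sizeof mant);
   poly = 2.28330284476918490682f
        + mant * (-1.04913055217340124191f
        + mant * 0.204446009836232697516f);
   return (float)logexp + poly * (mant - 1.0f);
}
#endif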


LLVMValueRef
lp_build_log2(struct lp_build_context *bld,
              LLVMValueRef x)
{
   LLVMValueRef res;
   lp_build_log2_approx(bld, x, NULL, NULL, &res);
   return res;
}