1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 /**
30 * @file
31 * Helper arithmetic functions.
32 *
33 * LLVM IR doesn't support all the basic arithmetic operations we care about
34 * (most notably min/max and saturated operations), and it is often necessary
35 * to resort to machine-specific intrinsics directly. The functions here hide
36 * all these implementation details from the other modules.
37 *
38 * We also do simple expression simplification here. Reasons are:
39 * - it is very easy, given that all the necessary information is readily available
40 * - LLVM optimization passes fail to simplify several vector expressions
41 * - we often know value constraints which the optimization passes have no way
42 * of knowing, such as when source arguments are known to be in the [0, 1] range.
43 *
44 * @author Jose Fonseca <jfonseca@vmware.com>
45 */
46
47
48 #include "util/u_memory.h"
49 #include "util/u_debug.h"
50 #include "util/u_math.h"
51 #include "util/u_string.h"
52 #include "util/u_cpu_detect.h"
53
54 #include "lp_bld_type.h"
55 #include "lp_bld_const.h"
56 #include "lp_bld_intr.h"
57 #include "lp_bld_logic.h"
58 #include "lp_bld_pack.h"
59 #include "lp_bld_debug.h"
60 #include "lp_bld_arit.h"
61
62
63 /**
64 * Generate min(a, b)
65 * No checks are done for the special cases of a or b being 0 or 1.
66 */
67 static LLVMValueRef
68 lp_build_min_simple(struct lp_build_context *bld,
69 LLVMValueRef a,
70 LLVMValueRef b)
71 {
72 const struct lp_type type = bld->type;
73 const char *intrinsic = NULL;
74 LLVMValueRef cond;
75
76 /* TODO: optimize the constant case */
77
78 if(type.width * type.length == 128) {
79 if(type.floating) {
80 if(type.width == 32 && util_cpu_caps.has_sse)
81 intrinsic = "llvm.x86.sse.min.ps";
82 if(type.width == 64 && util_cpu_caps.has_sse2)
83 intrinsic = "llvm.x86.sse2.min.pd";
84 }
85 else {
86 if(type.width == 8 && !type.sign && util_cpu_caps.has_sse2)
87 intrinsic = "llvm.x86.sse2.pminu.b";
88 if(type.width == 8 && type.sign && util_cpu_caps.has_sse4_1)
89 intrinsic = "llvm.x86.sse41.pminsb";
90 if(type.width == 16 && !type.sign && util_cpu_caps.has_sse4_1)
91 intrinsic = "llvm.x86.sse41.pminuw";
92 if(type.width == 16 && type.sign && util_cpu_caps.has_sse2)
93 intrinsic = "llvm.x86.sse2.pmins.w";
94 if(type.width == 32 && !type.sign && util_cpu_caps.has_sse4_1)
95 intrinsic = "llvm.x86.sse41.pminud";
96 if(type.width == 32 && type.sign && util_cpu_caps.has_sse4_1)
97 intrinsic = "llvm.x86.sse41.pminsd";
98 }
99 }
100
101 if(intrinsic)
102 return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
103
104 cond = lp_build_cmp(bld, PIPE_FUNC_LESS, a, b);
105 return lp_build_select(bld, cond, a, b);
106 }
107
108
109 /**
110 * Generate max(a, b)
111 * No checks are done for the special cases of a or b being 0 or 1.
112 */
113 static LLVMValueRef
114 lp_build_max_simple(struct lp_build_context *bld,
115 LLVMValueRef a,
116 LLVMValueRef b)
117 {
118 const struct lp_type type = bld->type;
119 const char *intrinsic = NULL;
120 LLVMValueRef cond;
121
122 /* TODO: optimize the constant case */
123
124 if(type.width * type.length == 128) {
125 if(type.floating) {
126 if(type.width == 32 && util_cpu_caps.has_sse)
127 intrinsic = "llvm.x86.sse.max.ps";
128 if(type.width == 64 && util_cpu_caps.has_sse2)
129 intrinsic = "llvm.x86.sse2.max.pd";
130 }
131 else {
132 if(type.width == 8 && !type.sign && util_cpu_caps.has_sse2)
133 intrinsic = "llvm.x86.sse2.pmaxu.b";
134 if(type.width == 8 && type.sign && util_cpu_caps.has_sse4_1)
135 intrinsic = "llvm.x86.sse41.pmaxsb";
136 if(type.width == 16 && !type.sign && util_cpu_caps.has_sse4_1)
137 intrinsic = "llvm.x86.sse41.pmaxuw";
138 if(type.width == 16 && type.sign && util_cpu_caps.has_sse2)
139 intrinsic = "llvm.x86.sse2.pmaxs.w";
140 if(type.width == 32 && !type.sign && util_cpu_caps.has_sse4_1)
141 intrinsic = "llvm.x86.sse41.pmaxud";
142 if(type.width == 32 && type.sign && util_cpu_caps.has_sse4_1)
143 intrinsic = "llvm.x86.sse41.pmaxsd";
144 }
145 }
146
147 if(intrinsic)
148 return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
149
150 cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, b);
151 return lp_build_select(bld, cond, a, b);
152 }
153
154
155 /**
156 * Generate 1 - a, or ~a depending on bld->type.
157 */
158 LLVMValueRef
159 lp_build_comp(struct lp_build_context *bld,
160 LLVMValueRef a)
161 {
162 const struct lp_type type = bld->type;
163
164 if(a == bld->one)
165 return bld->zero;
166 if(a == bld->zero)
167 return bld->one;
168
169 if(type.norm && !type.floating && !type.fixed && !type.sign) {
170 if(LLVMIsConstant(a))
171 return LLVMConstNot(a);
172 else
173 return LLVMBuildNot(bld->builder, a, "");
174 }
175
176 if(LLVMIsConstant(a))
177 return LLVMConstSub(bld->one, a);
178 else
179 return LLVMBuildSub(bld->builder, bld->one, a, "");
180 }
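/*
 * For illustration: with an 8-bit unsigned normalized type, 1.0 is represented
 * as 0xff, so 1.0 - a == 0xff - a == ~a, which is why the normalized unsigned
 * case above can use a plain bitwise NOT.
 */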
181
182
183 /**
184 * Generate a + b
185 */
186 LLVMValueRef
187 lp_build_add(struct lp_build_context *bld,
188 LLVMValueRef a,
189 LLVMValueRef b)
190 {
191 const struct lp_type type = bld->type;
192 LLVMValueRef res;
193
194 if(a == bld->zero)
195 return b;
196 if(b == bld->zero)
197 return a;
198 if(a == bld->undef || b == bld->undef)
199 return bld->undef;
200
201 if(bld->type.norm) {
202 const char *intrinsic = NULL;
203
204 if(a == bld->one || b == bld->one)
205 return bld->one;
206
207 if(util_cpu_caps.has_sse2 &&
208 type.width * type.length == 128 &&
209 !type.floating && !type.fixed) {
210 if(type.width == 8)
211 intrinsic = type.sign ? "llvm.x86.sse2.padds.b" : "llvm.x86.sse2.paddus.b";
212 if(type.width == 16)
213 intrinsic = type.sign ? "llvm.x86.sse2.padds.w" : "llvm.x86.sse2.paddus.w";
214 }
215
216 if(intrinsic)
217 return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
218 }
219
220 if(LLVMIsConstant(a) && LLVMIsConstant(b))
221 res = LLVMConstAdd(a, b);
222 else
223 res = LLVMBuildAdd(bld->builder, a, b, "");
224
225 /* clamp to ceiling of 1.0 */
226 if(bld->type.norm && (bld->type.floating || bld->type.fixed))
227 res = lp_build_min_simple(bld, res, bld->one);
228
229 /* XXX clamp to floor of -1 or 0??? */
230
231 return res;
232 }
233
234
235 /**
236 * Generate a - b
237 */
238 LLVMValueRef
239 lp_build_sub(struct lp_build_context *bld,
240 LLVMValueRef a,
241 LLVMValueRef b)
242 {
243 const struct lp_type type = bld->type;
244 LLVMValueRef res;
245
246 if(b == bld->zero)
247 return a;
248 if(a == bld->undef || b == bld->undef)
249 return bld->undef;
250 if(a == b)
251 return bld->zero;
252
253 if(bld->type.norm) {
254 const char *intrinsic = NULL;
255
256 if(b == bld->one)
257 return bld->zero;
258
259 if(util_cpu_caps.has_sse2 &&
260 type.width * type.length == 128 &&
261 !type.floating && !type.fixed) {
262 if(type.width == 8)
263 intrinsic = type.sign ? "llvm.x86.sse2.psubs.b" : "llvm.x86.sse2.psubus.b";
264 if(type.width == 16)
265 intrinsic = type.sign ? "llvm.x86.sse2.psubs.w" : "llvm.x86.sse2.psubus.w";
266 }
267
268 if(intrinsic)
269 return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
270 }
271
272 if(LLVMIsConstant(a) && LLVMIsConstant(b))
273 res = LLVMConstSub(a, b);
274 else
275 res = LLVMBuildSub(bld->builder, a, b, "");
276
277 if(bld->type.norm && (bld->type.floating || bld->type.fixed))
278 res = lp_build_max_simple(bld, res, bld->zero);
279
280 return res;
281 }
282
283
284 /**
285 * Normalized 8bit multiplication.
286 *
287 * - alpha plus one
288 *
289 * makes the following approximation to the division (Sree)
290 *
291 * a*b/255 ~= (a*(b + 1)) >> 8
292 *
293 * which is the fastest method that satisfies the following OpenGL criteria
294 *
295 * 0*0 = 0 and 255*255 = 255
296 *
297 * - geometric series
298 *
299 * takes the geometric series approximation to the division
300 *
301 * t/255 = (t >> 8) + (t >> 16) + (t >> 24) ..
302 *
303 * in this case using just the first two terms, to fit in 16-bit arithmetic
304 *
305 * t/255 ~= (t + (t >> 8)) >> 8
306 *
307 * note that by itself this doesn't satisfy the OpenGL criteria, as it gives
308 * 255*255 = 254, so the special case b = 255 must be accounted for or
309 * roundoff must be used
310 *
311 * - geometric series plus rounding
312 *
313 * when using the geometric series division, instead of truncating the
314 * result, use roundoff in the approximation (Jim Blinn)
315 *
316 * t/255 ~= (t + (t >> 8) + 0x80) >> 8
317 *
318 * achieving exact results
319 *
320 * @sa Alvy Ray Smith, Image Compositing Fundamentals, Tech Memo 4, Aug 15, 1995,
321 * ftp://ftp.alvyray.com/Acrobat/4_Comp.pdf
322 * @sa Michael Herf, The "double blend trick", May 2000,
323 * http://www.stereopsis.com/doubleblend.html
324 */
325 static LLVMValueRef
326 lp_build_mul_u8n(LLVMBuilderRef builder,
327 struct lp_type i16_type,
328 LLVMValueRef a, LLVMValueRef b)
329 {
330 LLVMValueRef c8;
331 LLVMValueRef ab;
332
333 c8 = lp_build_int_const_scalar(i16_type, 8);
334
335 #if 0
336
337 /* a*b/255 ~= (a*(b + 1)) >> 8 */
338 b = LLVMBuildAdd(builder, b, lp_build_int_const_scalar(i16_type, 1), "");
339 ab = LLVMBuildMul(builder, a, b, "");
340
341 #else
342
343 /* ab/255 ~= (ab + (ab >> 8) + 0x80) >> 8 */
344 ab = LLVMBuildMul(builder, a, b, "");
345 ab = LLVMBuildAdd(builder, ab, LLVMBuildLShr(builder, ab, c8, ""), "");
346 ab = LLVMBuildAdd(builder, ab, lp_build_int_const_scalar(i16_type, 0x80), "");
347
348 #endif
349
350 ab = LLVMBuildLShr(builder, ab, c8, "");
351
352 return ab;
353 }
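/*
 * For illustration, the rounding variant above worked out at the endpoints:
 * a = b = 255 gives ab = 65025, ab + (ab >> 8) = 65279, plus 0x80 is 65407,
 * and shifting right by 8 yields 255; for b = 0 the result is 0, so both
 * OpenGL endpoint criteria hold.  A scalar sketch of the same computation
 * (not part of the code generation path, shown only for reference):
 */
#if 0
static unsigned
mul_u8n_ref(unsigned a, unsigned b)   /* a, b in [0, 255] */
{
   unsigned ab = a * b;
   ab = ab + (ab >> 8) + 0x80;        /* geometric series term + rounding */
   return ab >> 8;                    /* approximates a*b/255 */
}
#endif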
354
355
356 /**
357 * Generate a * b
358 */
359 LLVMValueRef
360 lp_build_mul(struct lp_build_context *bld,
361 LLVMValueRef a,
362 LLVMValueRef b)
363 {
364 const struct lp_type type = bld->type;
365 LLVMValueRef shift;
366 LLVMValueRef res;
367
368 if(a == bld->zero)
369 return bld->zero;
370 if(a == bld->one)
371 return b;
372 if(b == bld->zero)
373 return bld->zero;
374 if(b == bld->one)
375 return a;
376 if(a == bld->undef || b == bld->undef)
377 return bld->undef;
378
379 if(!type.floating && !type.fixed && type.norm) {
380 if(type.width == 8) {
381 struct lp_type i16_type = lp_wider_type(type);
382 LLVMValueRef al, ah, bl, bh, abl, abh, ab;
383
384 lp_build_unpack2(bld->builder, type, i16_type, a, &al, &ah);
385 lp_build_unpack2(bld->builder, type, i16_type, b, &bl, &bh);
386
387 /* PMULLW, PSRLW, PADDW */
388 abl = lp_build_mul_u8n(bld->builder, i16_type, al, bl);
389 abh = lp_build_mul_u8n(bld->builder, i16_type, ah, bh);
390
391 ab = lp_build_pack2(bld->builder, i16_type, type, abl, abh);
392
393 return ab;
394 }
395
396 /* FIXME */
397 assert(0);
398 }
399
400 if(type.fixed)
401 shift = lp_build_int_const_scalar(type, type.width/2);
402 else
403 shift = NULL;
404
405 if(LLVMIsConstant(a) && LLVMIsConstant(b)) {
406 res = LLVMConstMul(a, b);
407 if(shift) {
408 if(type.sign)
409 res = LLVMConstAShr(res, shift);
410 else
411 res = LLVMConstLShr(res, shift);
412 }
413 }
414 else {
415 res = LLVMBuildMul(bld->builder, a, b, "");
416 if(shift) {
417 if(type.sign)
418 res = LLVMBuildAShr(bld->builder, res, shift, "");
419 else
420 res = LLVMBuildLShr(bld->builder, res, shift, "");
421 }
422 }
423
424 return res;
425 }
426
427
428 /**
429 * Optimized multiplication of a vector by a small integer scale factor.
430 */
431 LLVMValueRef
432 lp_build_mul_imm(struct lp_build_context *bld,
433 LLVMValueRef a,
434 int b)
435 {
436 LLVMValueRef factor;
437
438 if(b == 0)
439 return bld->zero;
440
441 if(b == 1)
442 return a;
443
444 if(b == -1)
445 return LLVMBuildNeg(bld->builder, a, "");
446
447 if(b == 2 && bld->type.floating)
448 return lp_build_add(bld, a, a);
449
450 if(util_is_pot(b)) {
451 unsigned shift = ffs(b) - 1;
452
453 if(bld->type.floating) {
454 #if 0
455 /*
456 * Power of two multiplication by directly manipulating the exponent bits.
457 *
458 * XXX: This might not be always faster, it will introduce a small error
459 * for multiplication by zero, and it will produce wrong results
460 * for Inf and NaN.
461 */
462 unsigned mantissa = lp_mantissa(bld->type);
463 factor = lp_build_int_const_scalar(bld->type, (unsigned long long)shift << mantissa);
464 a = LLVMBuildBitCast(bld->builder, a, lp_build_int_vec_type(bld->type), "");
465 a = LLVMBuildAdd(bld->builder, a, factor, "");
466 a = LLVMBuildBitCast(bld->builder, a, lp_build_vec_type(bld->type), "");
467 return a;
468 #endif
469 }
470 else {
471 factor = lp_build_const_scalar(bld->type, shift);
472 return LLVMBuildShl(bld->builder, a, factor, "");
473 }
474 }
475
476 factor = lp_build_const_scalar(bld->type, (double)b);
477 return lp_build_mul(bld, a, factor);
478 }
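/*
 * For illustration of the disabled power-of-two path above: adding
 * (shift << 23) to the bit pattern of an IEEE-754 single increments its
 * biased exponent by 'shift', i.e. multiplies the value by 2^shift.  E.g.
 * 3.0f has exponent field 128; adding 2 << 23 gives exponent 130, which is
 * the bit pattern of 12.0f.  As noted above, this misbehaves for 0.0, Inf
 * and NaN inputs, which is why the code currently falls back to an ordinary
 * multiply for floating-point types.
 */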
479
480
481 /**
482 * Generate a / b
483 */
484 LLVMValueRef
485 lp_build_div(struct lp_build_context *bld,
486 LLVMValueRef a,
487 LLVMValueRef b)
488 {
489 const struct lp_type type = bld->type;
490
491 if(a == bld->zero)
492 return bld->zero;
493 if(a == bld->one)
494 return lp_build_rcp(bld, b);
495 if(b == bld->zero)
496 return bld->undef;
497 if(b == bld->one)
498 return a;
499 if(a == bld->undef || b == bld->undef)
500 return bld->undef;
501
502 if(LLVMIsConstant(a) && LLVMIsConstant(b))
503 return LLVMConstFDiv(a, b);
504
505 if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
506 return lp_build_mul(bld, a, lp_build_rcp(bld, b));
507
508 return LLVMBuildFDiv(bld->builder, a, b, "");
509 }
510
511
512 /**
513 * Linear interpolation.
514 *
515 * This also works for integer values with a few caveats.
516 *
517 * @sa http://www.stereopsis.com/doubleblend.html
518 */
519 LLVMValueRef
520 lp_build_lerp(struct lp_build_context *bld,
521 LLVMValueRef x,
522 LLVMValueRef v0,
523 LLVMValueRef v1)
524 {
525 LLVMValueRef delta;
526 LLVMValueRef res;
527
528 delta = lp_build_sub(bld, v1, v0);
529
530 res = lp_build_mul(bld, x, delta);
531
532 res = lp_build_add(bld, v0, res);
533
534 if(bld->type.fixed)
535 /* XXX: This step is necessary for lerping 8bit colors stored on 16bits,
536 * but it will be wrong for other uses. Basically we need a more
537 * powerful lp_type, capable of further distinguishing the values
538 * interpretation from the value storage. */
539 res = LLVMBuildAnd(bld->builder, res, lp_build_int_const_scalar(bld->type, (1 << bld->type.width/2) - 1), "");
540
541 return res;
542 }
543
544
545 LLVMValueRef
546 lp_build_lerp_2d(struct lp_build_context *bld,
547 LLVMValueRef x,
548 LLVMValueRef y,
549 LLVMValueRef v00,
550 LLVMValueRef v01,
551 LLVMValueRef v10,
552 LLVMValueRef v11)
553 {
554 LLVMValueRef v0 = lp_build_lerp(bld, x, v00, v01);
555 LLVMValueRef v1 = lp_build_lerp(bld, x, v10, v11);
556 return lp_build_lerp(bld, y, v0, v1);
557 }
558
559
560 /**
561 * Generate min(a, b)
562 * Do checks for special cases.
563 */
564 LLVMValueRef
565 lp_build_min(struct lp_build_context *bld,
566 LLVMValueRef a,
567 LLVMValueRef b)
568 {
569 if(a == bld->undef || b == bld->undef)
570 return bld->undef;
571
572 if(a == b)
573 return a;
574
575 if(bld->type.norm) {
576 if(a == bld->zero || b == bld->zero)
577 return bld->zero;
578 if(a == bld->one)
579 return b;
580 if(b == bld->one)
581 return a;
582 }
583
584 return lp_build_min_simple(bld, a, b);
585 }
586
587
588 /**
589 * Generate max(a, b)
590 * Do checks for special cases.
591 */
592 LLVMValueRef
593 lp_build_max(struct lp_build_context *bld,
594 LLVMValueRef a,
595 LLVMValueRef b)
596 {
597 if(a == bld->undef || b == bld->undef)
598 return bld->undef;
599
600 if(a == b)
601 return a;
602
603 if(bld->type.norm) {
604 if(a == bld->one || b == bld->one)
605 return bld->one;
606 if(a == bld->zero)
607 return b;
608 if(b == bld->zero)
609 return a;
610 }
611
612 return lp_build_max_simple(bld, a, b);
613 }
614
615
616 /**
617 * Generate clamp(a, min, max)
618 * Do checks for special cases.
619 */
620 LLVMValueRef
621 lp_build_clamp(struct lp_build_context *bld,
622 LLVMValueRef a,
623 LLVMValueRef min,
624 LLVMValueRef max)
625 {
626 a = lp_build_min(bld, a, max);
627 a = lp_build_max(bld, a, min);
628 return a;
629 }
630
631
632 /**
633 * Generate abs(a)
634 */
635 LLVMValueRef
636 lp_build_abs(struct lp_build_context *bld,
637 LLVMValueRef a)
638 {
639 const struct lp_type type = bld->type;
640 LLVMTypeRef vec_type = lp_build_vec_type(type);
641
642 if(!type.sign)
643 return a;
644
645 if(type.floating) {
646 /* Mask out the sign bit */
647 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
648 unsigned long long absMask = ~(1ULL << (type.width - 1));
649 LLVMValueRef mask = lp_build_int_const_scalar(type, ((unsigned long long) absMask));
650 a = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
651 a = LLVMBuildAnd(bld->builder, a, mask, "");
652 a = LLVMBuildBitCast(bld->builder, a, vec_type, "");
653 return a;
654 }
655
656 if(type.width*type.length == 128 && util_cpu_caps.has_ssse3) {
657 switch(type.width) {
658 case 8:
659 return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.b.128", vec_type, a);
660 case 16:
661 return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.w.128", vec_type, a);
662 case 32:
663 return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.d.128", vec_type, a);
664 }
665 }
666
667 return lp_build_max(bld, a, LLVMBuildNeg(bld->builder, a, ""));
668 }
669
670
671 LLVMValueRef
672 lp_build_negate(struct lp_build_context *bld,
673 LLVMValueRef a)
674 {
675 return LLVMBuildNeg(bld->builder, a, "");
676 }
677
678
679 LLVMValueRef
680 lp_build_sgn(struct lp_build_context *bld,
681 LLVMValueRef a)
682 {
683 const struct lp_type type = bld->type;
684 LLVMTypeRef vec_type = lp_build_vec_type(type);
685 LLVMValueRef cond;
686 LLVMValueRef res;
687
688 /* Handle non-zero case */
689 if(!type.sign) {
690 /* if not zero then sign must be positive */
691 res = bld->one;
692 }
693 else if(type.floating) {
694 /* Take the sign bit and or it into the 1.0 constant */
695 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
696 LLVMValueRef mask = lp_build_int_const_scalar(type, (unsigned long long)1 << (type.width - 1));
697 LLVMValueRef sign;
698 LLVMValueRef one;
699 sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
700 sign = LLVMBuildAnd(bld->builder, sign, mask, "");
701 one = LLVMConstBitCast(bld->one, int_vec_type);
702 res = LLVMBuildOr(bld->builder, sign, one, "");
703 res = LLVMBuildBitCast(bld->builder, res, vec_type, "");
704 }
705 else
706 {
707 LLVMValueRef minus_one = lp_build_const_scalar(type, -1.0);
708 cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, bld->zero);
709 res = lp_build_select(bld, cond, bld->one, minus_one);
710 }
711
712 /* Handle zero */
713 cond = lp_build_cmp(bld, PIPE_FUNC_EQUAL, a, bld->zero);
714 res = lp_build_select(bld, cond, bld->zero, res);
715
716 return res;
717 }
718
719
720 /**
721 * Set the sign of float vector 'a' according to 'sign'.
722 * If sign==0, return abs(a).
723 * If sign==1, return -abs(a);
724 * Other values for sign produce undefined results.
725 */
726 LLVMValueRef
727 lp_build_set_sign(struct lp_build_context *bld,
728 LLVMValueRef a, LLVMValueRef sign)
729 {
730 const struct lp_type type = bld->type;
731 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
732 LLVMTypeRef vec_type = lp_build_vec_type(type);
733 LLVMValueRef shift = lp_build_int_const_scalar(type, type.width - 1);
734 LLVMValueRef mask = lp_build_int_const_scalar(type,
735 ~((unsigned long long) 1 << (type.width - 1)));
736 LLVMValueRef val, res;
737
738 assert(type.floating);
739
740 /* val = reinterpret_cast<int>(a) */
741 val = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
742 /* val = val & mask */
743 val = LLVMBuildAnd(bld->builder, val, mask, "");
744 /* sign = sign << shift */
745 sign = LLVMBuildShl(bld->builder, sign, shift, "");
746 /* res = val | sign */
747 res = LLVMBuildOr(bld->builder, val, sign, "");
748 /* res = reinterpret_cast<float>(res) */
749 res = LLVMBuildBitCast(bld->builder, res, vec_type, "");
750
751 return res;
752 }
753
754
755 /**
756 * Convert vector of int to vector of float.
757 */
758 LLVMValueRef
759 lp_build_int_to_float(struct lp_build_context *bld,
760 LLVMValueRef a)
761 {
762 const struct lp_type type = bld->type;
763
764 assert(type.floating);
765 /*assert(lp_check_value(type, a));*/
766
767 {
768 LLVMTypeRef vec_type = lp_build_vec_type(type);
769 /*LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);*/
770 LLVMValueRef res;
771 res = LLVMBuildSIToFP(bld->builder, a, vec_type, "");
772 return res;
773 }
774 }
775
776
777
778 enum lp_build_round_sse41_mode
779 {
780 LP_BUILD_ROUND_SSE41_NEAREST = 0,
781 LP_BUILD_ROUND_SSE41_FLOOR = 1,
782 LP_BUILD_ROUND_SSE41_CEIL = 2,
783 LP_BUILD_ROUND_SSE41_TRUNCATE = 3
784 };
785
786
787 static INLINE LLVMValueRef
788 lp_build_round_sse41(struct lp_build_context *bld,
789 LLVMValueRef a,
790 enum lp_build_round_sse41_mode mode)
791 {
792 const struct lp_type type = bld->type;
793 LLVMTypeRef vec_type = lp_build_vec_type(type);
794 const char *intrinsic;
795
796 assert(type.floating);
797 assert(type.width*type.length == 128);
798 assert(lp_check_value(type, a));
799 assert(util_cpu_caps.has_sse4_1);
800
801 switch(type.width) {
802 case 32:
803 intrinsic = "llvm.x86.sse41.round.ps";
804 break;
805 case 64:
806 intrinsic = "llvm.x86.sse41.round.pd";
807 break;
808 default:
809 assert(0);
810 return bld->undef;
811 }
812
813 return lp_build_intrinsic_binary(bld->builder, intrinsic, vec_type, a,
814 LLVMConstInt(LLVMInt32Type(), mode, 0));
815 }
816
817
818 LLVMValueRef
819 lp_build_trunc(struct lp_build_context *bld,
820 LLVMValueRef a)
821 {
822 const struct lp_type type = bld->type;
823
824 assert(type.floating);
825 assert(lp_check_value(type, a));
826
827 if(util_cpu_caps.has_sse4_1)
828 return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_TRUNCATE);
829 else {
830 LLVMTypeRef vec_type = lp_build_vec_type(type);
831 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
832 LLVMValueRef res;
833 res = LLVMBuildFPToSI(bld->builder, a, int_vec_type, "");
834 res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
835 return res;
836 }
837 }
838
839
840 LLVMValueRef
841 lp_build_round(struct lp_build_context *bld,
842 LLVMValueRef a)
843 {
844 const struct lp_type type = bld->type;
845
846 assert(type.floating);
847 assert(lp_check_value(type, a));
848
849 if(util_cpu_caps.has_sse4_1)
850 return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_NEAREST);
851 else {
852 LLVMTypeRef vec_type = lp_build_vec_type(type);
853 LLVMValueRef res;
854 res = lp_build_iround(bld, a);
855 res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
856 return res;
857 }
858 }
859
860
861 LLVMValueRef
862 lp_build_floor(struct lp_build_context *bld,
863 LLVMValueRef a)
864 {
865 const struct lp_type type = bld->type;
866
867 assert(type.floating);
868
869 if(util_cpu_caps.has_sse4_1)
870 return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
871 else {
872 LLVMTypeRef vec_type = lp_build_vec_type(type);
873 LLVMValueRef res;
874 res = lp_build_ifloor(bld, a);
875 res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
876 return res;
877 }
878 }
879
880
881 LLVMValueRef
882 lp_build_ceil(struct lp_build_context *bld,
883 LLVMValueRef a)
884 {
885 const struct lp_type type = bld->type;
886
887 assert(type.floating);
888 assert(lp_check_value(type, a));
889
890 if(util_cpu_caps.has_sse4_1)
891 return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_CEIL);
892 else {
893 LLVMTypeRef vec_type = lp_build_vec_type(type);
894 LLVMValueRef res;
895 res = lp_build_iceil(bld, a);
896 res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
897 return res;
898 }
899 }
900
901
902 /**
903 * Return the fractional part of 'a', computed as a - floor(a).
904 * Typically used in texture coord arithmetic.
905 */
906 LLVMValueRef
907 lp_build_fract(struct lp_build_context *bld,
908 LLVMValueRef a)
909 {
910 assert(bld->type.floating);
911 return lp_build_sub(bld, a, lp_build_floor(bld, a));
912 }
913
914
915 /**
916 * Convert to integer, through whichever rounding method that's fastest,
917 * typically truncating toward zero.
918 */
919 LLVMValueRef
920 lp_build_itrunc(struct lp_build_context *bld,
921 LLVMValueRef a)
922 {
923 const struct lp_type type = bld->type;
924 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
925
926 assert(type.floating);
927 assert(lp_check_value(type, a));
928
929 return LLVMBuildFPToSI(bld->builder, a, int_vec_type, "");
930 }
931
932
933 LLVMValueRef
934 lp_build_iround(struct lp_build_context *bld,
935 LLVMValueRef a)
936 {
937 const struct lp_type type = bld->type;
938 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
939 LLVMValueRef res;
940
941 assert(type.floating);
942 assert(lp_check_value(type, a));
943
944 if(util_cpu_caps.has_sse4_1) {
945 res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_NEAREST);
946 }
947 else {
948 LLVMTypeRef vec_type = lp_build_vec_type(type);
949 LLVMValueRef mask = lp_build_int_const_scalar(type, (unsigned long long)1 << (type.width - 1));
950 LLVMValueRef sign;
951 LLVMValueRef half;
952
953 /* get sign bit */
954 sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
955 sign = LLVMBuildAnd(bld->builder, sign, mask, "");
956
957 /* sign * 0.5 */
958 half = lp_build_const_scalar(type, 0.5);
959 half = LLVMBuildBitCast(bld->builder, half, int_vec_type, "");
960 half = LLVMBuildOr(bld->builder, sign, half, "");
961 half = LLVMBuildBitCast(bld->builder, half, vec_type, "");
962
963 res = LLVMBuildAdd(bld->builder, a, half, "");
964 }
965
966 res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "");
967
968 return res;
969 }
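/*
 * For illustration of the non-SSE4.1 path above: the sign bit of 'a' is
 * or'ed into 0.5, so 0.5 is added to positive inputs and -0.5 to negative
 * ones before truncating, e.g. 2.7 -> 3.2 -> 3 and -2.7 -> -3.2 -> -3.
 * Halfway cases therefore round away from zero.
 */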
970
971
972 /**
973 * Convert float[] to int[] with floor().
974 */
975 LLVMValueRef
976 lp_build_ifloor(struct lp_build_context *bld,
977 LLVMValueRef a)
978 {
979 const struct lp_type type = bld->type;
980 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
981 LLVMValueRef res;
982
983 assert(type.floating);
984 assert(lp_check_value(type, a));
985
986 if(util_cpu_caps.has_sse4_1) {
987 res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
988 }
989 else {
990 /* Give negative values a -0.99999(9) offset so that the truncation below rounds them down */
991 LLVMTypeRef vec_type = lp_build_vec_type(type);
992 unsigned mantissa = lp_mantissa(type);
993 LLVMValueRef mask = lp_build_int_const_scalar(type, (unsigned long long)1 << (type.width - 1));
994 LLVMValueRef sign;
995 LLVMValueRef offset;
996
997 /* sign = a < 0 ? ~0 : 0 */
998 sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
999 sign = LLVMBuildAnd(bld->builder, sign, mask, "");
1000 sign = LLVMBuildAShr(bld->builder, sign, lp_build_int_const_scalar(type, type.width - 1), "");
1001 lp_build_name(sign, "floor.sign");
1002
1003 /* offset = -0.99999(9)f */
1004 offset = lp_build_const_scalar(type, -(double)(((unsigned long long)1 << mantissa) - 1)/((unsigned long long)1 << mantissa));
1005 offset = LLVMConstBitCast(offset, int_vec_type);
1006
1007 /* offset = a < 0 ? -0.99999(9)f : 0.0f */
1008 offset = LLVMBuildAnd(bld->builder, offset, sign, "");
1009 offset = LLVMBuildBitCast(bld->builder, offset, vec_type, "");
1010 lp_build_name(offset, "floor.offset");
1011
1012 res = LLVMBuildAdd(bld->builder, a, offset, "");
1013 lp_build_name(res, "floor.res");
1014 }
1015
1016 res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "");
1017 lp_build_name(res, "floor");
1018
1019 return res;
1020 }
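/*
 * For illustration of the non-SSE4.1 path above: negative inputs get a
 * -0.99999(9) offset before truncation toward zero, so e.g. -1.25 becomes
 * -2.2499999 and truncates to -2 == floor(-1.25), while -2.0 becomes
 * -2.9999999 and still truncates to -2 == floor(-2.0).  Positive inputs get
 * a zero offset, and truncation already equals floor for them.
 */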
1021
1022
1023 LLVMValueRef
1024 lp_build_iceil(struct lp_build_context *bld,
1025 LLVMValueRef a)
1026 {
1027 const struct lp_type type = bld->type;
1028 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
1029 LLVMValueRef res;
1030
1031 assert(type.floating);
1032 assert(lp_check_value(type, a));
1033
1034 if(util_cpu_caps.has_sse4_1) {
1035 res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_CEIL);
1036 }
1037 else {
1038 assert(0);
1039 res = bld->undef;
1040 }
1041
1042 res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "");
1043
1044 return res;
1045 }
1046
1047
1048 LLVMValueRef
1049 lp_build_sqrt(struct lp_build_context *bld,
1050 LLVMValueRef a)
1051 {
1052 const struct lp_type type = bld->type;
1053 LLVMTypeRef vec_type = lp_build_vec_type(type);
1054 char intrinsic[32];
1055
1056 /* TODO: optimize the constant case */
1058
1059 assert(type.floating);
1060 util_snprintf(intrinsic, sizeof intrinsic, "llvm.sqrt.v%uf%u", type.length, type.width);
1061
1062 return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
1063 }
1064
1065
1066 LLVMValueRef
1067 lp_build_rcp(struct lp_build_context *bld,
1068 LLVMValueRef a)
1069 {
1070 const struct lp_type type = bld->type;
1071
1072 if(a == bld->zero)
1073 return bld->undef;
1074 if(a == bld->one)
1075 return bld->one;
1076 if(a == bld->undef)
1077 return bld->undef;
1078
1079 assert(type.floating);
1080
1081 if(LLVMIsConstant(a))
1082 return LLVMConstFDiv(bld->one, a);
1083
1084 if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
1085 /* FIXME: improve precision */
1086 return lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rcp.ps", lp_build_vec_type(type), a);
1087
1088 return LLVMBuildFDiv(bld->builder, bld->one, a, "");
1089 }
1090
1091
1092 /**
1093 * Generate 1/sqrt(a)
1094 */
1095 LLVMValueRef
1096 lp_build_rsqrt(struct lp_build_context *bld,
1097 LLVMValueRef a)
1098 {
1099 const struct lp_type type = bld->type;
1100
1101 assert(type.floating);
1102
1103 if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
1104 return lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rsqrt.ps", lp_build_vec_type(type), a);
1105
1106 return lp_build_rcp(bld, lp_build_sqrt(bld, a));
1107 }
1108
1109
1110 /**
1111 * Generate cos(a)
1112 */
1113 LLVMValueRef
1114 lp_build_cos(struct lp_build_context *bld,
1115 LLVMValueRef a)
1116 {
1117 const struct lp_type type = bld->type;
1118 LLVMTypeRef vec_type = lp_build_vec_type(type);
1119 char intrinsic[32];
1120
1121 /* TODO: optimize the constant case */
1122
1123 assert(type.floating);
1124 util_snprintf(intrinsic, sizeof intrinsic, "llvm.cos.v%uf%u", type.length, type.width);
1125
1126 return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
1127 }
1128
1129
1130 /**
1131 * Generate sin(a)
1132 */
1133 LLVMValueRef
1134 lp_build_sin(struct lp_build_context *bld,
1135 LLVMValueRef a)
1136 {
1137 const struct lp_type type = bld->type;
1138 LLVMTypeRef vec_type = lp_build_vec_type(type);
1139 char intrinsic[32];
1140
1141 /* TODO: optimize the constant case */
1142
1143 assert(type.floating);
1144 util_snprintf(intrinsic, sizeof intrinsic, "llvm.sin.v%uf%u", type.length, type.width);
1145
1146 return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
1147 }
1148
1149
1150 /**
1151 * Generate pow(x, y)
1152 */
1153 LLVMValueRef
1154 lp_build_pow(struct lp_build_context *bld,
1155 LLVMValueRef x,
1156 LLVMValueRef y)
1157 {
1158 /* TODO: optimize the constant case */
1159 if(LLVMIsConstant(x) && LLVMIsConstant(y))
1160 debug_printf("%s: inefficient/imprecise constant arithmetic\n",
1161 __FUNCTION__);
1162
1163 return lp_build_exp2(bld, lp_build_mul(bld, lp_build_log2(bld, x), y));
1164 }
1165
1166
1167 /**
1168 * Generate exp(x)
1169 */
1170 LLVMValueRef
1171 lp_build_exp(struct lp_build_context *bld,
1172 LLVMValueRef x)
1173 {
1174 /* log2(e) = 1/log(2) */
1175 LLVMValueRef log2e = lp_build_const_scalar(bld->type, 1.4426950408889634);
1176
1177 return lp_build_exp2(bld, lp_build_mul(bld, log2e, x));   /* exp(x) = exp2(x * log2(e)) */
1178 }
1179
1180
1181 /**
1182 * Generate log(x)
1183 */
1184 LLVMValueRef
1185 lp_build_log(struct lp_build_context *bld,
1186 LLVMValueRef x)
1187 {
1188 /* log(2) */
1189 LLVMValueRef log2 = lp_build_const_scalar(bld->type, 0.69314718055994529);
1190
1191 return lp_build_mul(bld, log2, lp_build_log2(bld, x));   /* log(x) = log(2) * log2(x) */
1192 }
1193
1194
1195 #define EXP_POLY_DEGREE 3
1196 #define LOG_POLY_DEGREE 5
1197
1198
1199 /**
1200 * Generate polynomial.
1201 * Ex: coeffs[0] + x * coeffs[1] + x^2 * coeffs[2].
1202 */
1203 static LLVMValueRef
1204 lp_build_polynomial(struct lp_build_context *bld,
1205 LLVMValueRef x,
1206 const double *coeffs,
1207 unsigned num_coeffs)
1208 {
1209 const struct lp_type type = bld->type;
1210 LLVMValueRef res = NULL;
1211 unsigned i;
1212
1213 /* TODO: optimize the constant case */
1214 if(LLVMIsConstant(x))
1215 debug_printf("%s: inefficient/imprecise constant arithmetic\n",
1216 __FUNCTION__);
1217
1218 for (i = num_coeffs; i--; ) {
1219 LLVMValueRef coeff = lp_build_const_scalar(type, coeffs[i]);
1220 if(res)
1221 res = lp_build_add(bld, coeff, lp_build_mul(bld, x, res));
1222 else
1223 res = coeff;
1224 }
1225
1226 if(res)
1227 return res;
1228 else
1229 return bld->undef;
1230 }
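/*
 * The loop above evaluates the polynomial in Horner form,
 * coeffs[0] + x*(coeffs[1] + x*(coeffs[2] + ...)); e.g. for coeffs
 * {1, 2, 3} and x = 2: 3 -> 2 + 2*3 = 8 -> 1 + 2*8 = 17 = 1 + 2*2 + 3*4.
 */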
1231
1232
1233 /**
1234 * Minimax polynomial fit of 2**x, in range [-0.5, 0.5[
1235 */
1236 const double lp_build_exp2_polynomial[] = {
1237 #if EXP_POLY_DEGREE == 5
1238 9.9999994e-1, 6.9315308e-1, 2.4015361e-1, 5.5826318e-2, 8.9893397e-3, 1.8775767e-3
1239 #elif EXP_POLY_DEGREE == 4
1240 1.0000026, 6.9300383e-1, 2.4144275e-1, 5.2011464e-2, 1.3534167e-2
1241 #elif EXP_POLY_DEGREE == 3
1242 9.9992520e-1, 6.9583356e-1, 2.2606716e-1, 7.8024521e-2
1243 #elif EXP_POLY_DEGREE == 2
1244 1.0017247, 6.5763628e-1, 3.3718944e-1
1245 #else
1246 #error
1247 #endif
1248 };
1249
1250
1251 void
1252 lp_build_exp2_approx(struct lp_build_context *bld,
1253 LLVMValueRef x,
1254 LLVMValueRef *p_exp2_int_part,
1255 LLVMValueRef *p_frac_part,
1256 LLVMValueRef *p_exp2)
1257 {
1258 const struct lp_type type = bld->type;
1259 LLVMTypeRef vec_type = lp_build_vec_type(type);
1260 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
1261 LLVMValueRef ipart = NULL;
1262 LLVMValueRef fpart = NULL;
1263 LLVMValueRef expipart = NULL;
1264 LLVMValueRef expfpart = NULL;
1265 LLVMValueRef res = NULL;
1266
1267 if(p_exp2_int_part || p_frac_part || p_exp2) {
1268 /* TODO: optimize the constant case */
1269 if(LLVMIsConstant(x))
1270 debug_printf("%s: inefficient/imprecise constant arithmetic\n",
1271 __FUNCTION__);
1272
1273 assert(type.floating && type.width == 32);
1274
1275 x = lp_build_min(bld, x, lp_build_const_scalar(type, 129.0));
1276 x = lp_build_max(bld, x, lp_build_const_scalar(type, -126.99999));
1277
1278 /* ipart = int(x - 0.5) */
1279 ipart = LLVMBuildSub(bld->builder, x, lp_build_const_scalar(type, 0.5f), "");
1280 ipart = LLVMBuildFPToSI(bld->builder, ipart, int_vec_type, "");
1281
1282 /* fpart = x - ipart */
1283 fpart = LLVMBuildSIToFP(bld->builder, ipart, vec_type, "");
1284 fpart = LLVMBuildSub(bld->builder, x, fpart, "");
1285 }
1286
1287 if(p_exp2_int_part || p_exp2) {
1288 /* expipart = (float) (1 << ipart) */
1289 expipart = LLVMBuildAdd(bld->builder, ipart, lp_build_int_const_scalar(type, 127), "");
1290 expipart = LLVMBuildShl(bld->builder, expipart, lp_build_int_const_scalar(type, 23), "");
1291 expipart = LLVMBuildBitCast(bld->builder, expipart, vec_type, "");
1292 }
1293
1294 if(p_exp2) {
1295 expfpart = lp_build_polynomial(bld, fpart, lp_build_exp2_polynomial,
1296 Elements(lp_build_exp2_polynomial));
1297
1298 res = LLVMBuildMul(bld->builder, expipart, expfpart, "");
1299 }
1300
1301 if(p_exp2_int_part)
1302 *p_exp2_int_part = expipart;
1303
1304 if(p_frac_part)
1305 *p_frac_part = fpart;
1306
1307 if(p_exp2)
1308 *p_exp2 = res;
1309 }
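/*
 * For illustration: the approximation above uses 2^x = 2^ipart * 2^fpart.
 * 2^ipart is built directly as an IEEE-754 single by writing (ipart + 127)
 * into the exponent field (shift left by 23); e.g. ipart = 3 gives
 * (3 + 127) << 23, the bit pattern of 8.0f.  2^fpart is approximated with
 * the minimax polynomial, and the two factors are multiplied together.
 */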
1310
1311
1312 LLVMValueRef
1313 lp_build_exp2(struct lp_build_context *bld,
1314 LLVMValueRef x)
1315 {
1316 LLVMValueRef res;
1317 lp_build_exp2_approx(bld, x, NULL, NULL, &res);
1318 return res;
1319 }
1320
1321
1322 /**
1323 * Minimax polynomial fit of log2(x)/(x - 1), for x in range [1, 2[
1324 * These coefficients can be generated with
1325 * http://www.boost.org/doc/libs/1_36_0/libs/math/doc/sf_and_dist/html/math_toolkit/toolkit/internals2/minimax.html
1326 */
1327 const double lp_build_log2_polynomial[] = {
1328 #if LOG_POLY_DEGREE == 6
1329 3.11578814719469302614, -3.32419399085241980044, 2.59883907202499966007, -1.23152682416275988241, 0.318212422185251071475, -0.0344359067839062357313
1330 #elif LOG_POLY_DEGREE == 5
1331 2.8882704548164776201, -2.52074962577807006663, 1.48116647521213171641, -0.465725644288844778798, 0.0596515482674574969533
1332 #elif LOG_POLY_DEGREE == 4
1333 2.61761038894603480148, -1.75647175389045657003, 0.688243882994381274313, -0.107254423828329604454
1334 #elif LOG_POLY_DEGREE == 3
1335 2.28330284476918490682, -1.04913055217340124191, 0.204446009836232697516
1336 #else
1337 #error
1338 #endif
1339 };
1340
1341
1342 /**
1343 * See http://www.devmaster.net/forums/showthread.php?p=43580
1344 */
1345 void
1346 lp_build_log2_approx(struct lp_build_context *bld,
1347 LLVMValueRef x,
1348 LLVMValueRef *p_exp,
1349 LLVMValueRef *p_floor_log2,
1350 LLVMValueRef *p_log2)
1351 {
1352 const struct lp_type type = bld->type;
1353 LLVMTypeRef vec_type = lp_build_vec_type(type);
1354 LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
1355
1356 LLVMValueRef expmask = lp_build_int_const_scalar(type, 0x7f800000);
1357 LLVMValueRef mantmask = lp_build_int_const_scalar(type, 0x007fffff);
1358 LLVMValueRef one = LLVMConstBitCast(bld->one, int_vec_type);
1359
1360 LLVMValueRef i = NULL;
1361 LLVMValueRef exp = NULL;
1362 LLVMValueRef mant = NULL;
1363 LLVMValueRef logexp = NULL;
1364 LLVMValueRef logmant = NULL;
1365 LLVMValueRef res = NULL;
1366
1367 if(p_exp || p_floor_log2 || p_log2) {
1368 /* TODO: optimize the constant case */
1369 if(LLVMIsConstant(x))
1370 debug_printf("%s: inefficient/imprecise constant arithmetic\n",
1371 __FUNCTION__);
1372
1373 assert(type.floating && type.width == 32);
1374
1375 i = LLVMBuildBitCast(bld->builder, x, int_vec_type, "");
1376
1377 /* exp = (float) exponent(x) */
1378 exp = LLVMBuildAnd(bld->builder, i, expmask, "");
1379 }
1380
1381 if(p_floor_log2 || p_log2) {
1382 logexp = LLVMBuildLShr(bld->builder, exp, lp_build_int_const_scalar(type, 23), "");
1383 logexp = LLVMBuildSub(bld->builder, logexp, lp_build_int_const_scalar(type, 127), "");
1384 logexp = LLVMBuildSIToFP(bld->builder, logexp, vec_type, "");
1385 }
1386
1387 if(p_log2) {
1388 /* mant = (float) mantissa(x) */
1389 mant = LLVMBuildAnd(bld->builder, i, mantmask, "");
1390 mant = LLVMBuildOr(bld->builder, mant, one, "");
1391 mant = LLVMBuildBitCast(bld->builder, mant, vec_type, "");
1392
1393 logmant = lp_build_polynomial(bld, mant, lp_build_log2_polynomial,
1394 Elements(lp_build_log2_polynomial));
1395
1396 /* This effectively increases the polynomial degree by one, but ensures that log2(1) == 0 */
1397 logmant = LLVMBuildMul(bld->builder, logmant, LLVMBuildSub(bld->builder, mant, bld->one, ""), "");
1398
1399 res = LLVMBuildAdd(bld->builder, logmant, logexp, "");
1400 }
1401
1402 if(p_exp)
1403 *p_exp = exp;
1404
1405 if(p_floor_log2)
1406 *p_floor_log2 = logexp;
1407
1408 if(p_log2)
1409 *p_log2 = res;
1410 }
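/*
 * For illustration: writing x = m * 2^e with m in [1, 2), the code above
 * extracts e from the exponent bits (mask 0x7f800000, shift right by 23,
 * subtract 127) and rebuilds m as a float by or'ing the mantissa bits into
 * the bit pattern of 1.0.  Then log2(x) = e + log2(m), where log2(m) is
 * evaluated as (m - 1) times the polynomial fit of log2(m)/(m - 1), so that
 * log2(1) is exactly 0.  E.g. x = 8.0 gives e = 3, m = 1.0, log2(x) = 3.
 */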
1411
1412
1413 LLVMValueRef
1414 lp_build_log2(struct lp_build_context *bld,
1415 LLVMValueRef x)
1416 {
1417 LLVMValueRef res;
1418 lp_build_log2_approx(bld, x, NULL, NULL, &res);
1419 return res;
1420 }