src/gallium/drivers/llvmpipe/lp_bld_arit.c

/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * @file
 * Helper arithmetic functions.
 *
 * LLVM IR doesn't support all basic arithmetic operations we care about (most
 * notably min/max and saturated operations), and it is often necessary to
 * resort to machine-specific intrinsics directly. The functions here hide all
 * these implementation details from the other modules.
 *
 * We also do simple expression simplification here. The reasons are:
 * - it is very easy, as we have all the necessary information readily
 *   available
 * - LLVM optimization passes fail to simplify several vector expressions
 * - we often know value constraints which the optimization passes have no way
 *   of knowing, such as when source arguments are known to be in [0, 1] range.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */


#include "util/u_memory.h"
#include "util/u_debug.h"
#include "util/u_math.h"
#include "util/u_string.h"
#include "util/u_cpu_detect.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_intr.h"
#include "lp_bld_logic.h"
#include "lp_bld_pack.h"
#include "lp_bld_arit.h"


/**
 * Generate min(a, b)
 * No checks for special-case values of a or b (0 or 1) are performed.
 */
static LLVMValueRef
lp_build_min_simple(struct lp_build_context *bld,
                    LLVMValueRef a,
                    LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   const char *intrinsic = NULL;
   LLVMValueRef cond;

   /* TODO: optimize the constant case */

   if(type.width * type.length == 128) {
      if(type.floating) {
         if(type.width == 32 && util_cpu_caps.has_sse)
            intrinsic = "llvm.x86.sse.min.ps";
         if(type.width == 64 && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.min.pd";
      }
      else {
         if(type.width == 8 && !type.sign && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.pminu.b";
         if(type.width == 8 && type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pminsb";
         if(type.width == 16 && !type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pminuw";
         if(type.width == 16 && type.sign && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.pmins.w";
         if(type.width == 32 && !type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pminud";
         if(type.width == 32 && type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pminsd";
      }
   }

   if(intrinsic)
      return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);

   cond = lp_build_cmp(bld, PIPE_FUNC_LESS, a, b);
   return lp_build_select(bld, cond, a, b);
}


/**
 * Generate max(a, b)
 * No checks for special-case values of a or b (0 or 1) are performed.
 */
static LLVMValueRef
lp_build_max_simple(struct lp_build_context *bld,
                    LLVMValueRef a,
                    LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   const char *intrinsic = NULL;
   LLVMValueRef cond;

   /* TODO: optimize the constant case */

   if(type.width * type.length == 128) {
      if(type.floating) {
         if(type.width == 32 && util_cpu_caps.has_sse)
            intrinsic = "llvm.x86.sse.max.ps";
         if(type.width == 64 && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.max.pd";
      }
      else {
         if(type.width == 8 && !type.sign && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.pmaxu.b";
         if(type.width == 8 && type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pmaxsb";
         if(type.width == 16 && !type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pmaxuw";
         if(type.width == 16 && type.sign && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.pmaxs.w";
         if(type.width == 32 && !type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pmaxud";
         if(type.width == 32 && type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pmaxsd";
      }
   }

   if(intrinsic)
      return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);

   cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, b);
   return lp_build_select(bld, cond, a, b);
}


/**
 * Generate 1 - a, or ~a depending on bld->type.
 */
LLVMValueRef
lp_build_comp(struct lp_build_context *bld,
              LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   if(a == bld->one)
      return bld->zero;
   if(a == bld->zero)
      return bld->one;

   if(type.norm && !type.floating && !type.fixed && !type.sign) {
      if(LLVMIsConstant(a))
         return LLVMConstNot(a);
      else
         return LLVMBuildNot(bld->builder, a, "");
   }

   if(LLVMIsConstant(a))
      return LLVMConstSub(bld->one, a);
   else
      return LLVMBuildSub(bld->builder, bld->one, a, "");
}
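

/*
 * Illustrative sketch (not part of the build): for unsigned normalized
 * integers the bitwise NOT above really is 1 - a, because with 255
 * representing 1.0 we have 255 - a == (uint8_t)~a. A minimal scalar
 * example, assuming <stdint.h>:
 */
#if 0
static uint8_t
comp_u8(uint8_t a)
{
   return (uint8_t)~a;   /* == 255 - a, i.e. 1.0 - a in normalized form */
}
#endif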


/**
 * Generate a + b
 */
LLVMValueRef
lp_build_add(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   LLVMValueRef res;

   if(a == bld->zero)
      return b;
   if(b == bld->zero)
      return a;
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(bld->type.norm) {
      const char *intrinsic = NULL;

      if(a == bld->one || b == bld->one)
         return bld->one;

      if(util_cpu_caps.has_sse2 &&
         type.width * type.length == 128 &&
         !type.floating && !type.fixed) {
         if(type.width == 8)
            intrinsic = type.sign ? "llvm.x86.sse2.padds.b" : "llvm.x86.sse2.paddus.b";
         if(type.width == 16)
            intrinsic = type.sign ? "llvm.x86.sse2.padds.w" : "llvm.x86.sse2.paddus.w";
      }

      if(intrinsic)
         return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
   }

   if(LLVMIsConstant(a) && LLVMIsConstant(b))
      res = LLVMConstAdd(a, b);
   else
      res = LLVMBuildAdd(bld->builder, a, b, "");

   /* clamp to ceiling of 1.0 */
   if(bld->type.norm && (bld->type.floating || bld->type.fixed))
      res = lp_build_min_simple(bld, res, bld->one);

   /* XXX clamp to floor of -1 or 0??? */

   return res;
}


/**
 * Generate a - b
 */
LLVMValueRef
lp_build_sub(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   LLVMValueRef res;

   if(b == bld->zero)
      return a;
   if(a == bld->undef || b == bld->undef)
      return bld->undef;
   if(a == b)
      return bld->zero;

   if(bld->type.norm) {
      const char *intrinsic = NULL;

      if(b == bld->one)
         return bld->zero;

      if(util_cpu_caps.has_sse2 &&
         type.width * type.length == 128 &&
         !type.floating && !type.fixed) {
         if(type.width == 8)
            intrinsic = type.sign ? "llvm.x86.sse2.psubs.b" : "llvm.x86.sse2.psubus.b";
         if(type.width == 16)
            intrinsic = type.sign ? "llvm.x86.sse2.psubs.w" : "llvm.x86.sse2.psubus.w";
      }

      if(intrinsic)
         return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
   }

   if(LLVMIsConstant(a) && LLVMIsConstant(b))
      res = LLVMConstSub(a, b);
   else
      res = LLVMBuildSub(bld->builder, a, b, "");

   /* clamp to floor of 0.0 */
   if(bld->type.norm && (bld->type.floating || bld->type.fixed))
      res = lp_build_max_simple(bld, res, bld->zero);

   return res;
}


/**
 * Normalized 8bit multiplication.
 *
 * - alpha plus one
 *
 *     makes the following approximation to the division (Sree)
 *
 *       a*b/255 ~= (a*(b + 1)) >> 8
 *
 *     which is the fastest method that satisfies the following OpenGL
 *     criteria:
 *
 *       0*0 = 0 and 255*255 = 255
 *
 * - geometric series
 *
 *     takes the geometric series approximation to the division
 *
 *       t/255 = (t >> 8) + (t >> 16) + (t >> 24) ...
 *
 *     in this case just the first two terms to fit in 16bit arithmetic
 *
 *       t/255 ~= (t + (t >> 8)) >> 8
 *
 *     note that by itself it doesn't satisfy the OpenGL criteria, as
 *     255*255 yields 254; either the special case b = 255 must be handled
 *     or rounding must be used
 *
 * - geometric series plus rounding
 *
 *     when using a geometric series division instead of truncating the result
 *     use rounding in the approximation (Jim Blinn)
 *
 *       t/255 ~= (t + (t >> 8) + 0x80) >> 8
 *
 *     achieving exact results
 *
 * @sa Alvy Ray Smith, Image Compositing Fundamentals, Tech Memo 4, Aug 15, 1995,
 *     ftp://ftp.alvyray.com/Acrobat/4_Comp.pdf
 * @sa Michael Herf, The "double blend trick", May 2000,
 *     http://www.stereopsis.com/doubleblend.html
 */
static LLVMValueRef
lp_build_mul_u8n(LLVMBuilderRef builder,
                 struct lp_type i16_type,
                 LLVMValueRef a, LLVMValueRef b)
{
   LLVMValueRef c8;
   LLVMValueRef ab;

   c8 = lp_build_int_const_scalar(i16_type, 8);

#if 0

   /* a*b/255 ~= (a*(b + 1)) >> 8 */
   b = LLVMBuildAdd(builder, b, lp_build_int_const_scalar(i16_type, 1), "");
   ab = LLVMBuildMul(builder, a, b, "");

#else

   /* ab/255 ~= (ab + (ab >> 8) + 0x80) >> 8 */
   ab = LLVMBuildMul(builder, a, b, "");
   ab = LLVMBuildAdd(builder, ab, LLVMBuildLShr(builder, ab, c8, ""), "");
   ab = LLVMBuildAdd(builder, ab, lp_build_int_const_scalar(i16_type, 0x80), "");

#endif

   ab = LLVMBuildLShr(builder, ab, c8, "");

   return ab;
}
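

/*
 * Illustrative sketch (not part of the build): a scalar brute-force check of
 * the two approximations documented above, assuming <stdio.h>, <stdint.h>
 * and <stdlib.h>, and taking round-to-nearest a*b/255 as the reference (an
 * assumption; the OpenGL criteria above only pin down 0*0 and 255*255).
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static void
check_mul_u8n(void)
{
   unsigned a, b;
   int max_err_sree = 0;
   int max_err_blinn = 0;

   for(a = 0; a < 256; ++a) {
      for(b = 0; b < 256; ++b) {
         uint32_t t = a*b;
         int ref   = (t + 127)/255;                 /* round(a*b/255) */
         int sree  = (a*(b + 1)) >> 8;              /* alpha plus one */
         int blinn = (t + (t >> 8) + 0x80) >> 8;    /* series + rounding */

         if(abs(sree - ref) > max_err_sree)
            max_err_sree = abs(sree - ref);
         if(abs(blinn - ref) > max_err_blinn)
            max_err_blinn = abs(blinn - ref);
      }
   }

   printf("max error vs rounded division: sree %d, blinn %d\n",
          max_err_sree, max_err_blinn);
}
#endif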


/**
 * Generate a * b
 */
LLVMValueRef
lp_build_mul(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   LLVMValueRef shift;
   LLVMValueRef res;

   if(a == bld->zero)
      return bld->zero;
   if(a == bld->one)
      return b;
   if(b == bld->zero)
      return bld->zero;
   if(b == bld->one)
      return a;
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(!type.floating && !type.fixed && type.norm) {
      if(type.width == 8) {
         struct lp_type i16_type = lp_wider_type(type);
         LLVMValueRef al, ah, bl, bh, abl, abh, ab;

         lp_build_unpack2(bld->builder, type, i16_type, a, &al, &ah);
         lp_build_unpack2(bld->builder, type, i16_type, b, &bl, &bh);

         /* PMULLW, PSRLW, PADDW */
         abl = lp_build_mul_u8n(bld->builder, i16_type, al, bl);
         abh = lp_build_mul_u8n(bld->builder, i16_type, ah, bh);

         ab = lp_build_pack2(bld->builder, i16_type, type, abl, abh);

         return ab;
      }

      /* FIXME */
      assert(0);
   }

   if(type.fixed)
      shift = lp_build_int_const_scalar(type, type.width/2);
   else
      shift = NULL;

   if(LLVMIsConstant(a) && LLVMIsConstant(b)) {
      res = LLVMConstMul(a, b);
      if(shift) {
         if(type.sign)
            res = LLVMConstAShr(res, shift);
         else
            res = LLVMConstLShr(res, shift);
      }
   }
   else {
      res = LLVMBuildMul(bld->builder, a, b, "");
      if(shift) {
         if(type.sign)
            res = LLVMBuildAShr(bld->builder, res, shift, "");
         else
            res = LLVMBuildLShr(bld->builder, res, shift, "");
      }
   }

   return res;
}


/**
 * Small vector x scale multiplication optimization.
 */
LLVMValueRef
lp_build_mul_imm(struct lp_build_context *bld,
                 LLVMValueRef a,
                 int b)
{
   LLVMValueRef factor;

   if(b == 0)
      return bld->zero;

   if(b == 1)
      return a;

   if(b == -1)
      return LLVMBuildNeg(bld->builder, a, "");

   if(b == 2 && bld->type.floating)
      return lp_build_add(bld, a, a);

   if(util_is_pot(b)) {
      unsigned shift = ffs(b) - 1;

      if(bld->type.floating) {
#if 0
         /*
          * Power of two multiplication by directly manipulating the mantissa.
          *
          * XXX: This might not always be faster, it will introduce a small
          * error for multiplication by zero, and it will produce wrong
          * results for Inf and NaN.
          */
         unsigned mantissa = lp_mantissa(bld->type);
         factor = lp_build_int_const_scalar(bld->type, (unsigned long long)shift << mantissa);
         a = LLVMBuildBitCast(bld->builder, a, lp_build_int_vec_type(bld->type), "");
         a = LLVMBuildAdd(bld->builder, a, factor, "");
         a = LLVMBuildBitCast(bld->builder, a, lp_build_vec_type(bld->type), "");
         return a;
#endif
      }
      else {
         factor = lp_build_const_scalar(bld->type, shift);
         return LLVMBuildShl(bld->builder, a, factor, "");
      }
   }

   factor = lp_build_const_scalar(bld->type, (double)b);
   return lp_build_mul(bld, a, factor);
}
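

/*
 * Illustrative sketch (not part of the build): the disabled mantissa trick
 * above in scalar form -- multiplying a float by 2^n by adding n to the
 * exponent field. Assumes IEEE-754 single precision and <stdint.h>/<string.h>.
 */
#if 0
#include <stdint.h>
#include <string.h>

static float
mul_pow2(float x, int n)
{
   uint32_t bits;
   memcpy(&bits, &x, sizeof bits);
   bits += (uint32_t)n << 23;   /* bump the biased exponent by n */
   memcpy(&x, &bits, sizeof x);
   return x;   /* as the XXX above notes: wrong for 0, Inf and NaN */
}
#endif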


/**
 * Generate a / b
 */
LLVMValueRef
lp_build_div(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;

   if(a == bld->zero)
      return bld->zero;
   if(a == bld->one)
      return lp_build_rcp(bld, b);
   if(b == bld->zero)
      return bld->undef;
   if(b == bld->one)
      return a;
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(LLVMIsConstant(a) && LLVMIsConstant(b))
      return LLVMConstFDiv(a, b);

   if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
      return lp_build_mul(bld, a, lp_build_rcp(bld, b));

   return LLVMBuildFDiv(bld->builder, a, b, "");
}


/**
 * Linear interpolation.
 *
 * This also works for integer values with a few caveats.
 *
 * @sa http://www.stereopsis.com/doubleblend.html
 */
LLVMValueRef
lp_build_lerp(struct lp_build_context *bld,
              LLVMValueRef x,
              LLVMValueRef v0,
              LLVMValueRef v1)
{
   LLVMValueRef delta;
   LLVMValueRef res;

   delta = lp_build_sub(bld, v1, v0);

   res = lp_build_mul(bld, x, delta);

   res = lp_build_add(bld, v0, res);

   if(bld->type.fixed)
      /* XXX: This step is necessary for lerping 8bit colors stored on
       * 16bits, but it will be wrong for other uses. Basically we need a
       * more powerful lp_type, capable of further distinguishing the values
       * interpretation from the value storage. */
      res = LLVMBuildAnd(bld->builder, res, lp_build_int_const_scalar(bld->type, (1 << bld->type.width/2) - 1), "");

   return res;
}
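

/*
 * Illustrative sketch (not part of the build): an equivalent scalar lerp of
 * 8bit values. This hypothetical reference formulation avoids signed
 * intermediates by computing v0*(1 - w) + v1*w, with the Blinn rounding
 * division from lp_build_mul_u8n; it is not what the builder emits.
 */
#if 0
#include <stdint.h>

static uint8_t
lerp_u8(uint8_t x, uint8_t v0, uint8_t v1)
{
   /* w = x/255; t = v0*(255 - x) + v1*x is in [0, 65025] */
   uint32_t t = (uint32_t)v0*(255 - x) + (uint32_t)v1*x;
   return (uint8_t)((t + (t >> 8) + 0x80) >> 8);   /* ~t/255, rounded */
}
#endif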


LLVMValueRef
lp_build_lerp_2d(struct lp_build_context *bld,
                 LLVMValueRef x,
                 LLVMValueRef y,
                 LLVMValueRef v00,
                 LLVMValueRef v01,
                 LLVMValueRef v10,
                 LLVMValueRef v11)
{
   LLVMValueRef v0 = lp_build_lerp(bld, x, v00, v01);
   LLVMValueRef v1 = lp_build_lerp(bld, x, v10, v11);
   return lp_build_lerp(bld, y, v0, v1);
}


/**
 * Generate min(a, b)
 * Do checks for special cases.
 */
LLVMValueRef
lp_build_min(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(a == b)
      return a;

   if(bld->type.norm) {
      if(a == bld->zero || b == bld->zero)
         return bld->zero;
      if(a == bld->one)
         return b;
      if(b == bld->one)
         return a;
   }

   return lp_build_min_simple(bld, a, b);
}


/**
 * Generate max(a, b)
 * Do checks for special cases.
 */
LLVMValueRef
lp_build_max(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(a == b)
      return a;

   if(bld->type.norm) {
      if(a == bld->one || b == bld->one)
         return bld->one;
      if(a == bld->zero)
         return b;
      if(b == bld->zero)
         return a;
   }

   return lp_build_max_simple(bld, a, b);
}


/**
 * Generate abs(a)
 */
LLVMValueRef
lp_build_abs(struct lp_build_context *bld,
             LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);

   if(!type.sign)
      return a;

   if(type.floating) {
      /* Mask out the sign bit */
      LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
      unsigned long long absMask = ~((unsigned long long)1 << (type.width - 1));
      LLVMValueRef mask = lp_build_int_const_scalar(type, absMask);
      a = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
      a = LLVMBuildAnd(bld->builder, a, mask, "");
      a = LLVMBuildBitCast(bld->builder, a, vec_type, "");
      return a;
   }

   if(type.width*type.length == 128 && util_cpu_caps.has_ssse3) {
      switch(type.width) {
      case 8:
         return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.b.128", vec_type, a);
      case 16:
         return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.w.128", vec_type, a);
      case 32:
         return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.d.128", vec_type, a);
      }
   }

   return lp_build_max(bld, a, LLVMBuildNeg(bld->builder, a, ""));
}


LLVMValueRef
lp_build_sgn(struct lp_build_context *bld,
             LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   LLVMValueRef cond;
   LLVMValueRef res;

   /* Handle non-zero case */
   if(!type.sign) {
      /* if not zero then sign must be positive */
      res = bld->one;
   }
   else if(type.floating) {
      /* Take the sign bit and OR it into the constant 1.0 */
      LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
      LLVMValueRef mask = lp_build_int_const_scalar(type, (unsigned long long)1 << (type.width - 1));
      LLVMValueRef sign;
      LLVMValueRef one;
      sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
      sign = LLVMBuildAnd(bld->builder, sign, mask, "");
      one = LLVMConstBitCast(bld->one, int_vec_type);
      res = LLVMBuildOr(bld->builder, sign, one, "");
      res = LLVMBuildBitCast(bld->builder, res, vec_type, "");
   }
   else {
      LLVMValueRef minus_one = lp_build_const_scalar(type, -1.0);
      cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, bld->zero);
      res = lp_build_select(bld, cond, bld->one, minus_one);
   }

   /* Handle zero */
   cond = lp_build_cmp(bld, PIPE_FUNC_EQUAL, a, bld->zero);
   res = lp_build_select(bld, cond, bld->zero, res);

   return res;
}
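

/*
 * Illustrative sketch (not part of the build): the floating-point path of
 * lp_build_sgn above in scalar form -- OR the sign bit of a into the
 * constant 1.0, then select 0.0 for a == 0. Assumes IEEE-754 single
 * precision and <stdint.h>/<string.h>.
 */
#if 0
#include <stdint.h>
#include <string.h>

static float
sgn_f32(float a)
{
   uint32_t bits, res_bits = 0x3f800000;   /* 1.0f */
   float res;

   memcpy(&bits, &a, sizeof bits);
   res_bits |= bits & 0x80000000;          /* copy a's sign onto 1.0f */
   memcpy(&res, &res_bits, sizeof res);

   return a == 0.0f ? 0.0f : res;
}
#endif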


enum lp_build_round_sse41_mode
{
   LP_BUILD_ROUND_SSE41_NEAREST = 0,
   LP_BUILD_ROUND_SSE41_FLOOR = 1,
   LP_BUILD_ROUND_SSE41_CEIL = 2,
   LP_BUILD_ROUND_SSE41_TRUNCATE = 3
};


static INLINE LLVMValueRef
lp_build_round_sse41(struct lp_build_context *bld,
                     LLVMValueRef a,
                     enum lp_build_round_sse41_mode mode)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   const char *intrinsic;

   assert(type.floating);
   assert(type.width*type.length == 128);
   assert(lp_check_value(type, a));
   assert(util_cpu_caps.has_sse4_1);

   switch(type.width) {
   case 32:
      intrinsic = "llvm.x86.sse41.round.ps";
      break;
   case 64:
      intrinsic = "llvm.x86.sse41.round.pd";
      break;
   default:
      assert(0);
      return bld->undef;
   }

   return lp_build_intrinsic_binary(bld->builder, intrinsic, vec_type, a,
                                    LLVMConstInt(LLVMInt32Type(), mode, 0));
}


LLVMValueRef
lp_build_trunc(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if(util_cpu_caps.has_sse4_1)
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_TRUNCATE);
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
      LLVMValueRef res;
      res = LLVMBuildFPToSI(bld->builder, a, int_vec_type, "");
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}


LLVMValueRef
lp_build_round(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if(util_cpu_caps.has_sse4_1)
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_NEAREST);
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMValueRef res;
      res = lp_build_iround(bld, a);
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}


LLVMValueRef
lp_build_floor(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if(util_cpu_caps.has_sse4_1)
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMValueRef res;
      res = lp_build_ifloor(bld, a);
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}


LLVMValueRef
lp_build_ceil(struct lp_build_context *bld,
              LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if(util_cpu_caps.has_sse4_1)
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_CEIL);
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMValueRef res;
      res = lp_build_iceil(bld, a);
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}


/**
 * Convert to integer, using whichever rounding method is fastest;
 * typically truncation toward zero.
 */
LLVMValueRef
lp_build_itrunc(struct lp_build_context *bld,
                LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);

   assert(type.floating);
   assert(lp_check_value(type, a));

   return LLVMBuildFPToSI(bld->builder, a, int_vec_type, "");
}


LLVMValueRef
lp_build_iround(struct lp_build_context *bld,
                LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMValueRef res;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if(util_cpu_caps.has_sse4_1) {
      res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_NEAREST);
   }
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMValueRef mask = lp_build_int_const_scalar(type, (unsigned long long)1 << (type.width - 1));
      LLVMValueRef sign;
      LLVMValueRef half;

      /* get sign bit */
      sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
      sign = LLVMBuildAnd(bld->builder, sign, mask, "");

      /* half = 0.5 with the sign of a */
      half = lp_build_const_scalar(type, 0.5);
      half = LLVMBuildBitCast(bld->builder, half, int_vec_type, "");
      half = LLVMBuildOr(bld->builder, sign, half, "");
      half = LLVMBuildBitCast(bld->builder, half, vec_type, "");

      res = LLVMBuildAdd(bld->builder, a, half, "");
   }

   res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "");

   return res;
}
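

/*
 * Illustrative sketch (not part of the build): the non-SSE4.1 path of
 * lp_build_iround in scalar form. copysignf stands in for the sign-bit OR
 * trick above; requires C99 <math.h>.
 */
#if 0
#include <math.h>

static int
iround_f32(float a)
{
   /* add 0.5 carrying a's sign, then truncate: rounds halves away from zero */
   return (int)(a + copysignf(0.5f, a));
}
#endif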


LLVMValueRef
lp_build_ifloor(struct lp_build_context *bld,
                LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMValueRef res;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if(util_cpu_caps.has_sse4_1) {
      res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
   }
   else {
      /* Spread the sign bit and use it to select a -0.99999(9) offset for
       * negative values, so that truncation yields the floor */
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      unsigned mantissa = lp_mantissa(type);
      LLVMValueRef mask = lp_build_int_const_scalar(type, (unsigned long long)1 << (type.width - 1));
      LLVMValueRef sign;
      LLVMValueRef offset;

      /* sign = a < 0 ? ~0 : 0 */
      sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
      sign = LLVMBuildAnd(bld->builder, sign, mask, "");
      sign = LLVMBuildAShr(bld->builder, sign, lp_build_int_const_scalar(type, type.width - 1), "");

      /* offset = -0.99999(9)f */
      offset = lp_build_const_scalar(type, -(double)(((unsigned long long)1 << mantissa) - 1)/((unsigned long long)1 << mantissa));
      offset = LLVMConstBitCast(offset, int_vec_type);

      /* offset = a < 0 ? -0.99999(9)f : 0.0f */
      offset = LLVMBuildAnd(bld->builder, offset, sign, "");
      offset = LLVMBuildBitCast(bld->builder, offset, vec_type, "");

      res = LLVMBuildAdd(bld->builder, a, offset, "");
   }

   res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "");

   return res;
}
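

/*
 * Illustrative sketch (not part of the build): the non-SSE4.1 path of
 * lp_build_ifloor in scalar form. The branch stands in for the branchless
 * sign-mask select above; like the vector code, it inherits the usual
 * caveats of the -0.99999(9) offset for values extremely close to zero.
 */
#if 0
static int
ifloor_f32(float a)
{
   if(a < 0.0f) {
      /* offset = -0.99999(9)f, i.e. -(2^23 - 1)/2^23 */
      const float offset = -(float)((1 << 23) - 1)/(float)(1 << 23);
      a += offset;
   }
   return (int)a;   /* truncation now matches floor() */
}
#endif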


LLVMValueRef
lp_build_iceil(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMValueRef res;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if(util_cpu_caps.has_sse4_1) {
      res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_CEIL);
   }
   else {
      assert(0);
      res = bld->undef;
   }

   res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "");

   return res;
}


LLVMValueRef
lp_build_sqrt(struct lp_build_context *bld,
              LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   char intrinsic[32];

   /* TODO: optimize the constant case */

   assert(type.floating);
   util_snprintf(intrinsic, sizeof intrinsic, "llvm.sqrt.v%uf%u", type.length, type.width);

   return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
}


LLVMValueRef
lp_build_rcp(struct lp_build_context *bld,
             LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   if(a == bld->zero)
      return bld->undef;
   if(a == bld->one)
      return bld->one;
   if(a == bld->undef)
      return bld->undef;

   assert(type.floating);

   if(LLVMIsConstant(a))
      return LLVMConstFDiv(bld->one, a);

   if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
      /* FIXME: improve precision */
      return lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rcp.ps", lp_build_vec_type(type), a);

   return LLVMBuildFDiv(bld->builder, bld->one, a, "");
}
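

/*
 * Illustrative sketch (not part of the build): the usual fix for the
 * precision FIXME above is one Newton-Raphson step on the rcpps estimate,
 * which roughly doubles the number of correct bits:
 */
#if 0
static float
rcp_refine(float a, float rcp_a /* ~12-bit estimate of 1/a */)
{
   return rcp_a*(2.0f - a*rcp_a);
}
#endif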


/**
 * Generate 1/sqrt(a)
 */
LLVMValueRef
lp_build_rsqrt(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);

   if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
      return lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rsqrt.ps", lp_build_vec_type(type), a);

   return lp_build_rcp(bld, lp_build_sqrt(bld, a));
}
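

/*
 * Illustrative sketch (not part of the build): the corresponding
 * Newton-Raphson refinement for the rsqrtps estimate, should more precision
 * be needed here as well:
 */
#if 0
static float
rsqrt_refine(float a, float rsqrt_a /* ~12-bit estimate of 1/sqrt(a) */)
{
   return rsqrt_a*(1.5f - 0.5f*a*rsqrt_a*rsqrt_a);
}
#endif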


/**
 * Generate cos(a)
 */
LLVMValueRef
lp_build_cos(struct lp_build_context *bld,
             LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   char intrinsic[32];

   /* TODO: optimize the constant case */

   assert(type.floating);
   util_snprintf(intrinsic, sizeof intrinsic, "llvm.cos.v%uf%u", type.length, type.width);

   return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
}


/**
 * Generate sin(a)
 */
LLVMValueRef
lp_build_sin(struct lp_build_context *bld,
             LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   char intrinsic[32];

   /* TODO: optimize the constant case */

   assert(type.floating);
   util_snprintf(intrinsic, sizeof intrinsic, "llvm.sin.v%uf%u", type.length, type.width);

   return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
}


/**
 * Generate pow(x, y)
 */
LLVMValueRef
lp_build_pow(struct lp_build_context *bld,
             LLVMValueRef x,
             LLVMValueRef y)
{
   /* TODO: optimize the constant case */
   if(LLVMIsConstant(x) && LLVMIsConstant(y))
      debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                   __FUNCTION__);

   return lp_build_exp2(bld, lp_build_mul(bld, lp_build_log2(bld, x), y));
}


/**
 * Generate exp(x)
 */
LLVMValueRef
lp_build_exp(struct lp_build_context *bld,
             LLVMValueRef x)
{
   /* log2(e) = 1/log(2) */
   LLVMValueRef log2e = lp_build_const_scalar(bld->type, 1.4426950408889634);

   /* exp(x) = 2^(x * log2(e)) */
   return lp_build_exp2(bld, lp_build_mul(bld, log2e, x));
}


/**
 * Generate log(x)
 */
LLVMValueRef
lp_build_log(struct lp_build_context *bld,
             LLVMValueRef x)
{
   /* log(2) */
   LLVMValueRef log2 = lp_build_const_scalar(bld->type, 0.69314718055994529);

   /* log(x) = log(2) * log2(x) */
   return lp_build_mul(bld, log2, lp_build_log2(bld, x));
}


#define EXP_POLY_DEGREE 3
#define LOG_POLY_DEGREE 5


/**
 * Generate polynomial.
 * Ex: coeffs[0] + x * coeffs[1] + x^2 * coeffs[2] + ...
 */
static LLVMValueRef
lp_build_polynomial(struct lp_build_context *bld,
                    LLVMValueRef x,
                    const double *coeffs,
                    unsigned num_coeffs)
{
   const struct lp_type type = bld->type;
   LLVMValueRef res = NULL;
   unsigned i;

   /* TODO: optimize the constant case */
   if(LLVMIsConstant(x))
      debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                   __FUNCTION__);

   for (i = num_coeffs; i--; ) {
      LLVMValueRef coeff = lp_build_const_scalar(type, coeffs[i]);
      if(res)
         res = lp_build_add(bld, coeff, lp_build_mul(bld, x, res));
      else
         res = coeff;
   }

   if(res)
      return res;
   else
      return bld->undef;
}
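

/*
 * Illustrative sketch (not part of the build): lp_build_polynomial evaluates
 * in Horner form, i.e. coeffs[0] + x*(coeffs[1] + x*(coeffs[2] + ...)),
 * which the following scalar loop mirrors:
 */
#if 0
static double
polynomial(double x, const double *coeffs, unsigned num_coeffs)
{
   double res = 0.0;
   unsigned i;
   for(i = num_coeffs; i--; )
      res = coeffs[i] + x*res;
   return res;
}
#endif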


/**
 * Minimax polynomial fit of 2**x, in range [-0.5, 0.5[
 */
const double lp_build_exp2_polynomial[] = {
#if EXP_POLY_DEGREE == 5
   9.9999994e-1, 6.9315308e-1, 2.4015361e-1, 5.5826318e-2, 8.9893397e-3, 1.8775767e-3
#elif EXP_POLY_DEGREE == 4
   1.0000026, 6.9300383e-1, 2.4144275e-1, 5.2011464e-2, 1.3534167e-2
#elif EXP_POLY_DEGREE == 3
   9.9992520e-1, 6.9583356e-1, 2.2606716e-1, 7.8024521e-2
#elif EXP_POLY_DEGREE == 2
   1.0017247, 6.5763628e-1, 3.3718944e-1
#else
#error
#endif
};


void
lp_build_exp2_approx(struct lp_build_context *bld,
                     LLVMValueRef x,
                     LLVMValueRef *p_exp2_int_part,
                     LLVMValueRef *p_frac_part,
                     LLVMValueRef *p_exp2)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMValueRef ipart = NULL;
   LLVMValueRef fpart = NULL;
   LLVMValueRef expipart = NULL;
   LLVMValueRef expfpart = NULL;
   LLVMValueRef res = NULL;

   if(p_exp2_int_part || p_frac_part || p_exp2) {
      /* TODO: optimize the constant case */
      if(LLVMIsConstant(x))
         debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                      __FUNCTION__);

      assert(type.floating && type.width == 32);

      x = lp_build_min(bld, x, lp_build_const_scalar(type, 129.0));
      x = lp_build_max(bld, x, lp_build_const_scalar(type, -126.99999));

      /* ipart = int(x - 0.5) */
      ipart = LLVMBuildSub(bld->builder, x, lp_build_const_scalar(type, 0.5f), "");
      ipart = LLVMBuildFPToSI(bld->builder, ipart, int_vec_type, "");

      /* fpart = x - ipart */
      fpart = LLVMBuildSIToFP(bld->builder, ipart, vec_type, "");
      fpart = LLVMBuildSub(bld->builder, x, fpart, "");
   }

   if(p_exp2_int_part || p_exp2) {
      /* expipart = (float) (1 << ipart) */
      expipart = LLVMBuildAdd(bld->builder, ipart, lp_build_int_const_scalar(type, 127), "");
      expipart = LLVMBuildShl(bld->builder, expipart, lp_build_int_const_scalar(type, 23), "");
      expipart = LLVMBuildBitCast(bld->builder, expipart, vec_type, "");
   }

   if(p_exp2) {
      expfpart = lp_build_polynomial(bld, fpart, lp_build_exp2_polynomial,
                                     Elements(lp_build_exp2_polynomial));

      res = LLVMBuildMul(bld->builder, expipart, expfpart, "");
   }

   if(p_exp2_int_part)
      *p_exp2_int_part = expipart;

   if(p_frac_part)
      *p_frac_part = fpart;

   if(p_exp2)
      *p_exp2 = res;
}
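

/*
 * Illustrative sketch (not part of the build): lp_build_exp2_approx above in
 * scalar form, with the EXP_POLY_DEGREE == 3 coefficients baked in. Assumes
 * IEEE-754 single precision and <stdint.h>/<string.h>.
 */
#if 0
#include <stdint.h>
#include <string.h>

static float
exp2_approx(float x)
{
   int32_t ipart;
   float fpart, expipart, expfpart;
   uint32_t bits;

   if(x > 129.0f)
      x = 129.0f;
   if(x < -126.99999f)
      x = -126.99999f;

   /* split x, truncating (x - 0.5) exactly as the vector code does */
   ipart = (int32_t)(x - 0.5f);
   fpart = x - (float)ipart;

   /* expipart = 2^ipart, built directly in the exponent field */
   bits = (uint32_t)(ipart + 127) << 23;
   memcpy(&expipart, &bits, sizeof expipart);

   /* minimax polynomial for 2^fpart */
   expfpart = 9.9992520e-1f + fpart*(6.9583356e-1f +
              fpart*(2.2606716e-1f + fpart*7.8024521e-2f));

   return expipart*expfpart;
}
#endif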


LLVMValueRef
lp_build_exp2(struct lp_build_context *bld,
              LLVMValueRef x)
{
   LLVMValueRef res;
   lp_build_exp2_approx(bld, x, NULL, NULL, &res);
   return res;
}


/**
 * Minimax polynomial fit of log2(x)/(x - 1), for x in range [1, 2[
 * These coefficients can be generated with
 * http://www.boost.org/doc/libs/1_36_0/libs/math/doc/sf_and_dist/html/math_toolkit/toolkit/internals2/minimax.html
 */
const double lp_build_log2_polynomial[] = {
#if LOG_POLY_DEGREE == 6
   3.11578814719469302614, -3.32419399085241980044, 2.59883907202499966007, -1.23152682416275988241, 0.318212422185251071475, -0.0344359067839062357313
#elif LOG_POLY_DEGREE == 5
   2.8882704548164776201, -2.52074962577807006663, 1.48116647521213171641, -0.465725644288844778798, 0.0596515482674574969533
#elif LOG_POLY_DEGREE == 4
   2.61761038894603480148, -1.75647175389045657003, 0.688243882994381274313, -0.107254423828329604454
#elif LOG_POLY_DEGREE == 3
   2.28330284476918490682, -1.04913055217340124191, 0.204446009836232697516
#else
#error
#endif
};


/**
 * See http://www.devmaster.net/forums/showthread.php?p=43580
 */
void
lp_build_log2_approx(struct lp_build_context *bld,
                     LLVMValueRef x,
                     LLVMValueRef *p_exp,
                     LLVMValueRef *p_floor_log2,
                     LLVMValueRef *p_log2)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);

   LLVMValueRef expmask = lp_build_int_const_scalar(type, 0x7f800000);
   LLVMValueRef mantmask = lp_build_int_const_scalar(type, 0x007fffff);
   LLVMValueRef one = LLVMConstBitCast(bld->one, int_vec_type);

   LLVMValueRef i = NULL;
   LLVMValueRef exp = NULL;
   LLVMValueRef mant = NULL;
   LLVMValueRef logexp = NULL;
   LLVMValueRef logmant = NULL;
   LLVMValueRef res = NULL;

   if(p_exp || p_floor_log2 || p_log2) {
      /* TODO: optimize the constant case */
      if(LLVMIsConstant(x))
         debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                      __FUNCTION__);

      assert(type.floating && type.width == 32);

      i = LLVMBuildBitCast(bld->builder, x, int_vec_type, "");

      /* exp = (float) exponent(x) */
      exp = LLVMBuildAnd(bld->builder, i, expmask, "");
   }

   if(p_floor_log2 || p_log2) {
      logexp = LLVMBuildLShr(bld->builder, exp, lp_build_int_const_scalar(type, 23), "");
      logexp = LLVMBuildSub(bld->builder, logexp, lp_build_int_const_scalar(type, 127), "");
      logexp = LLVMBuildSIToFP(bld->builder, logexp, vec_type, "");
   }

   if(p_log2) {
      /* mant = (float) mantissa(x) */
      mant = LLVMBuildAnd(bld->builder, i, mantmask, "");
      mant = LLVMBuildOr(bld->builder, mant, one, "");
      mant = LLVMBuildBitCast(bld->builder, mant, vec_type, "");

      logmant = lp_build_polynomial(bld, mant, lp_build_log2_polynomial,
                                    Elements(lp_build_log2_polynomial));

      /* This effectively increases the polynomial degree by one, but ensures
       * that log2(1) == 0 */
      logmant = LLVMBuildMul(bld->builder, logmant, LLVMBuildSub(bld->builder, mant, bld->one, ""), "");

      res = LLVMBuildAdd(bld->builder, logmant, logexp, "");
   }

   if(p_exp)
      *p_exp = exp;

   if(p_floor_log2)
      *p_floor_log2 = logexp;

   if(p_log2)
      *p_log2 = res;
}
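

/*
 * Illustrative sketch (not part of the build): lp_build_log2_approx above in
 * scalar form, with the LOG_POLY_DEGREE == 5 coefficients baked in. Assumes
 * IEEE-754 single precision, a normalized positive input, and
 * <stdint.h>/<string.h>.
 */
#if 0
#include <stdint.h>
#include <string.h>

static float
log2_approx(float x)
{
   uint32_t i, mant_bits;
   float exp, mant, p;

   memcpy(&i, &x, sizeof i);

   /* exp = (float) exponent(x), i.e. floor(log2(x)) */
   exp = (float)(int32_t)((i & 0x7f800000) >> 23) - 127.0f;

   /* mant = mantissa(x) as a float in [1, 2) */
   mant_bits = (i & 0x007fffff) | 0x3f800000;
   memcpy(&mant, &mant_bits, sizeof mant);

   /* minimax fit of log2(m)/(m - 1) on [1, 2[, times (m - 1) */
   p = 2.8882704548164776201f + mant*(-2.52074962577807006663f +
       mant*(1.48116647521213171641f + mant*(-0.465725644288844778798f +
       mant*0.0596515482674574969533f)));
   p = p*(mant - 1.0f);

   return p + exp;
}
#endif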


LLVMValueRef
lp_build_log2(struct lp_build_context *bld,
              LLVMValueRef x)
{
   LLVMValueRef res;
   lp_build_log2_approx(bld, x, NULL, NULL, &res);
   return res;
}