llvmpipe: Make lerping work for 8.8 fixed point values.
[mesa.git] src/gallium/drivers/llvmpipe/lp_bld_arit.c
/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * @file
 * Helper arithmetic functions.
 *
 * LLVM IR doesn't support all basic arithmetic operations we care about (most
 * notably min/max and saturated operations), and it is often necessary to
 * resort to machine-specific intrinsics directly. The functions here hide all
 * these implementation details from the other modules.
 *
 * We also do simple expression simplification here. The reasons are:
 * - it is very easy, given we have all the necessary information readily
 *   available
 * - LLVM optimization passes fail to simplify several vector expressions
 * - we often know value constraints which the optimization passes have no way
 *   of knowing, such as when source arguments are known to be in [0, 1] range.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */


#include "util/u_memory.h"
#include "util/u_debug.h"
#include "util/u_string.h"
#include "util/u_cpu_detect.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_intr.h"
#include "lp_bld_logic.h"
#include "lp_bld_pack.h"
#include "lp_bld_debug.h"
#include "lp_bld_arit.h"


/**
 * Generate min(a, b)
 * No checks for special-case values of a or b (such as 0 or 1) are done
 * here; lp_build_min does those.
 */
static LLVMValueRef
lp_build_min_simple(struct lp_build_context *bld,
                    LLVMValueRef a,
                    LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   const char *intrinsic = NULL;
   LLVMValueRef cond;

   /* TODO: optimize the constant case */

   if(type.width * type.length == 128) {
      if(type.floating) {
         if(type.width == 32 && util_cpu_caps.has_sse)
            intrinsic = "llvm.x86.sse.min.ps";
         if(type.width == 64 && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.min.pd";
      }
      else {
         if(type.width == 8 && !type.sign && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.pminu.b";
         if(type.width == 8 && type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pminsb";
         if(type.width == 16 && !type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pminuw";
         if(type.width == 16 && type.sign && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.pmins.w";
         if(type.width == 32 && !type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pminud";
         if(type.width == 32 && type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pminsd";
      }
   }

   if(intrinsic)
      return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);

   cond = lp_build_cmp(bld, PIPE_FUNC_LESS, a, b);
   return lp_build_select(bld, cond, a, b);
}


/**
 * Generate max(a, b)
 * No checks for special-case values of a or b (such as 0 or 1) are done
 * here; lp_build_max does those.
 */
static LLVMValueRef
lp_build_max_simple(struct lp_build_context *bld,
                    LLVMValueRef a,
                    LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   const char *intrinsic = NULL;
   LLVMValueRef cond;

   /* TODO: optimize the constant case */

   if(type.width * type.length == 128) {
      if(type.floating) {
         if(type.width == 32 && util_cpu_caps.has_sse)
            intrinsic = "llvm.x86.sse.max.ps";
         if(type.width == 64 && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.max.pd";
      }
      else {
         if(type.width == 8 && !type.sign && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.pmaxu.b";
         if(type.width == 8 && type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pmaxsb";
         if(type.width == 16 && !type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pmaxuw";
         if(type.width == 16 && type.sign && util_cpu_caps.has_sse2)
            intrinsic = "llvm.x86.sse2.pmaxs.w";
         if(type.width == 32 && !type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pmaxud";
         if(type.width == 32 && type.sign && util_cpu_caps.has_sse4_1)
            intrinsic = "llvm.x86.sse41.pmaxsd";
      }
   }

   if(intrinsic)
      return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);

   cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, b);
   return lp_build_select(bld, cond, a, b);
}


/**
 * Generate 1 - a, or ~a depending on bld->type.
 */
LLVMValueRef
lp_build_comp(struct lp_build_context *bld,
              LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   if(a == bld->one)
      return bld->zero;
   if(a == bld->zero)
      return bld->one;

   if(type.norm && !type.floating && !type.fixed && !type.sign) {
      if(LLVMIsConstant(a))
         return LLVMConstNot(a);
      else
         return LLVMBuildNot(bld->builder, a, "");
   }

   if(LLVMIsConstant(a))
      return LLVMConstSub(bld->one, a);
   else
      return LLVMBuildSub(bld->builder, bld->one, a, "");
}


/**
 * Generate a + b
 */
LLVMValueRef
lp_build_add(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   LLVMValueRef res;

   if(a == bld->zero)
      return b;
   if(b == bld->zero)
      return a;
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(bld->type.norm) {
      const char *intrinsic = NULL;

      if(a == bld->one || b == bld->one)
         return bld->one;

      if(util_cpu_caps.has_sse2 &&
         type.width * type.length == 128 &&
         !type.floating && !type.fixed) {
         if(type.width == 8)
            intrinsic = type.sign ? "llvm.x86.sse2.padds.b" : "llvm.x86.sse2.paddus.b";
         if(type.width == 16)
            intrinsic = type.sign ? "llvm.x86.sse2.padds.w" : "llvm.x86.sse2.paddus.w";
      }

      if(intrinsic)
         return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
   }

   if(LLVMIsConstant(a) && LLVMIsConstant(b))
      res = LLVMConstAdd(a, b);
   else
      res = LLVMBuildAdd(bld->builder, a, b, "");

   /* clamp to ceiling of 1.0 */
   if(bld->type.norm && (bld->type.floating || bld->type.fixed))
      res = lp_build_min_simple(bld, res, bld->one);

   /* XXX clamp to floor of -1 or 0??? */

   return res;
}


/**
 * Generate a - b
 */
LLVMValueRef
lp_build_sub(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   LLVMValueRef res;

   if(b == bld->zero)
      return a;
   if(a == bld->undef || b == bld->undef)
      return bld->undef;
   if(a == b)
      return bld->zero;

   if(bld->type.norm) {
      const char *intrinsic = NULL;

      if(b == bld->one)
         return bld->zero;

      if(util_cpu_caps.has_sse2 &&
         type.width * type.length == 128 &&
         !type.floating && !type.fixed) {
         if(type.width == 8)
            intrinsic = type.sign ? "llvm.x86.sse2.psubs.b" : "llvm.x86.sse2.psubus.b";
         if(type.width == 16)
            intrinsic = type.sign ? "llvm.x86.sse2.psubs.w" : "llvm.x86.sse2.psubus.w";
      }

      if(intrinsic)
         return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
   }

   if(LLVMIsConstant(a) && LLVMIsConstant(b))
      res = LLVMConstSub(a, b);
   else
      res = LLVMBuildSub(bld->builder, a, b, "");

   /* clamp to floor of 0.0 */
   if(bld->type.norm && (bld->type.floating || bld->type.fixed))
      res = lp_build_max_simple(bld, res, bld->zero);

   return res;
}


/**
 * Normalized 8bit multiplication.
 *
 * - alpha plus one
 *
 *     makes the following approximation to the division (Sree)
 *
 *       a*b/255 ~= (a*(b + 1)) >> 8
 *
 *     which is the fastest method that satisfies the following OpenGL
 *     criteria
 *
 *       0*0 = 0 and 255*255 = 255
 *
 * - geometric series
 *
 *     takes the geometric series approximation to the division
 *
 *       t/255 = (t >> 8) + (t >> 16) + (t >> 24) ..
 *
 *     in this case just the first two terms to fit in 16bit arithmetic
 *
 *       t/255 ~= (t + (t >> 8)) >> 8
 *
 *     note that just by itself it doesn't satisfy the OpenGL criteria, as
 *     255*255 = 254, so the special case b = 255 must be accounted for, or
 *     roundoff must be used
 *
 * - geometric series plus rounding
 *
 *     when using a geometric series division instead of truncating the result
 *     use roundoff in the approximation (Jim Blinn)
 *
 *       t/255 ~= (t + (t >> 8) + 0x80) >> 8
 *
 *     achieving the exact results
 *
 * @sa Alvy Ray Smith, Image Compositing Fundamentals, Tech Memo 4, Aug 15, 1995,
 *     ftp://ftp.alvyray.com/Acrobat/4_Comp.pdf
 * @sa Michael Herf, The "double blend trick", May 2000,
 *     http://www.stereopsis.com/doubleblend.html
 */
static LLVMValueRef
lp_build_mul_u8n(LLVMBuilderRef builder,
                 struct lp_type i16_type,
                 LLVMValueRef a, LLVMValueRef b)
{
   LLVMValueRef c8;
   LLVMValueRef ab;

   c8 = lp_build_int_const_scalar(i16_type, 8);

#if 0

   /* a*b/255 ~= (a*(b + 1)) >> 8 */
   b = LLVMBuildAdd(builder, b, lp_build_int_const_scalar(i16_type, 1), "");
   ab = LLVMBuildMul(builder, a, b, "");

#else

   /* ab/255 ~= (ab + (ab >> 8) + 0x80) >> 8 */
   ab = LLVMBuildMul(builder, a, b, "");
   ab = LLVMBuildAdd(builder, ab, LLVMBuildLShr(builder, ab, c8, ""), "");
   ab = LLVMBuildAdd(builder, ab, lp_build_int_const_scalar(i16_type, 0x80), "");

#endif

   ab = LLVMBuildLShr(builder, ab, c8, "");

   return ab;
}
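

/*
 * For reference, a plain scalar C model of the rounding variant emitted
 * above. This is only a sketch for experimenting with the approximation
 * outside LLVM; mul_u8n_ref is a hypothetical name, not part of this
 * file's API:
 *
 *    #include <stdint.h>
 *
 *    static uint8_t
 *    mul_u8n_ref(uint8_t a, uint8_t b)
 *    {
 *       uint32_t ab = (uint32_t)a * b;
 *       ab += ab >> 8;    // first term of the geometric series
 *       ab += 0x80;       // roundoff
 *       return (uint8_t)(ab >> 8);
 *    }
 *
 * Comparing it against the exactly rounded (a*b + 127) / 255 for all 65536
 * input pairs is a quick way to verify the "exact results" claim above.
 */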


/**
 * Generate a * b
 */
LLVMValueRef
lp_build_mul(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;
   LLVMValueRef shift;
   LLVMValueRef res;

   if(a == bld->zero)
      return bld->zero;
   if(a == bld->one)
      return b;
   if(b == bld->zero)
      return bld->zero;
   if(b == bld->one)
      return a;
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(!type.floating && !type.fixed && type.norm) {
      if(type.width == 8) {
         struct lp_type i16_type = lp_wider_type(type);
         LLVMValueRef al, ah, bl, bh, abl, abh, ab;

         lp_build_unpack2(bld->builder, type, i16_type, a, &al, &ah);
         lp_build_unpack2(bld->builder, type, i16_type, b, &bl, &bh);

         /* PMULLW, PSRLW, PADDW */
         abl = lp_build_mul_u8n(bld->builder, i16_type, al, bl);
         abh = lp_build_mul_u8n(bld->builder, i16_type, ah, bh);

         ab = lp_build_pack2(bld->builder, i16_type, type, abl, abh);

         return ab;
      }

      /* FIXME */
      assert(0);
   }

   if(type.fixed)
      shift = lp_build_int_const_scalar(type, type.width/2);
   else
      shift = NULL;

   if(LLVMIsConstant(a) && LLVMIsConstant(b)) {
      res = LLVMConstMul(a, b);
      if(shift) {
         if(type.sign)
            res = LLVMConstAShr(res, shift);
         else
            res = LLVMConstLShr(res, shift);
      }
   }
   else {
      res = LLVMBuildMul(bld->builder, a, b, "");
      if(shift) {
         if(type.sign)
            res = LLVMBuildAShr(bld->builder, res, shift, "");
         else
            res = LLVMBuildLShr(bld->builder, res, shift, "");
      }
   }

   return res;
}


/**
 * Generate a / b
 */
LLVMValueRef
lp_build_div(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;

   /* Only floating-point division is handled here */
   assert(type.floating);

   if(a == bld->zero)
      return bld->zero;
   if(a == bld->one)
      return lp_build_rcp(bld, b);
   if(b == bld->zero)
      return bld->undef;
   if(b == bld->one)
      return a;
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(LLVMIsConstant(a) && LLVMIsConstant(b))
      return LLVMConstFDiv(a, b);

   if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
      return lp_build_mul(bld, a, lp_build_rcp(bld, b));

   return LLVMBuildFDiv(bld->builder, a, b, "");
}


/**
 * Linear interpolation.
 *
 * This also works for integer values with a few caveats.
 *
 * @sa http://www.stereopsis.com/doubleblend.html
 */
LLVMValueRef
lp_build_lerp(struct lp_build_context *bld,
              LLVMValueRef x,
              LLVMValueRef v0,
              LLVMValueRef v1)
{
   LLVMValueRef delta;
   LLVMValueRef res;

   delta = lp_build_sub(bld, v1, v0);

   res = lp_build_mul(bld, x, delta);

   res = lp_build_add(bld, v0, res);

   if(bld->type.fixed)
      /* XXX: This step is necessary for lerping 8bit colors stored on 16bits,
       * but it will be wrong for other uses. Basically we need a more
       * powerful lp_type, capable of further distinguishing the values
       * interpretation from the value storage. */
      res = LLVMBuildAnd(bld->builder, res, lp_build_int_const_scalar(bld->type, (1 << bld->type.width/2) - 1), "");

   return res;
}
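

/*
 * To see why the masking step above matters, consider a scalar model of the
 * 8.8 fixed point case this is used for, where 8bit colors are lerped in
 * 16bit lanes (a sketch only; lerp_8_8_ref is a hypothetical name, not part
 * of this file's API):
 *
 *    #include <stdint.h>
 *
 *    static uint16_t
 *    lerp_8_8_ref(uint16_t x, uint16_t v0, uint16_t v1)
 *    {
 *       // x is an 8.8 weight, so 0x100 means 1.0
 *       int16_t delta = (int16_t)(v1 - v0);
 *       int16_t term = (int16_t)(x * delta) >> 8;   // wraps like a 16bit lane
 *       return (uint16_t)((v0 + term) & 0xff);      // the masking step above
 *    }
 *
 * With x = 0x100, v0 = 0, v1 = 255 the 16bit product wraps to -256, so only
 * the low 8 bits of the sum are meaningful; the mask keeps exactly those.
 */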


LLVMValueRef
lp_build_lerp_2d(struct lp_build_context *bld,
                 LLVMValueRef x,
                 LLVMValueRef y,
                 LLVMValueRef v00,
                 LLVMValueRef v01,
                 LLVMValueRef v10,
                 LLVMValueRef v11)
{
   LLVMValueRef v0 = lp_build_lerp(bld, x, v00, v01);
   LLVMValueRef v1 = lp_build_lerp(bld, x, v10, v11);
   return lp_build_lerp(bld, y, v0, v1);
}


/**
 * Generate min(a, b)
 * Do checks for special cases.
 */
LLVMValueRef
lp_build_min(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(a == b)
      return a;

   if(bld->type.norm) {
      if(a == bld->zero || b == bld->zero)
         return bld->zero;
      if(a == bld->one)
         return b;
      if(b == bld->one)
         return a;
   }

   return lp_build_min_simple(bld, a, b);
}


/**
 * Generate max(a, b)
 * Do checks for special cases.
 */
LLVMValueRef
lp_build_max(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(a == b)
      return a;

   if(bld->type.norm) {
      if(a == bld->one || b == bld->one)
         return bld->one;
      if(a == bld->zero)
         return b;
      if(b == bld->zero)
         return a;
   }

   return lp_build_max_simple(bld, a, b);
}


/**
 * Generate abs(a)
 */
LLVMValueRef
lp_build_abs(struct lp_build_context *bld,
             LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);

   if(!type.sign)
      return a;

   if(type.floating) {
      /* Mask out the sign bit, keeping all the lower bits */
      LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
      LLVMValueRef mask = lp_build_int_const_scalar(type, ((unsigned long long)1 << (type.width - 1)) - 1);
      a = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
      a = LLVMBuildAnd(bld->builder, a, mask, "");
      a = LLVMBuildBitCast(bld->builder, a, vec_type, "");
      return a;
   }

   if(type.width*type.length == 128 && util_cpu_caps.has_ssse3) {
      switch(type.width) {
      case 8:
         return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.b.128", vec_type, a);
      case 16:
         return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.w.128", vec_type, a);
      case 32:
         return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.d.128", vec_type, a);
      }
   }

   return lp_build_max(bld, a, LLVMBuildNeg(bld->builder, a, ""));
}


LLVMValueRef
lp_build_sgn(struct lp_build_context *bld,
             LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   LLVMValueRef cond;
   LLVMValueRef res;

   /* Handle non-zero case */
   if(!type.sign) {
      /* if not zero then sign must be positive */
      res = bld->one;
   }
   else if(type.floating) {
      /* Take the sign bit and add it to 1 constant */
      LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
      LLVMValueRef mask = lp_build_int_const_scalar(type, (unsigned long long)1 << (type.width - 1));
      LLVMValueRef sign;
      LLVMValueRef one;
      sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
      sign = LLVMBuildAnd(bld->builder, sign, mask, "");
      one = LLVMConstBitCast(bld->one, int_vec_type);
      res = LLVMBuildOr(bld->builder, sign, one, "");
      res = LLVMBuildBitCast(bld->builder, res, vec_type, "");
   }
   else {
      LLVMValueRef minus_one = lp_build_const_scalar(type, -1.0);
      cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, bld->zero);
      res = lp_build_select(bld, cond, bld->one, minus_one);
   }

   /* Handle zero: keep the non-zero result computed above otherwise */
   cond = lp_build_cmp(bld, PIPE_FUNC_EQUAL, a, bld->zero);
   res = lp_build_select(bld, cond, bld->zero, res);

   return res;
}


enum lp_build_round_sse41_mode
{
   LP_BUILD_ROUND_SSE41_NEAREST = 0,
   LP_BUILD_ROUND_SSE41_FLOOR = 1,
   LP_BUILD_ROUND_SSE41_CEIL = 2,
   LP_BUILD_ROUND_SSE41_TRUNCATE = 3
};


static INLINE LLVMValueRef
lp_build_round_sse41(struct lp_build_context *bld,
                     LLVMValueRef a,
                     enum lp_build_round_sse41_mode mode)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   const char *intrinsic;

   assert(type.floating);
   assert(type.width*type.length == 128);
   assert(lp_check_value(type, a));
   assert(util_cpu_caps.has_sse4_1);

   switch(type.width) {
   case 32:
      intrinsic = "llvm.x86.sse41.round.ps";
      break;
   case 64:
      intrinsic = "llvm.x86.sse41.round.pd";
      break;
   default:
      assert(0);
      return bld->undef;
   }

   return lp_build_intrinsic_binary(bld->builder, intrinsic, vec_type, a,
                                    LLVMConstInt(LLVMInt32Type(), mode, 0));
}


LLVMValueRef
lp_build_trunc(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if(util_cpu_caps.has_sse4_1)
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_TRUNCATE);
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
      LLVMValueRef res;
      res = LLVMBuildFPToSI(bld->builder, a, int_vec_type, "");
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}


LLVMValueRef
lp_build_round(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if(util_cpu_caps.has_sse4_1)
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_NEAREST);
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMValueRef res;
      res = lp_build_iround(bld, a);
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}


LLVMValueRef
lp_build_floor(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if(util_cpu_caps.has_sse4_1)
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMValueRef res;
      res = lp_build_ifloor(bld, a);
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}


LLVMValueRef
lp_build_ceil(struct lp_build_context *bld,
              LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if(util_cpu_caps.has_sse4_1)
      return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_CEIL);
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMValueRef res;
      res = lp_build_iceil(bld, a);
      res = LLVMBuildSIToFP(bld->builder, res, vec_type, "");
      return res;
   }
}


/**
 * Convert to integer, through whichever rounding method that's fastest,
 * typically truncating towards zero.
 */
LLVMValueRef
lp_build_itrunc(struct lp_build_context *bld,
                LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);

   assert(type.floating);
   assert(lp_check_value(type, a));

   return LLVMBuildFPToSI(bld->builder, a, int_vec_type, "");
}


LLVMValueRef
lp_build_iround(struct lp_build_context *bld,
                LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMValueRef res;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if(util_cpu_caps.has_sse4_1) {
      res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_NEAREST);
   }
   else {
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      LLVMValueRef mask = lp_build_int_const_scalar(type, (unsigned long long)1 << (type.width - 1));
      LLVMValueRef sign;
      LLVMValueRef half;

      /* get sign bit */
      sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
      sign = LLVMBuildAnd(bld->builder, sign, mask, "");

      /* sign * 0.5 */
      half = lp_build_const_scalar(type, 0.5);
      half = LLVMBuildBitCast(bld->builder, half, int_vec_type, "");
      half = LLVMBuildOr(bld->builder, sign, half, "");
      half = LLVMBuildBitCast(bld->builder, half, vec_type, "");

      res = LLVMBuildAdd(bld->builder, a, half, "");
   }

   res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "");

   return res;
}


LLVMValueRef
lp_build_ifloor(struct lp_build_context *bld,
                LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMValueRef res;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if(util_cpu_caps.has_sse4_1) {
      res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
   }
   else {
      /* Select a -0.99999(9)f offset for negative inputs, so that the
       * truncation below behaves as a floor */
      LLVMTypeRef vec_type = lp_build_vec_type(type);
      unsigned mantissa = lp_mantissa(type);
      LLVMValueRef mask = lp_build_int_const_scalar(type, (unsigned long long)1 << (type.width - 1));
      LLVMValueRef sign;
      LLVMValueRef offset;

      /* sign = a < 0 ? ~0 : 0 */
      sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
      sign = LLVMBuildAnd(bld->builder, sign, mask, "");
      sign = LLVMBuildAShr(bld->builder, sign, lp_build_int_const_scalar(type, type.width - 1), "");

      /* offset = -0.99999(9)f */
      offset = lp_build_const_scalar(type, -(double)(((unsigned long long)1 << mantissa) - 1)/((unsigned long long)1 << mantissa));
      offset = LLVMConstBitCast(offset, int_vec_type);

      /* offset = a < 0 ? -0.99999(9)f : 0.0f */
      offset = LLVMBuildAnd(bld->builder, offset, sign, "");
      offset = LLVMBuildBitCast(bld->builder, offset, vec_type, "");

      res = LLVMBuildAdd(bld->builder, a, offset, "");
   }

   res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "");

   return res;
}
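

/*
 * A scalar sketch of the non-SSE4.1 path above, to make the trick easier
 * to follow (ifloor_ref is a hypothetical name, not part of this file's
 * API):
 *
 *    static int
 *    ifloor_ref(float a)
 *    {
 *       // offset = a < 0 ? -0.99999(9)f : 0.0f, selected branchlessly above
 *       float offset = a < 0.0f ? -(float)0x7fffff / (float)(1 << 23) : 0.0f;
 *       return (int)(a + offset);   // C float-to-int casts truncate
 *    }
 *
 * Truncation rounds towards zero, which for negative non-integers is one
 * too high; nudging negative inputs down by just under 1.0 first turns the
 * truncation into a floor, while leaving exact integers alone.
 */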


LLVMValueRef
lp_build_iceil(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMValueRef res;

   assert(type.floating);
   assert(lp_check_value(type, a));

   if(util_cpu_caps.has_sse4_1) {
      res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_CEIL);
   }
   else {
      /* FIXME: there is no non-SSE4.1 fallback yet */
      assert(0);
      res = bld->undef;
   }

   res = LLVMBuildFPToSI(bld->builder, res, int_vec_type, "");

   return res;
}


LLVMValueRef
lp_build_sqrt(struct lp_build_context *bld,
              LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   char intrinsic[32];

   /* TODO: optimize the constant case */

   assert(type.floating);
   util_snprintf(intrinsic, sizeof intrinsic, "llvm.sqrt.v%uf%u", type.length, type.width);

   return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
}


LLVMValueRef
lp_build_rcp(struct lp_build_context *bld,
             LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   if(a == bld->zero)
      return bld->undef;
   if(a == bld->one)
      return bld->one;
   if(a == bld->undef)
      return bld->undef;

   assert(type.floating);

   if(LLVMIsConstant(a))
      return LLVMConstFDiv(bld->one, a);

   if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
      /* FIXME: improve precision */
      return lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rcp.ps", lp_build_vec_type(type), a);

   return LLVMBuildFDiv(bld->builder, bld->one, a, "");
}


/**
 * Generate 1/sqrt(a)
 */
LLVMValueRef
lp_build_rsqrt(struct lp_build_context *bld,
               LLVMValueRef a)
{
   const struct lp_type type = bld->type;

   assert(type.floating);

   if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
      return lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rsqrt.ps", lp_build_vec_type(type), a);

   return lp_build_rcp(bld, lp_build_sqrt(bld, a));
}


/**
 * Generate cos(a)
 */
LLVMValueRef
lp_build_cos(struct lp_build_context *bld,
             LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   char intrinsic[32];

   /* TODO: optimize the constant case */

   assert(type.floating);
   util_snprintf(intrinsic, sizeof intrinsic, "llvm.cos.v%uf%u", type.length, type.width);

   return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
}


/**
 * Generate sin(a)
 */
LLVMValueRef
lp_build_sin(struct lp_build_context *bld,
             LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   char intrinsic[32];

   /* TODO: optimize the constant case */

   assert(type.floating);
   util_snprintf(intrinsic, sizeof intrinsic, "llvm.sin.v%uf%u", type.length, type.width);

   return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
}


/**
 * Generate pow(x, y)
 */
LLVMValueRef
lp_build_pow(struct lp_build_context *bld,
             LLVMValueRef x,
             LLVMValueRef y)
{
   /* TODO: optimize the constant case */
   if(LLVMIsConstant(x) && LLVMIsConstant(y))
      debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                   __FUNCTION__);

   /* x^y = 2^(log2(x) * y) */
   return lp_build_exp2(bld, lp_build_mul(bld, lp_build_log2(bld, x), y));
}


/**
 * Generate exp(x)
 */
LLVMValueRef
lp_build_exp(struct lp_build_context *bld,
             LLVMValueRef x)
{
   /* log2(e) = 1/log(2) */
   LLVMValueRef log2e = lp_build_const_scalar(bld->type, 1.4426950408889634);

   /* exp(x) = 2^(x * log2(e)) */
   return lp_build_exp2(bld, lp_build_mul(bld, log2e, x));
}


/**
 * Generate log(x)
 */
LLVMValueRef
lp_build_log(struct lp_build_context *bld,
             LLVMValueRef x)
{
   /* log(2) */
   LLVMValueRef log2 = lp_build_const_scalar(bld->type, 0.69314718055994529);

   /* log(x) = log2(x) * log(2) */
   return lp_build_mul(bld, log2, lp_build_log2(bld, x));
}


#define EXP_POLY_DEGREE 3
#define LOG_POLY_DEGREE 5


/**
 * Generate a polynomial in x, with coefficients in ascending order:
 * coeffs[0] + x * coeffs[1] + x^2 * coeffs[2] + ...
 */
static LLVMValueRef
lp_build_polynomial(struct lp_build_context *bld,
                    LLVMValueRef x,
                    const double *coeffs,
                    unsigned num_coeffs)
{
   const struct lp_type type = bld->type;
   LLVMValueRef res = NULL;
   unsigned i;

   /* TODO: optimize the constant case */
   if(LLVMIsConstant(x))
      debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                   __FUNCTION__);

   for (i = num_coeffs; i--; ) {
      LLVMValueRef coeff = lp_build_const_scalar(type, coeffs[i]);
      if(res)
         res = lp_build_add(bld, coeff, lp_build_mul(bld, x, res));
      else
         res = coeff;
   }

   if(res)
      return res;
   else
      return bld->undef;
}
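

/*
 * The loop above is Horner's scheme, highest coefficient first; in scalar
 * terms, for num_coeffs == 3 it builds:
 *
 *    res = coeffs[0] + x*(coeffs[1] + x*coeffs[2]);
 */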


/**
 * Minimax polynomial fit of 2**x, in range [-0.5, 0.5[
 */
const double lp_build_exp2_polynomial[] = {
#if EXP_POLY_DEGREE == 5
   9.9999994e-1, 6.9315308e-1, 2.4015361e-1, 5.5826318e-2, 8.9893397e-3, 1.8775767e-3
#elif EXP_POLY_DEGREE == 4
   1.0000026, 6.9300383e-1, 2.4144275e-1, 5.2011464e-2, 1.3534167e-2
#elif EXP_POLY_DEGREE == 3
   9.9992520e-1, 6.9583356e-1, 2.2606716e-1, 7.8024521e-2
#elif EXP_POLY_DEGREE == 2
   1.0017247, 6.5763628e-1, 3.3718944e-1
#else
#error
#endif
};


void
lp_build_exp2_approx(struct lp_build_context *bld,
                     LLVMValueRef x,
                     LLVMValueRef *p_exp2_int_part,
                     LLVMValueRef *p_frac_part,
                     LLVMValueRef *p_exp2)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMValueRef ipart = NULL;
   LLVMValueRef fpart = NULL;
   LLVMValueRef expipart = NULL;
   LLVMValueRef expfpart = NULL;
   LLVMValueRef res = NULL;

   if(p_exp2_int_part || p_frac_part || p_exp2) {
      /* TODO: optimize the constant case */
      if(LLVMIsConstant(x))
         debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                      __FUNCTION__);

      assert(type.floating && type.width == 32);

      x = lp_build_min(bld, x, lp_build_const_scalar(type, 129.0));
      x = lp_build_max(bld, x, lp_build_const_scalar(type, -126.99999));

      /* ipart = int(x - 0.5) */
      ipart = LLVMBuildSub(bld->builder, x, lp_build_const_scalar(type, 0.5f), "");
      ipart = LLVMBuildFPToSI(bld->builder, ipart, int_vec_type, "");

      /* fpart = x - ipart */
      fpart = LLVMBuildSIToFP(bld->builder, ipart, vec_type, "");
      fpart = LLVMBuildSub(bld->builder, x, fpart, "");
   }

   if(p_exp2_int_part || p_exp2) {
      /* expipart = (float) (1 << ipart) */
      expipart = LLVMBuildAdd(bld->builder, ipart, lp_build_int_const_scalar(type, 127), "");
      expipart = LLVMBuildShl(bld->builder, expipart, lp_build_int_const_scalar(type, 23), "");
      expipart = LLVMBuildBitCast(bld->builder, expipart, vec_type, "");
   }

   if(p_exp2) {
      expfpart = lp_build_polynomial(bld, fpart, lp_build_exp2_polynomial,
                                     Elements(lp_build_exp2_polynomial));

      res = LLVMBuildMul(bld->builder, expipart, expfpart, "");
   }

   if(p_exp2_int_part)
      *p_exp2_int_part = expipart;

   if(p_frac_part)
      *p_frac_part = fpart;

   if(p_exp2)
      *p_exp2 = res;
}


LLVMValueRef
lp_build_exp2(struct lp_build_context *bld,
              LLVMValueRef x)
{
   LLVMValueRef res;
   lp_build_exp2_approx(bld, x, NULL, NULL, &res);
   return res;
}
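

/*
 * A scalar model of the exp2 approximation above, with the same clamping
 * and the EXP_POLY_DEGREE == 3 coefficients (a sketch only; exp2_ref is a
 * hypothetical name, not part of this file's API):
 *
 *    #include <stdint.h>
 *    #include <string.h>
 *
 *    static float
 *    exp2_ref(float x)
 *    {
 *       if(x > 129.0f)       x = 129.0f;
 *       if(x < -126.99999f)  x = -126.99999f;
 *
 *       // ipart = int(x - 0.5), fpart = x - ipart
 *       int32_t ipart = (int32_t)(x - 0.5f);
 *       float fpart = x - (float)ipart;
 *
 *       // 2^ipart, by building the IEEE-754 exponent field directly
 *       int32_t bits = (ipart + 127) << 23;
 *       float expipart;
 *       memcpy(&expipart, &bits, sizeof expipart);
 *
 *       // 2^fpart, from the minimax polynomial
 *       float expfpart = 9.9992520e-1f + fpart*(6.9583356e-1f +
 *                        fpart*(2.2606716e-1f + fpart*7.8024521e-2f));
 *
 *       return expipart * expfpart;   // 2^x == 2^ipart * 2^fpart
 *    }
 */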


/**
 * Minimax polynomial fit of log2(x)/(x - 1), for x in range [1, 2[
 * These coefficients can be generated with
 * http://www.boost.org/doc/libs/1_36_0/libs/math/doc/sf_and_dist/html/math_toolkit/toolkit/internals2/minimax.html
 */
const double lp_build_log2_polynomial[] = {
#if LOG_POLY_DEGREE == 6
   3.11578814719469302614, -3.32419399085241980044, 2.59883907202499966007, -1.23152682416275988241, 0.318212422185251071475, -0.0344359067839062357313
#elif LOG_POLY_DEGREE == 5
   2.8882704548164776201, -2.52074962577807006663, 1.48116647521213171641, -0.465725644288844778798, 0.0596515482674574969533
#elif LOG_POLY_DEGREE == 4
   2.61761038894603480148, -1.75647175389045657003, 0.688243882994381274313, -0.107254423828329604454
#elif LOG_POLY_DEGREE == 3
   2.28330284476918490682, -1.04913055217340124191, 0.204446009836232697516
#else
#error
#endif
};


/**
 * See http://www.devmaster.net/forums/showthread.php?p=43580
 */
void
lp_build_log2_approx(struct lp_build_context *bld,
                     LLVMValueRef x,
                     LLVMValueRef *p_exp,
                     LLVMValueRef *p_floor_log2,
                     LLVMValueRef *p_log2)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);

   LLVMValueRef expmask = lp_build_int_const_scalar(type, 0x7f800000);
   LLVMValueRef mantmask = lp_build_int_const_scalar(type, 0x007fffff);
   LLVMValueRef one = LLVMConstBitCast(bld->one, int_vec_type);

   LLVMValueRef i = NULL;
   LLVMValueRef exp = NULL;
   LLVMValueRef mant = NULL;
   LLVMValueRef logexp = NULL;
   LLVMValueRef logmant = NULL;
   LLVMValueRef res = NULL;

   if(p_exp || p_floor_log2 || p_log2) {
      /* TODO: optimize the constant case */
      if(LLVMIsConstant(x))
         debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                      __FUNCTION__);

      assert(type.floating && type.width == 32);

      i = LLVMBuildBitCast(bld->builder, x, int_vec_type, "");

      /* exp = (float) exponent(x) */
      exp = LLVMBuildAnd(bld->builder, i, expmask, "");
   }

   if(p_floor_log2 || p_log2) {
      logexp = LLVMBuildLShr(bld->builder, exp, lp_build_int_const_scalar(type, 23), "");
      logexp = LLVMBuildSub(bld->builder, logexp, lp_build_int_const_scalar(type, 127), "");
      logexp = LLVMBuildSIToFP(bld->builder, logexp, vec_type, "");
   }

   if(p_log2) {
      /* mant = (float) mantissa(x) */
      mant = LLVMBuildAnd(bld->builder, i, mantmask, "");
      mant = LLVMBuildOr(bld->builder, mant, one, "");
      mant = LLVMBuildBitCast(bld->builder, mant, vec_type, "");

      logmant = lp_build_polynomial(bld, mant, lp_build_log2_polynomial,
                                    Elements(lp_build_log2_polynomial));

      /* This effectively increases the polynomial degree by one, but ensures
       * that log2(1) == 0 */
      logmant = LLVMBuildMul(bld->builder, logmant, LLVMBuildSub(bld->builder, mant, bld->one, ""), "");

      res = LLVMBuildAdd(bld->builder, logmant, logexp, "");
   }

   if(p_exp)
      *p_exp = exp;

   if(p_floor_log2)
      *p_floor_log2 = logexp;

   if(p_log2)
      *p_log2 = res;
}


LLVMValueRef
lp_build_log2(struct lp_build_context *bld,
              LLVMValueRef x)
{
   LLVMValueRef res;
   lp_build_log2_approx(bld, x, NULL, NULL, &res);
   return res;
}
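

/*
 * And a matching scalar model of the log2 approximation, with the
 * LOG_POLY_DEGREE == 5 coefficients (a sketch only; log2_ref is a
 * hypothetical name, not part of this file's API):
 *
 *    #include <stdint.h>
 *    #include <string.h>
 *
 *    static float
 *    log2_ref(float x)
 *    {
 *       int32_t i;
 *       memcpy(&i, &x, sizeof i);
 *
 *       // exponent and mantissa taken straight from the IEEE-754 fields
 *       float logexp = (float)(((i & 0x7f800000) >> 23) - 127);
 *       int32_t mantbits = (i & 0x007fffff) | 0x3f800000;   // mant in [1, 2[
 *       float mant;
 *       memcpy(&mant, &mantbits, sizeof mant);
 *
 *       // minimax fit of log2(m)/(m - 1)
 *       float p = 2.8882704548164776201f + mant*(-2.52074962577807006663f +
 *                 mant*(1.48116647521213171641f + mant*(-0.465725644288844778798f +
 *                 mant*0.0596515482674574969533f)));
 *
 *       // multiply by (m - 1) so that log2(1) == 0 exactly
 *       return p * (mant - 1.0f) + logexp;
 *    }
 */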