gallivm: Altivec floating-point rounding
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_arit.c
1 /**************************************************************************
2 *
3 * Copyright 2009-2010 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 /**
30 * @file
31 * Helper arithmetic functions.
32 *
33 * LLVM IR doesn't support all basic arithmetic operations we care about (most
34 * notably min/max and saturated operations), and it is often necessary to
35 * resort to machine-specific intrinsics directly. The functions here hide all
36 * these implementation details from the other modules.
37 *
38 * We also do simple expression simplification here. The reasons are:
39 * - it is very easy given we have all necessary information readily available
40 * - LLVM optimization passes fail to simplify several vector expressions
41 * - We often know value constraints which the optimization passes have no way
42 * of knowing, such as when source arguments are known to be in [0, 1] range.
43 *
44 * @author Jose Fonseca <jfonseca@vmware.com>
45 */
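/*
 * Illustrative usage sketch (added; not part of the original file).  Callers
 * typically initialize an lp_build_context for a given lp_type and then
 * combine the helpers below:
 *
 *    struct lp_build_context bld;
 *    lp_build_context_init(&bld, gallivm, type);
 *    LLVMValueRef ab = lp_build_mul(&bld, a, b);
 *    ab = lp_build_clamp(&bld, ab, bld.zero, bld.one);
 *
 * Here 'gallivm', 'type', 'a' and 'b' are assumed to be set up elsewhere
 * (see lp_bld_init.h and lp_bld_type.h).
 */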
46
47
48 #include "util/u_memory.h"
49 #include "util/u_debug.h"
50 #include "util/u_math.h"
51 #include "util/u_string.h"
52 #include "util/u_cpu_detect.h"
53
54 #include "lp_bld_type.h"
55 #include "lp_bld_const.h"
56 #include "lp_bld_init.h"
57 #include "lp_bld_intr.h"
58 #include "lp_bld_logic.h"
59 #include "lp_bld_pack.h"
60 #include "lp_bld_debug.h"
61 #include "lp_bld_arit.h"
62
63 #include <float.h>
64
65 #define EXP_POLY_DEGREE 5
66
67 #define LOG_POLY_DEGREE 4
68
69
70 /**
71 * Generate min(a, b)
72 * No checks for the special case values of a or b being 0 or 1 are done.
73 */
74 static LLVMValueRef
75 lp_build_min_simple(struct lp_build_context *bld,
76 LLVMValueRef a,
77 LLVMValueRef b)
78 {
79 const struct lp_type type = bld->type;
80 const char *intrinsic = NULL;
81 unsigned intr_size = 0;
82 LLVMValueRef cond;
83
84 assert(lp_check_value(type, a));
85 assert(lp_check_value(type, b));
86
87 /* TODO: optimize the constant case */
88
89 if (type.floating && util_cpu_caps.has_sse) {
90 if (type.width == 32) {
91 if (type.length == 1) {
92 intrinsic = "llvm.x86.sse.min.ss";
93 intr_size = 128;
94 }
95 else if (type.length <= 4 || !util_cpu_caps.has_avx) {
96 intrinsic = "llvm.x86.sse.min.ps";
97 intr_size = 128;
98 }
99 else {
100 intrinsic = "llvm.x86.avx.min.ps.256";
101 intr_size = 256;
102 }
103 }
104 if (type.width == 64 && util_cpu_caps.has_sse2) {
105 if (type.length == 1) {
106 intrinsic = "llvm.x86.sse2.min.sd";
107 intr_size = 128;
108 }
109 else if (type.length == 2 || !util_cpu_caps.has_avx) {
110 intrinsic = "llvm.x86.sse2.min.pd";
111 intr_size = 128;
112 }
113 else {
114 intrinsic = "llvm.x86.avx.min.pd.256";
115 intr_size = 256;
116 }
117 }
118 }
119 else if (type.floating && util_cpu_caps.has_altivec) {
120 if (type.width == 32 && type.length == 4) {
121 intrinsic = "llvm.ppc.altivec.vminfp";
122 intr_size = 128;
123 }
124 } else if (util_cpu_caps.has_sse2 && type.length >= 2) {
125 intr_size = 128;
126 if ((type.width == 8 || type.width == 16) &&
127 (type.width * type.length <= 64) &&
128 (gallivm_debug & GALLIVM_DEBUG_PERF)) {
129 debug_printf("%s: inefficient code, bogus shuffle due to packing\n",
130 __FUNCTION__);
131 }
132 if (type.width == 8 && !type.sign) {
133 intrinsic = "llvm.x86.sse2.pminu.b";
134 }
135 else if (type.width == 16 && type.sign) {
136 intrinsic = "llvm.x86.sse2.pmins.w";
137 }
138 if (util_cpu_caps.has_sse4_1) {
139 if (type.width == 8 && type.sign) {
140 intrinsic = "llvm.x86.sse41.pminsb";
141 }
142 if (type.width == 16 && !type.sign) {
143 intrinsic = "llvm.x86.sse41.pminuw";
144 }
145 if (type.width == 32 && !type.sign) {
146 intrinsic = "llvm.x86.sse41.pminud";
147 }
148 if (type.width == 32 && type.sign) {
149 intrinsic = "llvm.x86.sse41.pminsd";
150 }
151 }
152 } else if (util_cpu_caps.has_altivec) {
153 intr_size = 128;
154 if (type.width == 8) {
155 if (!type.sign) {
156 intrinsic = "llvm.ppc.altivec.vminub";
157 } else {
158 intrinsic = "llvm.ppc.altivec.vminsb";
159 }
160 } else if (type.width == 16) {
161 if (!type.sign) {
162 intrinsic = "llvm.ppc.altivec.vminuh";
163 } else {
164 intrinsic = "llvm.ppc.altivec.vminsh";
165 }
166 } else if (type.width == 32) {
167 if (!type.sign) {
168 intrinsic = "llvm.ppc.altivec.vminuw";
169 } else {
170 intrinsic = "llvm.ppc.altivec.vminsw";
171 }
172 }
173 }
174
175 if(intrinsic) {
176 return lp_build_intrinsic_binary_anylength(bld->gallivm, intrinsic,
177 type,
178 intr_size, a, b);
179 }
180
181 cond = lp_build_cmp(bld, PIPE_FUNC_LESS, a, b);
182 return lp_build_select(bld, cond, a, b);
183 }
184
185
186 /**
187 * Generate max(a, b)
188 * No checks for the special case values of a or b being 0 or 1 are done.
189 */
190 static LLVMValueRef
191 lp_build_max_simple(struct lp_build_context *bld,
192 LLVMValueRef a,
193 LLVMValueRef b)
194 {
195 const struct lp_type type = bld->type;
196 const char *intrinsic = NULL;
197 unsigned intr_size = 0;
198 LLVMValueRef cond;
199
200 assert(lp_check_value(type, a));
201 assert(lp_check_value(type, b));
202
203 /* TODO: optimize the constant case */
204
205 if (type.floating && util_cpu_caps.has_sse) {
206 if (type.width == 32) {
207 if (type.length == 1) {
208 intrinsic = "llvm.x86.sse.max.ss";
209 intr_size = 128;
210 }
211 else if (type.length <= 4 || !util_cpu_caps.has_avx) {
212 intrinsic = "llvm.x86.sse.max.ps";
213 intr_size = 128;
214 }
215 else {
216 intrinsic = "llvm.x86.avx.max.ps.256";
217 intr_size = 256;
218 }
219 }
220 if (type.width == 64 && util_cpu_caps.has_sse2) {
221 if (type.length == 1) {
222 intrinsic = "llvm.x86.sse2.max.sd";
223 intr_size = 128;
224 }
225 else if (type.length == 2 || !util_cpu_caps.has_avx) {
226 intrinsic = "llvm.x86.sse2.max.pd";
227 intr_size = 128;
228 }
229 else {
230 intrinsic = "llvm.x86.avx.max.pd.256";
231 intr_size = 256;
232 }
233 }
234 }
235 else if (type.floating && util_cpu_caps.has_altivec) {
236       if (type.width == 32 && type.length == 4) {
237 intrinsic = "llvm.ppc.altivec.vmaxfp";
238 intr_size = 128;
239 }
240 } else if (util_cpu_caps.has_sse2 && type.length >= 2) {
241 intr_size = 128;
242 if ((type.width == 8 || type.width == 16) &&
243 (type.width * type.length <= 64) &&
244 (gallivm_debug & GALLIVM_DEBUG_PERF)) {
245 debug_printf("%s: inefficient code, bogus shuffle due to packing\n",
246 __FUNCTION__);
247 }
248 if (type.width == 8 && !type.sign) {
249 intrinsic = "llvm.x86.sse2.pmaxu.b";
250 intr_size = 128;
251 }
252 else if (type.width == 16 && type.sign) {
253 intrinsic = "llvm.x86.sse2.pmaxs.w";
254 }
255 if (util_cpu_caps.has_sse4_1) {
256 if (type.width == 8 && type.sign) {
257 intrinsic = "llvm.x86.sse41.pmaxsb";
258 }
259 if (type.width == 16 && !type.sign) {
260 intrinsic = "llvm.x86.sse41.pmaxuw";
261 }
262 if (type.width == 32 && !type.sign) {
263 intrinsic = "llvm.x86.sse41.pmaxud";
264 }
265 if (type.width == 32 && type.sign) {
266 intrinsic = "llvm.x86.sse41.pmaxsd";
267 }
268 }
269 } else if (util_cpu_caps.has_altivec) {
270 intr_size = 128;
271 if (type.width == 8) {
272 if (!type.sign) {
273 intrinsic = "llvm.ppc.altivec.vmaxub";
274 } else {
275 intrinsic = "llvm.ppc.altivec.vmaxsb";
276 }
277 } else if (type.width == 16) {
278 if (!type.sign) {
279 intrinsic = "llvm.ppc.altivec.vmaxuh";
280 } else {
281 intrinsic = "llvm.ppc.altivec.vmaxsh";
282 }
283 } else if (type.width == 32) {
284 if (!type.sign) {
285 intrinsic = "llvm.ppc.altivec.vmaxuw";
286 } else {
287 intrinsic = "llvm.ppc.altivec.vmaxsw";
288 }
289 }
290 }
291
292 if(intrinsic) {
293 return lp_build_intrinsic_binary_anylength(bld->gallivm, intrinsic,
294 type,
295 intr_size, a, b);
296 }
297
298 cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, b);
299 return lp_build_select(bld, cond, a, b);
300 }
301
302
303 /**
304 * Generate 1 - a, or ~a depending on bld->type.
305 */
306 LLVMValueRef
307 lp_build_comp(struct lp_build_context *bld,
308 LLVMValueRef a)
309 {
310 LLVMBuilderRef builder = bld->gallivm->builder;
311 const struct lp_type type = bld->type;
312
313 assert(lp_check_value(type, a));
314
315 if(a == bld->one)
316 return bld->zero;
317 if(a == bld->zero)
318 return bld->one;
319
320 if(type.norm && !type.floating && !type.fixed && !type.sign) {
321 if(LLVMIsConstant(a))
322 return LLVMConstNot(a);
323 else
324 return LLVMBuildNot(builder, a, "");
325 }
326
327 if(LLVMIsConstant(a))
328 if (type.floating)
329 return LLVMConstFSub(bld->one, a);
330 else
331 return LLVMConstSub(bld->one, a);
332 else
333 if (type.floating)
334 return LLVMBuildFSub(builder, bld->one, a, "");
335 else
336 return LLVMBuildSub(builder, bld->one, a, "");
337 }
338
339
340 /**
341 * Generate a + b
342 */
343 LLVMValueRef
344 lp_build_add(struct lp_build_context *bld,
345 LLVMValueRef a,
346 LLVMValueRef b)
347 {
348 LLVMBuilderRef builder = bld->gallivm->builder;
349 const struct lp_type type = bld->type;
350 LLVMValueRef res;
351
352 assert(lp_check_value(type, a));
353 assert(lp_check_value(type, b));
354
355 if(a == bld->zero)
356 return b;
357 if(b == bld->zero)
358 return a;
359 if(a == bld->undef || b == bld->undef)
360 return bld->undef;
361
362 if(bld->type.norm) {
363 const char *intrinsic = NULL;
364
365 if(a == bld->one || b == bld->one)
366 return bld->one;
367
368 if (type.width * type.length == 128 &&
369 !type.floating && !type.fixed) {
370 if(util_cpu_caps.has_sse2) {
371 if(type.width == 8)
372 intrinsic = type.sign ? "llvm.x86.sse2.padds.b" : "llvm.x86.sse2.paddus.b";
373 if(type.width == 16)
374 intrinsic = type.sign ? "llvm.x86.sse2.padds.w" : "llvm.x86.sse2.paddus.w";
375 } else if (util_cpu_caps.has_altivec) {
376 if(type.width == 8)
377 intrinsic = type.sign ? "llvm.ppc.altivec.vaddsbs" : "llvm.ppc.altivec.vaddubs";
378 if(type.width == 16)
379 intrinsic = type.sign ? "llvm.ppc.altivec.vaddsws" : "llvm.ppc.altivec.vadduws";
380 }
381 }
382
383 if(intrinsic)
384 return lp_build_intrinsic_binary(builder, intrinsic, lp_build_vec_type(bld->gallivm, bld->type), a, b);
385 }
386
387 if(LLVMIsConstant(a) && LLVMIsConstant(b))
388 if (type.floating)
389 res = LLVMConstFAdd(a, b);
390 else
391 res = LLVMConstAdd(a, b);
392 else
393 if (type.floating)
394 res = LLVMBuildFAdd(builder, a, b, "");
395 else
396 res = LLVMBuildAdd(builder, a, b, "");
397
398 /* clamp to ceiling of 1.0 */
399 if(bld->type.norm && (bld->type.floating || bld->type.fixed))
400 res = lp_build_min_simple(bld, res, bld->one);
401
402 /* XXX clamp to floor of -1 or 0??? */
403
404 return res;
405 }
406
407
408 /** Return the scalar sum of the elements of a.
409 * Callers should avoid this operation whenever possible.
410 */
411 LLVMValueRef
412 lp_build_horizontal_add(struct lp_build_context *bld,
413 LLVMValueRef a)
414 {
415 LLVMBuilderRef builder = bld->gallivm->builder;
416 const struct lp_type type = bld->type;
417 LLVMValueRef index, res;
418 unsigned i, length;
419 LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH / 2];
420 LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH / 2];
421 LLVMValueRef vecres, elem2;
422
423 assert(lp_check_value(type, a));
424
425 if (type.length == 1) {
426 return a;
427 }
428
429 assert(!bld->type.norm);
430
431 /*
432    * For byte vectors one can do much better with psadbw.
433    * Using repeated shuffle/adds here. Note that with multiple vectors
434    * this can be done more efficiently as outlined in the Intel
435    * optimization manual.
436 * Note: could cause data rearrangement if used with smaller element
437 * sizes.
438 */
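   /*
    * Illustrative reduction (added) for a 4-wide vector {a0, a1, a2, a3}:
    *
    *    {a0, a1} + {a2, a3}  ->  {a0+a2, a1+a3}
    *    (a0+a2) + (a1+a3)    ->  final scalar sum (via the extracts below)
    */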
439
440 vecres = a;
441 length = type.length / 2;
442 while (length > 1) {
443 LLVMValueRef vec1, vec2;
444 for (i = 0; i < length; i++) {
445 shuffles1[i] = lp_build_const_int32(bld->gallivm, i);
446 shuffles2[i] = lp_build_const_int32(bld->gallivm, i + length);
447 }
448 vec1 = LLVMBuildShuffleVector(builder, vecres, vecres,
449 LLVMConstVector(shuffles1, length), "");
450 vec2 = LLVMBuildShuffleVector(builder, vecres, vecres,
451 LLVMConstVector(shuffles2, length), "");
452 if (type.floating) {
453 vecres = LLVMBuildFAdd(builder, vec1, vec2, "");
454 }
455 else {
456 vecres = LLVMBuildAdd(builder, vec1, vec2, "");
457 }
458 length = length >> 1;
459 }
460
461 /* always have vector of size 2 here */
462 assert(length == 1);
463
464 index = lp_build_const_int32(bld->gallivm, 0);
465 res = LLVMBuildExtractElement(builder, vecres, index, "");
466 index = lp_build_const_int32(bld->gallivm, 1);
467 elem2 = LLVMBuildExtractElement(builder, vecres, index, "");
468
469 if (type.floating)
470 res = LLVMBuildFAdd(builder, res, elem2, "");
471 else
472 res = LLVMBuildAdd(builder, res, elem2, "");
473
474 return res;
475 }
476
477 /**
478 * Return the horizontal sums of 4 float vectors as a float4 vector.
479 * This uses the technique as outlined in the Intel Optimization Manual.
480 */
481 static LLVMValueRef
482 lp_build_horizontal_add4x4f(struct lp_build_context *bld,
483 LLVMValueRef src[4])
484 {
485 struct gallivm_state *gallivm = bld->gallivm;
486 LLVMBuilderRef builder = gallivm->builder;
487 LLVMValueRef shuffles[4];
488 LLVMValueRef tmp[4];
489 LLVMValueRef sumtmp[2], shuftmp[2];
490
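   /*
    * Data-flow sketch (added for illustration), with src = {x, y, z, w}:
    *
    *    tmp[0] = {x0, x1, y0, y1}    tmp[1] = {x2, x3, y2, y3}
    *    tmp[2] = {z0, z1, w0, w1}    tmp[3] = {z2, z3, w2, w3}
    *    sumtmp[0] = {x0+x2, x1+x3, y0+y2, y1+y3}
    *    sumtmp[1] = {z0+z2, z1+z3, w0+w2, w1+w3}
    *    result    = {sum(x), sum(y), sum(z), sum(w)}
    */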
491 /* lower half of regs */
492 shuffles[0] = lp_build_const_int32(gallivm, 0);
493 shuffles[1] = lp_build_const_int32(gallivm, 1);
494 shuffles[2] = lp_build_const_int32(gallivm, 4);
495 shuffles[3] = lp_build_const_int32(gallivm, 5);
496 tmp[0] = LLVMBuildShuffleVector(builder, src[0], src[1],
497 LLVMConstVector(shuffles, 4), "");
498 tmp[2] = LLVMBuildShuffleVector(builder, src[2], src[3],
499 LLVMConstVector(shuffles, 4), "");
500
501 /* upper half of regs */
502 shuffles[0] = lp_build_const_int32(gallivm, 2);
503 shuffles[1] = lp_build_const_int32(gallivm, 3);
504 shuffles[2] = lp_build_const_int32(gallivm, 6);
505 shuffles[3] = lp_build_const_int32(gallivm, 7);
506 tmp[1] = LLVMBuildShuffleVector(builder, src[0], src[1],
507 LLVMConstVector(shuffles, 4), "");
508 tmp[3] = LLVMBuildShuffleVector(builder, src[2], src[3],
509 LLVMConstVector(shuffles, 4), "");
510
511 sumtmp[0] = LLVMBuildFAdd(builder, tmp[0], tmp[1], "");
512 sumtmp[1] = LLVMBuildFAdd(builder, tmp[2], tmp[3], "");
513
514 shuffles[0] = lp_build_const_int32(gallivm, 0);
515 shuffles[1] = lp_build_const_int32(gallivm, 2);
516 shuffles[2] = lp_build_const_int32(gallivm, 4);
517 shuffles[3] = lp_build_const_int32(gallivm, 6);
518 shuftmp[0] = LLVMBuildShuffleVector(builder, sumtmp[0], sumtmp[1],
519 LLVMConstVector(shuffles, 4), "");
520
521 shuffles[0] = lp_build_const_int32(gallivm, 1);
522 shuffles[1] = lp_build_const_int32(gallivm, 3);
523 shuffles[2] = lp_build_const_int32(gallivm, 5);
524 shuffles[3] = lp_build_const_int32(gallivm, 7);
525 shuftmp[1] = LLVMBuildShuffleVector(builder, sumtmp[0], sumtmp[1],
526 LLVMConstVector(shuffles, 4), "");
527
528 return LLVMBuildFAdd(builder, shuftmp[0], shuftmp[1], "");
529 }
530
531
532 /*
533 * partially horizontally add 2-4 float vectors with length nx4,
534 * i.e. only four adjacent values in each vector will be added,
535 * assuming values are really grouped in 4 which also determines
536 * output order.
537 *
538 * Return a vector of the same length as the initial vectors,
539 * with the excess elements (if any) being undefined.
540 * The element order is independent of number of input vectors.
541 * For 3 vectors x0x1x2x3x4x5x6x7, y0y1y2y3y4y5y6y7, z0z1z2z3z4z5z6z7
542 * the output order thus will be
543 * sumx0-x3,sumy0-y3,sumz0-z3,undef,sumx4-x7,sumy4-y7,sumz4-z7,undef
544 */
545 LLVMValueRef
546 lp_build_hadd_partial4(struct lp_build_context *bld,
547 LLVMValueRef vectors[],
548 unsigned num_vecs)
549 {
550 struct gallivm_state *gallivm = bld->gallivm;
551 LLVMBuilderRef builder = gallivm->builder;
552 LLVMValueRef ret_vec;
553 LLVMValueRef tmp[4];
554 const char *intrinsic = NULL;
555
556 assert(num_vecs >= 2 && num_vecs <= 4);
557 assert(bld->type.floating);
558
559 /* only use this with at least 2 vectors, as it is sort of expensive
560 * (depending on cpu) and we always need two horizontal adds anyway,
561 * so a shuffle/add approach might be better.
562 */
563
564 tmp[0] = vectors[0];
565 tmp[1] = vectors[1];
566
567 tmp[2] = num_vecs > 2 ? vectors[2] : vectors[0];
568 tmp[3] = num_vecs > 3 ? vectors[3] : vectors[0];
569
570 if (util_cpu_caps.has_sse3 && bld->type.width == 32 &&
571 bld->type.length == 4) {
572 intrinsic = "llvm.x86.sse3.hadd.ps";
573 }
574 else if (util_cpu_caps.has_avx && bld->type.width == 32 &&
575 bld->type.length == 8) {
576 intrinsic = "llvm.x86.avx.hadd.ps.256";
577 }
578 if (intrinsic) {
579 tmp[0] = lp_build_intrinsic_binary(builder, intrinsic,
580 lp_build_vec_type(gallivm, bld->type),
581 tmp[0], tmp[1]);
582 if (num_vecs > 2) {
583 tmp[1] = lp_build_intrinsic_binary(builder, intrinsic,
584 lp_build_vec_type(gallivm, bld->type),
585 tmp[2], tmp[3]);
586 }
587 else {
588 tmp[1] = tmp[0];
589 }
590 return lp_build_intrinsic_binary(builder, intrinsic,
591 lp_build_vec_type(gallivm, bld->type),
592 tmp[0], tmp[1]);
593 }
594
595 if (bld->type.length == 4) {
596 ret_vec = lp_build_horizontal_add4x4f(bld, tmp);
597 }
598 else {
599 LLVMValueRef partres[LP_MAX_VECTOR_LENGTH/4];
600 unsigned j;
601 unsigned num_iter = bld->type.length / 4;
602 struct lp_type parttype = bld->type;
603 parttype.length = 4;
604 for (j = 0; j < num_iter; j++) {
605 LLVMValueRef partsrc[4];
606 unsigned i;
607 for (i = 0; i < 4; i++) {
608 partsrc[i] = lp_build_extract_range(gallivm, tmp[i], j*4, 4);
609 }
610 partres[j] = lp_build_horizontal_add4x4f(bld, partsrc);
611 }
612 ret_vec = lp_build_concat(gallivm, partres, parttype, num_iter);
613 }
614 return ret_vec;
615 }
616
617 /**
618 * Generate a - b
619 */
620 LLVMValueRef
621 lp_build_sub(struct lp_build_context *bld,
622 LLVMValueRef a,
623 LLVMValueRef b)
624 {
625 LLVMBuilderRef builder = bld->gallivm->builder;
626 const struct lp_type type = bld->type;
627 LLVMValueRef res;
628
629 assert(lp_check_value(type, a));
630 assert(lp_check_value(type, b));
631
632 if(b == bld->zero)
633 return a;
634 if(a == bld->undef || b == bld->undef)
635 return bld->undef;
636 if(a == b)
637 return bld->zero;
638
639 if(bld->type.norm) {
640 const char *intrinsic = NULL;
641
642 if(b == bld->one)
643 return bld->zero;
644
645 if (type.width * type.length == 128 &&
646 !type.floating && !type.fixed) {
647 if (util_cpu_caps.has_sse2) {
648 if(type.width == 8)
649 intrinsic = type.sign ? "llvm.x86.sse2.psubs.b" : "llvm.x86.sse2.psubus.b";
650 if(type.width == 16)
651 intrinsic = type.sign ? "llvm.x86.sse2.psubs.w" : "llvm.x86.sse2.psubus.w";
652 } else if (util_cpu_caps.has_altivec) {
653 if(type.width == 8)
654 intrinsic = type.sign ? "llvm.ppc.altivec.vsubsbs" : "llvm.ppc.altivec.vsububs";
655 if(type.width == 16)
656 intrinsic = type.sign ? "llvm.ppc.altivec.vsubsws" : "llvm.ppc.altivec.vsubuws";
657 }
658 }
659
660 if(intrinsic)
661 return lp_build_intrinsic_binary(builder, intrinsic, lp_build_vec_type(bld->gallivm, bld->type), a, b);
662 }
663
664 if(LLVMIsConstant(a) && LLVMIsConstant(b))
665 if (type.floating)
666 res = LLVMConstFSub(a, b);
667 else
668 res = LLVMConstSub(a, b);
669 else
670 if (type.floating)
671 res = LLVMBuildFSub(builder, a, b, "");
672 else
673 res = LLVMBuildSub(builder, a, b, "");
674
675 if(bld->type.norm && (bld->type.floating || bld->type.fixed))
676 res = lp_build_max_simple(bld, res, bld->zero);
677
678 return res;
679 }
680
681
682 /**
683 * Normalized 8bit multiplication.
684 *
685 * - alpha plus one
686 *
687 * makes the following approximation to the division (Sree)
688 *
689 * a*b/255 ~= (a*(b + 1)) >> 8
690 *
691 * which is the fastest method that satisfies the following OpenGL criteria
692 *
693 * 0*0 = 0 and 255*255 = 255
694 *
695 * - geometric series
696 *
697 * takes the geometric series approximation to the division
698 *
699 * t/255 = (t >> 8) + (t >> 16) + (t >> 24) ..
700 *
701 * in this case just the first two terms to fit in 16bit arithmetic
702 *
703 * t/255 ~= (t + (t >> 8)) >> 8
704 *
705 * note that just by itself it doesn't satisfy the OpenGL criteria, as it
706 * gives 255*255 = 254, so the special case b = 255 must be accounted for or
707 * roundoff must be used
708 *
709 * - geometric series plus rounding
710 *
711 * when using a geometric series division instead of truncating the result
712 * use roundoff in the approximation (Jim Blinn)
713 *
714 * t/255 ~= (t + (t >> 8) + 0x80) >> 8
715 *
716 * achieving the exact results
717 *
718 * @sa Alvy Ray Smith, Image Compositing Fundamentals, Tech Memo 4, Aug 15, 1995,
719 * ftp://ftp.alvyray.com/Acrobat/4_Comp.pdf
720 * @sa Michael Herf, The "double blend trick", May 2000,
721 * http://www.stereopsis.com/doubleblend.html
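 *
 * Worked example (added for illustration, not part of the original
 * derivation): for a = b = 255, t = a*b = 65025:
 *
 *   truncating:     (t + (t >> 8)) >> 8        = (65025 + 254) >> 8       = 254
 *   with rounding:  (t + (t >> 8) + 0x80) >> 8 = (65025 + 254 + 128) >> 8 = 255
 *
 * A scalar C reference of the rounded variant (hypothetical helper, for
 * clarity only):
 *
 *   static inline uint8_t mul_u8n_ref(uint8_t a, uint8_t b)
 *   {
 *      uint32_t t = (uint32_t)a * b;
 *      return (uint8_t)((t + (t >> 8) + 0x80) >> 8);
 *   }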
722 */
723 static LLVMValueRef
724 lp_build_mul_u8n(struct gallivm_state *gallivm,
725 struct lp_type i16_type,
726 LLVMValueRef a, LLVMValueRef b)
727 {
728 LLVMBuilderRef builder = gallivm->builder;
729 LLVMValueRef c8;
730 LLVMValueRef ab;
731
732 assert(!i16_type.floating);
733 assert(lp_check_value(i16_type, a));
734 assert(lp_check_value(i16_type, b));
735
736 c8 = lp_build_const_int_vec(gallivm, i16_type, 8);
737
738 #if 0
739
740    /* a*b/255 ~= (a*(b + 1)) >> 8 */
741    b = LLVMBuildAdd(builder, b, lp_build_const_int_vec(gallivm, i16_type, 1), "");
742 ab = LLVMBuildMul(builder, a, b, "");
743
744 #else
745
746 /* ab/255 ~= (ab + (ab >> 8) + 0x80) >> 8 */
747 ab = LLVMBuildMul(builder, a, b, "");
748 ab = LLVMBuildAdd(builder, ab, LLVMBuildLShr(builder, ab, c8, ""), "");
749 ab = LLVMBuildAdd(builder, ab, lp_build_const_int_vec(gallivm, i16_type, 0x80), "");
750
751 #endif
752
753 ab = LLVMBuildLShr(builder, ab, c8, "");
754
755 return ab;
756 }
757
758 /**
759 * Normalized 16bit multiplication.
760 *
761 * Utilises the same principle as the code above.
762 */
763 static LLVMValueRef
764 lp_build_mul_u16n(struct gallivm_state *gallivm,
765 struct lp_type i32_type,
766 LLVMValueRef a, LLVMValueRef b)
767 {
768 LLVMBuilderRef builder = gallivm->builder;
769 LLVMValueRef c16;
770 LLVMValueRef ab;
771
772 assert(!i32_type.floating);
773 assert(lp_check_value(i32_type, a));
774 assert(lp_check_value(i32_type, b));
775
776 c16 = lp_build_const_int_vec(gallivm, i32_type, 16);
777
778 /* ab/65535 ~= (ab + (ab >> 16) + 0x8000) >> 16 */
779 ab = LLVMBuildMul(builder, a, b, "");
780 ab = LLVMBuildAdd(builder, ab, LLVMBuildLShr(builder, ab, c16, ""), "");
781 ab = LLVMBuildAdd(builder, ab, lp_build_const_int_vec(gallivm, i32_type, 0x8000), "");
782
783 ab = LLVMBuildLShr(builder, ab, c16, "");
784
785 return ab;
786 }
787
788 /**
789 * Generate a * b
790 */
791 LLVMValueRef
792 lp_build_mul(struct lp_build_context *bld,
793 LLVMValueRef a,
794 LLVMValueRef b)
795 {
796 LLVMBuilderRef builder = bld->gallivm->builder;
797 const struct lp_type type = bld->type;
798 LLVMValueRef shift;
799 LLVMValueRef res;
800
801 assert(lp_check_value(type, a));
802 assert(lp_check_value(type, b));
803
804 if(a == bld->zero)
805 return bld->zero;
806 if(a == bld->one)
807 return b;
808 if(b == bld->zero)
809 return bld->zero;
810 if(b == bld->one)
811 return a;
812 if(a == bld->undef || b == bld->undef)
813 return bld->undef;
814
815 if(!type.floating && !type.fixed && type.norm) {
816 if(type.width == 8) {
817 struct lp_type i16_type = lp_wider_type(type);
818 LLVMValueRef al, ah, bl, bh, abl, abh, ab;
819
820 lp_build_unpack2(bld->gallivm, type, i16_type, a, &al, &ah);
821 lp_build_unpack2(bld->gallivm, type, i16_type, b, &bl, &bh);
822
823 /* PMULLW, PSRLW, PADDW */
824 abl = lp_build_mul_u8n(bld->gallivm, i16_type, al, bl);
825 abh = lp_build_mul_u8n(bld->gallivm, i16_type, ah, bh);
826
827 ab = lp_build_pack2(bld->gallivm, i16_type, type, abl, abh);
828
829 return ab;
830 }
831
832 if(type.width == 16) {
833 struct lp_type i32_type = lp_wider_type(type);
834 LLVMValueRef al, ah, bl, bh, abl, abh, ab;
835
836 lp_build_unpack2(bld->gallivm, type, i32_type, a, &al, &ah);
837 lp_build_unpack2(bld->gallivm, type, i32_type, b, &bl, &bh);
838
839 /* PMULLW, PSRLW, PADDW */
840 abl = lp_build_mul_u16n(bld->gallivm, i32_type, al, bl);
841 abh = lp_build_mul_u16n(bld->gallivm, i32_type, ah, bh);
842
843 ab = lp_build_pack2(bld->gallivm, i32_type, type, abl, abh);
844
845 return ab;
846 }
847
848 /* FIXME */
849 assert(0);
850 }
851
852 if(type.fixed)
853 shift = lp_build_const_int_vec(bld->gallivm, type, type.width/2);
854 else
855 shift = NULL;
856
857 if(LLVMIsConstant(a) && LLVMIsConstant(b)) {
858 if (type.floating)
859 res = LLVMConstFMul(a, b);
860 else
861 res = LLVMConstMul(a, b);
862 if(shift) {
863 if(type.sign)
864 res = LLVMConstAShr(res, shift);
865 else
866 res = LLVMConstLShr(res, shift);
867 }
868 }
869 else {
870 if (type.floating)
871 res = LLVMBuildFMul(builder, a, b, "");
872 else
873 res = LLVMBuildMul(builder, a, b, "");
874 if(shift) {
875 if(type.sign)
876 res = LLVMBuildAShr(builder, res, shift, "");
877 else
878 res = LLVMBuildLShr(builder, res, shift, "");
879 }
880 }
881
882 return res;
883 }
884
885
886 /**
887 * Small vector x scale multiplication optimization.
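 *
 * For example (illustrative): with an integer type a multiply by 8 is
 * emitted as a left shift by 3, while for floating-point types a multiply
 * by 2 becomes an add.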
888 */
889 LLVMValueRef
890 lp_build_mul_imm(struct lp_build_context *bld,
891 LLVMValueRef a,
892 int b)
893 {
894 LLVMBuilderRef builder = bld->gallivm->builder;
895 LLVMValueRef factor;
896
897 assert(lp_check_value(bld->type, a));
898
899 if(b == 0)
900 return bld->zero;
901
902 if(b == 1)
903 return a;
904
905 if(b == -1)
906 return lp_build_negate(bld, a);
907
908 if(b == 2 && bld->type.floating)
909 return lp_build_add(bld, a, a);
910
911 if(util_is_power_of_two(b)) {
912 unsigned shift = ffs(b) - 1;
913
914 if(bld->type.floating) {
915 #if 0
916 /*
917 * Power of two multiplication by directly manipulating the exponent.
918 *
919 * XXX: This might not be always faster, it will introduce a small error
920 * for multiplication by zero, and it will produce wrong results
921 * for Inf and NaN.
922 */
923 unsigned mantissa = lp_mantissa(bld->type);
924 factor = lp_build_const_int_vec(bld->gallivm, bld->type, (unsigned long long)shift << mantissa);
925 a = LLVMBuildBitCast(builder, a, lp_build_int_vec_type(bld->type), "");
926 a = LLVMBuildAdd(builder, a, factor, "");
927 a = LLVMBuildBitCast(builder, a, lp_build_vec_type(bld->gallivm, bld->type), "");
928 return a;
929 #endif
930 }
931 else {
932 factor = lp_build_const_vec(bld->gallivm, bld->type, shift);
933 return LLVMBuildShl(builder, a, factor, "");
934 }
935 }
936
937 factor = lp_build_const_vec(bld->gallivm, bld->type, (double)b);
938 return lp_build_mul(bld, a, factor);
939 }
940
941
942 /**
943 * Generate a / b
944 */
945 LLVMValueRef
946 lp_build_div(struct lp_build_context *bld,
947 LLVMValueRef a,
948 LLVMValueRef b)
949 {
950 LLVMBuilderRef builder = bld->gallivm->builder;
951 const struct lp_type type = bld->type;
952
953 assert(lp_check_value(type, a));
954 assert(lp_check_value(type, b));
955
956 if(a == bld->zero)
957 return bld->zero;
958 if(a == bld->one)
959 return lp_build_rcp(bld, b);
960 if(b == bld->zero)
961 return bld->undef;
962 if(b == bld->one)
963 return a;
964 if(a == bld->undef || b == bld->undef)
965 return bld->undef;
966
967 if(LLVMIsConstant(a) && LLVMIsConstant(b)) {
968 if (type.floating)
969 return LLVMConstFDiv(a, b);
970 else if (type.sign)
971 return LLVMConstSDiv(a, b);
972 else
973 return LLVMConstUDiv(a, b);
974 }
975
976 if(((util_cpu_caps.has_sse && type.width == 32 && type.length == 4) ||
977 (util_cpu_caps.has_avx && type.width == 32 && type.length == 8)) &&
978 type.floating)
979 return lp_build_mul(bld, a, lp_build_rcp(bld, b));
980
981 if (type.floating)
982 return LLVMBuildFDiv(builder, a, b, "");
983 else if (type.sign)
984 return LLVMBuildSDiv(builder, a, b, "");
985 else
986 return LLVMBuildUDiv(builder, a, b, "");
987 }
988
989
990 /**
991 * Linear interpolation -- without any checks.
992 *
993 * @sa http://www.stereopsis.com/doubleblend.html
994 */
995 static INLINE LLVMValueRef
996 lp_build_lerp_simple(struct lp_build_context *bld,
997 LLVMValueRef x,
998 LLVMValueRef v0,
999 LLVMValueRef v1)
1000 {
1001 LLVMBuilderRef builder = bld->gallivm->builder;
1002 LLVMValueRef delta;
1003 LLVMValueRef res;
1004
1005 assert(lp_check_value(bld->type, x));
1006 assert(lp_check_value(bld->type, v0));
1007 assert(lp_check_value(bld->type, v1));
1008
1009 delta = lp_build_sub(bld, v1, v0);
1010
1011 res = lp_build_mul(bld, x, delta);
1012
1013 res = lp_build_add(bld, v0, res);
1014
1015 if (bld->type.fixed) {
1016    /* XXX: This step is necessary for lerping 8bit colors stored in 16 bits,
1017 * but it will be wrong for other uses. Basically we need a more
1018 * powerful lp_type, capable of further distinguishing the values
1019 * interpretation from the value storage. */
1020 res = LLVMBuildAnd(builder, res, lp_build_const_int_vec(bld->gallivm, bld->type, (1 << bld->type.width/2) - 1), "");
1021 }
1022
1023 return res;
1024 }
1025
1026
1027 /**
1028 * Linear interpolation.
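 *
 * Computes v0 + x*(v1 - v0).  For normalized (norm) types the operands are
 * first unpacked to a wider fixed-point type (see below) so that the
 * intermediate product does not overflow.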
1029 */
1030 LLVMValueRef
1031 lp_build_lerp(struct lp_build_context *bld,
1032 LLVMValueRef x,
1033 LLVMValueRef v0,
1034 LLVMValueRef v1)
1035 {
1036 LLVMBuilderRef builder = bld->gallivm->builder;
1037 const struct lp_type type = bld->type;
1038 LLVMValueRef res;
1039
1040 assert(lp_check_value(type, x));
1041 assert(lp_check_value(type, v0));
1042 assert(lp_check_value(type, v1));
1043
1044 if (type.norm) {
1045 struct lp_type wide_type;
1046 struct lp_build_context wide_bld;
1047 LLVMValueRef xl, xh, v0l, v0h, v1l, v1h, resl, resh;
1048 LLVMValueRef shift;
1049
1050 assert(type.length >= 2);
1051 assert(!type.sign);
1052
1053 /*
1054 * Create a wider type, enough to hold the intermediate result of the
1055 * multiplication.
1056 */
1057 memset(&wide_type, 0, sizeof wide_type);
1058 wide_type.fixed = TRUE;
1059 wide_type.width = type.width*2;
1060 wide_type.length = type.length/2;
1061
1062 lp_build_context_init(&wide_bld, bld->gallivm, wide_type);
1063
1064 lp_build_unpack2(bld->gallivm, type, wide_type, x, &xl, &xh);
1065 lp_build_unpack2(bld->gallivm, type, wide_type, v0, &v0l, &v0h);
1066 lp_build_unpack2(bld->gallivm, type, wide_type, v1, &v1l, &v1h);
1067
1068 /*
1069 * Scale x from [0, 255] to [0, 256]
1070 */
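      /*
       * Illustrative example (added): with 8-bit components the shift below
       * is 7, so x' = x + (x >> 7); e.g. x = 255 becomes 256 and x = 0 stays
       * 0, which lets the fixed-point lerp reach v1 exactly at x = 255.
       */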
1071
1072 shift = lp_build_const_int_vec(bld->gallivm, wide_type, type.width - 1);
1073
1074 xl = lp_build_add(&wide_bld, xl,
1075 LLVMBuildAShr(builder, xl, shift, ""));
1076 xh = lp_build_add(&wide_bld, xh,
1077 LLVMBuildAShr(builder, xh, shift, ""));
1078
1079 /*
1080 * Lerp both halves.
1081 */
1082
1083 resl = lp_build_lerp_simple(&wide_bld, xl, v0l, v1l);
1084 resh = lp_build_lerp_simple(&wide_bld, xh, v0h, v1h);
1085
1086 res = lp_build_pack2(bld->gallivm, wide_type, type, resl, resh);
1087 } else {
1088 res = lp_build_lerp_simple(bld, x, v0, v1);
1089 }
1090
1091 return res;
1092 }
1093
1094
1095 LLVMValueRef
1096 lp_build_lerp_2d(struct lp_build_context *bld,
1097 LLVMValueRef x,
1098 LLVMValueRef y,
1099 LLVMValueRef v00,
1100 LLVMValueRef v01,
1101 LLVMValueRef v10,
1102 LLVMValueRef v11)
1103 {
1104 LLVMValueRef v0 = lp_build_lerp(bld, x, v00, v01);
1105 LLVMValueRef v1 = lp_build_lerp(bld, x, v10, v11);
1106 return lp_build_lerp(bld, y, v0, v1);
1107 }
1108
1109
1110 /**
1111 * Generate min(a, b)
1112 * Do checks for special cases.
1113 */
1114 LLVMValueRef
1115 lp_build_min(struct lp_build_context *bld,
1116 LLVMValueRef a,
1117 LLVMValueRef b)
1118 {
1119 assert(lp_check_value(bld->type, a));
1120 assert(lp_check_value(bld->type, b));
1121
1122 if(a == bld->undef || b == bld->undef)
1123 return bld->undef;
1124
1125 if(a == b)
1126 return a;
1127
1128 if (bld->type.norm) {
1129 if (!bld->type.sign) {
1130 if (a == bld->zero || b == bld->zero) {
1131 return bld->zero;
1132 }
1133 }
1134 if(a == bld->one)
1135 return b;
1136 if(b == bld->one)
1137 return a;
1138 }
1139
1140 return lp_build_min_simple(bld, a, b);
1141 }
1142
1143
1144 /**
1145 * Generate max(a, b)
1146 * Do checks for special cases.
1147 */
1148 LLVMValueRef
1149 lp_build_max(struct lp_build_context *bld,
1150 LLVMValueRef a,
1151 LLVMValueRef b)
1152 {
1153 assert(lp_check_value(bld->type, a));
1154 assert(lp_check_value(bld->type, b));
1155
1156 if(a == bld->undef || b == bld->undef)
1157 return bld->undef;
1158
1159 if(a == b)
1160 return a;
1161
1162 if(bld->type.norm) {
1163 if(a == bld->one || b == bld->one)
1164 return bld->one;
1165 if (!bld->type.sign) {
1166 if (a == bld->zero) {
1167 return b;
1168 }
1169 if (b == bld->zero) {
1170 return a;
1171 }
1172 }
1173 }
1174
1175 return lp_build_max_simple(bld, a, b);
1176 }
1177
1178
1179 /**
1180 * Generate clamp(a, min, max)
1181 * Do checks for special cases.
1182 */
1183 LLVMValueRef
1184 lp_build_clamp(struct lp_build_context *bld,
1185 LLVMValueRef a,
1186 LLVMValueRef min,
1187 LLVMValueRef max)
1188 {
1189 assert(lp_check_value(bld->type, a));
1190 assert(lp_check_value(bld->type, min));
1191 assert(lp_check_value(bld->type, max));
1192
1193 a = lp_build_min(bld, a, max);
1194 a = lp_build_max(bld, a, min);
1195 return a;
1196 }
1197
1198
1199 /**
1200 * Generate abs(a)
1201 */
1202 LLVMValueRef
1203 lp_build_abs(struct lp_build_context *bld,
1204 LLVMValueRef a)
1205 {
1206 LLVMBuilderRef builder = bld->gallivm->builder;
1207 const struct lp_type type = bld->type;
1208 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
1209
1210 assert(lp_check_value(type, a));
1211
1212 if(!type.sign)
1213 return a;
1214
1215 if(type.floating) {
1216 /* Mask out the sign bit */
1217 LLVMTypeRef int_vec_type = lp_build_int_vec_type(bld->gallivm, type);
1218 unsigned long long absMask = ~(1ULL << (type.width - 1));
1219 LLVMValueRef mask = lp_build_const_int_vec(bld->gallivm, type, ((unsigned long long) absMask));
1220 a = LLVMBuildBitCast(builder, a, int_vec_type, "");
1221 a = LLVMBuildAnd(builder, a, mask, "");
1222 a = LLVMBuildBitCast(builder, a, vec_type, "");
1223 return a;
1224 }
1225
1226 if(type.width*type.length == 128 && util_cpu_caps.has_ssse3) {
1227 switch(type.width) {
1228 case 8:
1229 return lp_build_intrinsic_unary(builder, "llvm.x86.ssse3.pabs.b.128", vec_type, a);
1230 case 16:
1231 return lp_build_intrinsic_unary(builder, "llvm.x86.ssse3.pabs.w.128", vec_type, a);
1232 case 32:
1233 return lp_build_intrinsic_unary(builder, "llvm.x86.ssse3.pabs.d.128", vec_type, a);
1234 }
1235 }
1236 else if (type.width*type.length == 256 && util_cpu_caps.has_ssse3 &&
1237 (gallivm_debug & GALLIVM_DEBUG_PERF) &&
1238 (type.width == 8 || type.width == 16 || type.width == 32)) {
1239 debug_printf("%s: inefficient code, should split vectors manually\n",
1240 __FUNCTION__);
1241 }
1242
1243 return lp_build_max(bld, a, LLVMBuildNeg(builder, a, ""));
1244 }
1245
1246
1247 LLVMValueRef
1248 lp_build_negate(struct lp_build_context *bld,
1249 LLVMValueRef a)
1250 {
1251 LLVMBuilderRef builder = bld->gallivm->builder;
1252
1253 assert(lp_check_value(bld->type, a));
1254
1255 #if HAVE_LLVM >= 0x0207
1256 if (bld->type.floating)
1257 a = LLVMBuildFNeg(builder, a, "");
1258 else
1259 #endif
1260 a = LLVMBuildNeg(builder, a, "");
1261
1262 return a;
1263 }
1264
1265
1266 /** Return -1, 0 or +1 depending on the sign of a */
1267 LLVMValueRef
1268 lp_build_sgn(struct lp_build_context *bld,
1269 LLVMValueRef a)
1270 {
1271 LLVMBuilderRef builder = bld->gallivm->builder;
1272 const struct lp_type type = bld->type;
1273 LLVMValueRef cond;
1274 LLVMValueRef res;
1275
1276 assert(lp_check_value(type, a));
1277
1278 /* Handle non-zero case */
1279 if(!type.sign) {
1280 /* if not zero then sign must be positive */
1281 res = bld->one;
1282 }
1283 else if(type.floating) {
1284 LLVMTypeRef vec_type;
1285 LLVMTypeRef int_type;
1286 LLVMValueRef mask;
1287 LLVMValueRef sign;
1288 LLVMValueRef one;
1289 unsigned long long maskBit = (unsigned long long)1 << (type.width - 1);
1290
1291 int_type = lp_build_int_vec_type(bld->gallivm, type);
1292 vec_type = lp_build_vec_type(bld->gallivm, type);
1293 mask = lp_build_const_int_vec(bld->gallivm, type, maskBit);
1294
1295       /* Take the sign bit and OR it into the 1.0 constant */
1296 sign = LLVMBuildBitCast(builder, a, int_type, "");
1297 sign = LLVMBuildAnd(builder, sign, mask, "");
1298 one = LLVMConstBitCast(bld->one, int_type);
1299 res = LLVMBuildOr(builder, sign, one, "");
1300 res = LLVMBuildBitCast(builder, res, vec_type, "");
1301 }
1302 else
1303 {
1304 /* signed int/norm/fixed point */
1305 /* could use psign with sse3 and appropriate vectors here */
1306 LLVMValueRef minus_one = lp_build_const_vec(bld->gallivm, type, -1.0);
1307 cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, bld->zero);
1308 res = lp_build_select(bld, cond, bld->one, minus_one);
1309 }
1310
1311 /* Handle zero */
1312 cond = lp_build_cmp(bld, PIPE_FUNC_EQUAL, a, bld->zero);
1313 res = lp_build_select(bld, cond, bld->zero, res);
1314
1315 return res;
1316 }
1317
1318
1319 /**
1320 * Set the sign of float vector 'a' according to 'sign'.
1321 * If sign==0, return abs(a).
1322 * If sign==1, return -abs(a);
1323 * Other values for sign produce undefined results.
1324 */
1325 LLVMValueRef
1326 lp_build_set_sign(struct lp_build_context *bld,
1327 LLVMValueRef a, LLVMValueRef sign)
1328 {
1329 LLVMBuilderRef builder = bld->gallivm->builder;
1330 const struct lp_type type = bld->type;
1331 LLVMTypeRef int_vec_type = lp_build_int_vec_type(bld->gallivm, type);
1332 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
1333 LLVMValueRef shift = lp_build_const_int_vec(bld->gallivm, type, type.width - 1);
1334 LLVMValueRef mask = lp_build_const_int_vec(bld->gallivm, type,
1335 ~((unsigned long long) 1 << (type.width - 1)));
1336 LLVMValueRef val, res;
1337
1338 assert(type.floating);
1339 assert(lp_check_value(type, a));
1340
1341 /* val = reinterpret_cast<int>(a) */
1342 val = LLVMBuildBitCast(builder, a, int_vec_type, "");
1343 /* val = val & mask */
1344 val = LLVMBuildAnd(builder, val, mask, "");
1345 /* sign = sign << shift */
1346 sign = LLVMBuildShl(builder, sign, shift, "");
1347 /* res = val | sign */
1348 res = LLVMBuildOr(builder, val, sign, "");
1349 /* res = reinterpret_cast<float>(res) */
1350 res = LLVMBuildBitCast(builder, res, vec_type, "");
1351
1352 return res;
1353 }
1354
1355
1356 /**
1357 * Convert vector of (or scalar) int to vector of (or scalar) float.
1358 */
1359 LLVMValueRef
1360 lp_build_int_to_float(struct lp_build_context *bld,
1361 LLVMValueRef a)
1362 {
1363 LLVMBuilderRef builder = bld->gallivm->builder;
1364 const struct lp_type type = bld->type;
1365 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
1366
1367 assert(type.floating);
1368
1369 return LLVMBuildSIToFP(builder, a, vec_type, "");
1370 }
1371
1372 static boolean
1373 arch_rounding_available(const struct lp_type type)
1374 {
1375 if ((util_cpu_caps.has_sse4_1 &&
1376 (type.length == 1 || type.width*type.length == 128)) ||
1377 (util_cpu_caps.has_avx && type.width*type.length == 256))
1378 return TRUE;
1379 else if ((util_cpu_caps.has_altivec &&
1380 (type.width == 32 && type.length == 4)))
1381 return TRUE;
1382
1383 return FALSE;
1384 }
1385
1386 enum lp_build_round_mode
1387 {
1388 LP_BUILD_ROUND_NEAREST = 0,
1389 LP_BUILD_ROUND_FLOOR = 1,
1390 LP_BUILD_ROUND_CEIL = 2,
1391 LP_BUILD_ROUND_TRUNCATE = 3
1392 };
1393
1394 /**
1395 * Helper for SSE4.1's ROUNDxx instructions.
1396 *
1397 * NOTE: In SSE4.1's nearest mode, if two values are equally close, the
1398 * result is the even value. That is, rounding 2.5 will be 2.0, and not 3.0.
1399 */
1400 static INLINE LLVMValueRef
1401 lp_build_round_sse41(struct lp_build_context *bld,
1402 LLVMValueRef a,
1403 enum lp_build_round_mode mode)
1404 {
1405 LLVMBuilderRef builder = bld->gallivm->builder;
1406 const struct lp_type type = bld->type;
1407 LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
1408 const char *intrinsic;
1409 LLVMValueRef res;
1410
1411 assert(type.floating);
1412
1413 assert(lp_check_value(type, a));
1414 assert(util_cpu_caps.has_sse4_1);
1415
1416 if (type.length == 1) {
1417 LLVMTypeRef vec_type;
1418 LLVMValueRef undef;
1419 LLVMValueRef args[3];
1420 LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
1421
1422 switch(type.width) {
1423 case 32:
1424 intrinsic = "llvm.x86.sse41.round.ss";
1425 break;
1426 case 64:
1427 intrinsic = "llvm.x86.sse41.round.sd";
1428 break;
1429 default:
1430 assert(0);
1431 return bld->undef;
1432 }
1433
1434 vec_type = LLVMVectorType(bld->elem_type, 4);
1435
1436 undef = LLVMGetUndef(vec_type);
1437
1438 args[0] = undef;
1439 args[1] = LLVMBuildInsertElement(builder, undef, a, index0, "");
1440 args[2] = LLVMConstInt(i32t, mode, 0);
1441
1442 res = lp_build_intrinsic(builder, intrinsic,
1443 vec_type, args, Elements(args));
1444
1445 res = LLVMBuildExtractElement(builder, res, index0, "");
1446 }
1447 else {
1448 if (type.width * type.length == 128) {
1449 switch(type.width) {
1450 case 32:
1451 intrinsic = "llvm.x86.sse41.round.ps";
1452 break;
1453 case 64:
1454 intrinsic = "llvm.x86.sse41.round.pd";
1455 break;
1456 default:
1457 assert(0);
1458 return bld->undef;
1459 }
1460 }
1461 else {
1462 assert(type.width * type.length == 256);
1463 assert(util_cpu_caps.has_avx);
1464
1465 switch(type.width) {
1466 case 32:
1467 intrinsic = "llvm.x86.avx.round.ps.256";
1468 break;
1469 case 64:
1470 intrinsic = "llvm.x86.avx.round.pd.256";
1471 break;
1472 default:
1473 assert(0);
1474 return bld->undef;
1475 }
1476 }
1477
1478 res = lp_build_intrinsic_binary(builder, intrinsic,
1479 bld->vec_type, a,
1480 LLVMConstInt(i32t, mode, 0));
1481 }
1482
1483 return res;
1484 }
1485
1486
1487 static INLINE LLVMValueRef
1488 lp_build_iround_nearest_sse2(struct lp_build_context *bld,
1489 LLVMValueRef a)
1490 {
1491 LLVMBuilderRef builder = bld->gallivm->builder;
1492 const struct lp_type type = bld->type;
1493 LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
1494 LLVMTypeRef ret_type = lp_build_int_vec_type(bld->gallivm, type);
1495 const char *intrinsic;
1496 LLVMValueRef res;
1497
1498 assert(type.floating);
1499 /* using the double precision conversions is a bit more complicated */
1500 assert(type.width == 32);
1501
1502 assert(lp_check_value(type, a));
1503 assert(util_cpu_caps.has_sse2);
1504
1505 /* This is relying on MXCSR rounding mode, which should always be nearest. */
1506 if (type.length == 1) {
1507 LLVMTypeRef vec_type;
1508 LLVMValueRef undef;
1509 LLVMValueRef arg;
1510 LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
1511
1512 vec_type = LLVMVectorType(bld->elem_type, 4);
1513
1514 intrinsic = "llvm.x86.sse.cvtss2si";
1515
1516 undef = LLVMGetUndef(vec_type);
1517
1518 arg = LLVMBuildInsertElement(builder, undef, a, index0, "");
1519
1520 res = lp_build_intrinsic_unary(builder, intrinsic,
1521 ret_type, arg);
1522 }
1523 else {
1524 if (type.width* type.length == 128) {
1525 intrinsic = "llvm.x86.sse2.cvtps2dq";
1526 }
1527 else {
1528 assert(type.width*type.length == 256);
1529 assert(util_cpu_caps.has_avx);
1530
1531 intrinsic = "llvm.x86.avx.cvt.ps2dq.256";
1532 }
1533 res = lp_build_intrinsic_unary(builder, intrinsic,
1534 ret_type, a);
1535 }
1536
1537 return res;
1538 }
1539
1540
1541 /* Helper for the Altivec vrfi* floating-point rounding instructions. */
1543 static INLINE LLVMValueRef
1544 lp_build_round_altivec(struct lp_build_context *bld,
1545 LLVMValueRef a,
1546 enum lp_build_round_mode mode)
1547 {
1548 LLVMBuilderRef builder = bld->gallivm->builder;
1549 const struct lp_type type = bld->type;
1550 const char *intrinsic = NULL;
1551
1552 assert(type.floating);
1553
1554 assert(lp_check_value(type, a));
1555 assert(util_cpu_caps.has_altivec);
1556
1557 switch (mode) {
1558 case LP_BUILD_ROUND_NEAREST:
1559 intrinsic = "llvm.ppc.altivec.vrfin";
1560 break;
1561 case LP_BUILD_ROUND_FLOOR:
1562 intrinsic = "llvm.ppc.altivec.vrfim";
1563 break;
1564 case LP_BUILD_ROUND_CEIL:
1565 intrinsic = "llvm.ppc.altivec.vrfip";
1566 break;
1567 case LP_BUILD_ROUND_TRUNCATE:
1568 intrinsic = "llvm.ppc.altivec.vrfiz";
1569 break;
1570 }
1571
1572 return lp_build_intrinsic_unary(builder, intrinsic, bld->vec_type, a);
1573 }
1574
1575 static INLINE LLVMValueRef
1576 lp_build_round_arch(struct lp_build_context *bld,
1577 LLVMValueRef a,
1578 enum lp_build_round_mode mode)
1579 {
1580 if (util_cpu_caps.has_sse4_1)
1581 return lp_build_round_sse41(bld, a, mode);
1582 else /* (util_cpu_caps.has_altivec) */
1583 return lp_build_round_altivec(bld, a, mode);
1584 }
1585
1586 /**
1587 * Return the integer part of a float (vector) value (== round toward zero).
1588 * The returned value is a float (vector).
1589 * Ex: trunc(-1.5) = -1.0
1590 */
1591 LLVMValueRef
1592 lp_build_trunc(struct lp_build_context *bld,
1593 LLVMValueRef a)
1594 {
1595 LLVMBuilderRef builder = bld->gallivm->builder;
1596 const struct lp_type type = bld->type;
1597
1598 assert(type.floating);
1599 assert(lp_check_value(type, a));
1600
1601 if (arch_rounding_available(type)) {
1602 return lp_build_round_arch(bld, a, LP_BUILD_ROUND_TRUNCATE);
1603 }
1604 else {
1605 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
1606 LLVMTypeRef int_vec_type = lp_build_int_vec_type(bld->gallivm, type);
1607 LLVMValueRef res;
1608 res = LLVMBuildFPToSI(builder, a, int_vec_type, "");
1609 res = LLVMBuildSIToFP(builder, res, vec_type, "");
1610 return res;
1611 }
1612 }
1613
1614
1615 /**
1616 * Return float (vector) rounded to nearest integer (vector). The returned
1617 * value is a float (vector).
1618 * Ex: round(0.9) = 1.0
1619 * Ex: round(-1.5) = -2.0
1620 */
1621 LLVMValueRef
1622 lp_build_round(struct lp_build_context *bld,
1623 LLVMValueRef a)
1624 {
1625 LLVMBuilderRef builder = bld->gallivm->builder;
1626 const struct lp_type type = bld->type;
1627
1628 assert(type.floating);
1629 assert(lp_check_value(type, a));
1630
1631 if (arch_rounding_available(type)) {
1632 return lp_build_round_arch(bld, a, LP_BUILD_ROUND_NEAREST);
1633 }
1634 else {
1635 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
1636 LLVMValueRef res;
1637 res = lp_build_iround(bld, a);
1638 res = LLVMBuildSIToFP(builder, res, vec_type, "");
1639 return res;
1640 }
1641 }
1642
1643
1644 /**
1645 * Return floor of float (vector), result is a float (vector)
1646 * Ex: floor(1.1) = 1.0
1647 * Ex: floor(-1.1) = -2.0
1648 */
1649 LLVMValueRef
1650 lp_build_floor(struct lp_build_context *bld,
1651 LLVMValueRef a)
1652 {
1653 LLVMBuilderRef builder = bld->gallivm->builder;
1654 const struct lp_type type = bld->type;
1655
1656 assert(type.floating);
1657 assert(lp_check_value(type, a));
1658
1659 if (arch_rounding_available(type)) {
1660 return lp_build_round_arch(bld, a, LP_BUILD_ROUND_FLOOR);
1661 }
1662 else {
1663 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
1664 LLVMValueRef res;
1665 res = lp_build_ifloor(bld, a);
1666 res = LLVMBuildSIToFP(builder, res, vec_type, "");
1667 return res;
1668 }
1669 }
1670
1671
1672 /**
1673 * Return ceiling of float (vector), returning float (vector).
1674 * Ex: ceil( 1.1) = 2.0
1675 * Ex: ceil(-1.1) = -1.0
1676 */
1677 LLVMValueRef
1678 lp_build_ceil(struct lp_build_context *bld,
1679 LLVMValueRef a)
1680 {
1681 LLVMBuilderRef builder = bld->gallivm->builder;
1682 const struct lp_type type = bld->type;
1683
1684 assert(type.floating);
1685 assert(lp_check_value(type, a));
1686
1687 if (arch_rounding_available(type)) {
1688 return lp_build_round_arch(bld, a, LP_BUILD_ROUND_CEIL);
1689 }
1690 else {
1691 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
1692 LLVMValueRef res;
1693 res = lp_build_iceil(bld, a);
1694 res = LLVMBuildSIToFP(builder, res, vec_type, "");
1695 return res;
1696 }
1697 }
1698
1699
1700 /**
1701 * Return fractional part of 'a' computed as a - floor(a)
1702 * Typically used in texture coord arithmetic.
1703 */
1704 LLVMValueRef
1705 lp_build_fract(struct lp_build_context *bld,
1706 LLVMValueRef a)
1707 {
1708 assert(bld->type.floating);
1709 return lp_build_sub(bld, a, lp_build_floor(bld, a));
1710 }
1711
1712
1713 /**
1714 * Prevent returning a fractional part of 1.0 for very small negative values of
1715 * 'a' by clamping against 0.99999(9).
1716 */
1717 static inline LLVMValueRef
1718 clamp_fract(struct lp_build_context *bld, LLVMValueRef fract)
1719 {
1720 LLVMValueRef max;
1721
1722 /* this is the largest number smaller than 1.0 representable as float */
1723 max = lp_build_const_vec(bld->gallivm, bld->type,
1724 1.0 - 1.0/(1LL << (lp_mantissa(bld->type) + 1)));
1725 return lp_build_min(bld, fract, max);
1726 }
1727
1728
1729 /**
1730 * Same as lp_build_fract, but guarantees that the result is always smaller
1731 * than one.
1732 */
1733 LLVMValueRef
1734 lp_build_fract_safe(struct lp_build_context *bld,
1735 LLVMValueRef a)
1736 {
1737 return clamp_fract(bld, lp_build_fract(bld, a));
1738 }
1739
1740
1741 /**
1742 * Return the integer part of a float (vector) value (== round toward zero).
1743 * The returned value is an integer (vector).
1744 * Ex: itrunc(-1.5) = -1
1745 */
1746 LLVMValueRef
1747 lp_build_itrunc(struct lp_build_context *bld,
1748 LLVMValueRef a)
1749 {
1750 LLVMBuilderRef builder = bld->gallivm->builder;
1751 const struct lp_type type = bld->type;
1752 LLVMTypeRef int_vec_type = lp_build_int_vec_type(bld->gallivm, type);
1753
1754 assert(type.floating);
1755 assert(lp_check_value(type, a));
1756
1757 return LLVMBuildFPToSI(builder, a, int_vec_type, "");
1758 }
1759
1760
1761 /**
1762 * Return float (vector) rounded to nearest integer (vector). The returned
1763 * value is an integer (vector).
1764 * Ex: iround(0.9) = 1
1765 * Ex: iround(-1.5) = -2
1766 */
1767 LLVMValueRef
1768 lp_build_iround(struct lp_build_context *bld,
1769 LLVMValueRef a)
1770 {
1771 LLVMBuilderRef builder = bld->gallivm->builder;
1772 const struct lp_type type = bld->type;
1773 LLVMTypeRef int_vec_type = bld->int_vec_type;
1774 LLVMValueRef res;
1775
1776 assert(type.floating);
1777
1778 assert(lp_check_value(type, a));
1779
1780 if ((util_cpu_caps.has_sse2 &&
1781 ((type.width == 32) && (type.length == 1 || type.length == 4))) ||
1782 (util_cpu_caps.has_avx && type.width == 32 && type.length == 8)) {
1783 return lp_build_iround_nearest_sse2(bld, a);
1784 }
1785 if (arch_rounding_available(type)) {
1786 res = lp_build_round_arch(bld, a, LP_BUILD_ROUND_NEAREST);
1787 }
1788 else {
1789 LLVMValueRef half;
1790
1791 half = lp_build_const_vec(bld->gallivm, type, 0.5);
1792
1793 if (type.sign) {
1794 LLVMTypeRef vec_type = bld->vec_type;
1795 LLVMValueRef mask = lp_build_const_int_vec(bld->gallivm, type,
1796 (unsigned long long)1 << (type.width - 1));
1797 LLVMValueRef sign;
1798
1799 /* get sign bit */
1800 sign = LLVMBuildBitCast(builder, a, int_vec_type, "");
1801 sign = LLVMBuildAnd(builder, sign, mask, "");
1802
1803 /* sign * 0.5 */
1804 half = LLVMBuildBitCast(builder, half, int_vec_type, "");
1805 half = LLVMBuildOr(builder, sign, half, "");
1806 half = LLVMBuildBitCast(builder, half, vec_type, "");
1807 }
1808
1809 res = LLVMBuildFAdd(builder, a, half, "");
1810 }
1811
1812 res = LLVMBuildFPToSI(builder, res, int_vec_type, "");
1813
1814 return res;
1815 }
1816
1817
1818 /**
1819 * Return floor of float (vector), result is an int (vector)
1820 * Ex: ifloor(1.1) = 1
1821 * Ex: ifloor(-1.1) = -2
1822 */
1823 LLVMValueRef
1824 lp_build_ifloor(struct lp_build_context *bld,
1825 LLVMValueRef a)
1826 {
1827 LLVMBuilderRef builder = bld->gallivm->builder;
1828 const struct lp_type type = bld->type;
1829 LLVMTypeRef int_vec_type = bld->int_vec_type;
1830 LLVMValueRef res;
1831
1832 assert(type.floating);
1833 assert(lp_check_value(type, a));
1834
1835 res = a;
1836 if (type.sign) {
1837 if (arch_rounding_available(type)) {
1838 res = lp_build_round_arch(bld, a, LP_BUILD_ROUND_FLOOR);
1839 }
1840 else {
1841          /* Bias negative values by almost -1.0 so the truncation below rounds down */
1842 LLVMTypeRef vec_type = bld->vec_type;
1843 unsigned mantissa = lp_mantissa(type);
1844 LLVMValueRef mask = lp_build_const_int_vec(bld->gallivm, type,
1845 (unsigned long long)1 << (type.width - 1));
1846 LLVMValueRef sign;
1847 LLVMValueRef offset;
1848
1849 /* sign = a < 0 ? ~0 : 0 */
1850 sign = LLVMBuildBitCast(builder, a, int_vec_type, "");
1851 sign = LLVMBuildAnd(builder, sign, mask, "");
1852 sign = LLVMBuildAShr(builder, sign,
1853 lp_build_const_int_vec(bld->gallivm, type,
1854 type.width - 1),
1855 "ifloor.sign");
1856
1857 /* offset = -0.99999(9)f */
1858 offset = lp_build_const_vec(bld->gallivm, type,
1859 -(double)(((unsigned long long)1 << mantissa) - 10)/((unsigned long long)1 << mantissa));
1860 offset = LLVMConstBitCast(offset, int_vec_type);
1861
1862 /* offset = a < 0 ? offset : 0.0f */
1863 offset = LLVMBuildAnd(builder, offset, sign, "");
1864 offset = LLVMBuildBitCast(builder, offset, vec_type, "ifloor.offset");
1865
1866 res = LLVMBuildFAdd(builder, res, offset, "ifloor.res");
1867 }
1868 }
1869
1870 /* round to nearest (toward zero) */
1871 res = LLVMBuildFPToSI(builder, res, int_vec_type, "ifloor.res");
1872
1873 return res;
1874 }
1875
1876
1877 /**
1878 * Return ceiling of float (vector), returning int (vector).
1879 * Ex: iceil( 1.1) = 2
1880 * Ex: iceil(-1.1) = -1
1881 */
1882 LLVMValueRef
1883 lp_build_iceil(struct lp_build_context *bld,
1884 LLVMValueRef a)
1885 {
1886 LLVMBuilderRef builder = bld->gallivm->builder;
1887 const struct lp_type type = bld->type;
1888 LLVMTypeRef int_vec_type = bld->int_vec_type;
1889 LLVMValueRef res;
1890
1891 assert(type.floating);
1892 assert(lp_check_value(type, a));
1893
1894 if (arch_rounding_available(type)) {
1895 res = lp_build_round_arch(bld, a, LP_BUILD_ROUND_CEIL);
1896 }
1897 else {
1898 LLVMTypeRef vec_type = bld->vec_type;
1899 unsigned mantissa = lp_mantissa(type);
1900 LLVMValueRef offset;
1901
1902 /* offset = 0.99999(9)f */
1903 offset = lp_build_const_vec(bld->gallivm, type,
1904 (double)(((unsigned long long)1 << mantissa) - 10)/((unsigned long long)1 << mantissa));
1905
1906 if (type.sign) {
1907 LLVMValueRef mask = lp_build_const_int_vec(bld->gallivm, type,
1908 (unsigned long long)1 << (type.width - 1));
1909 LLVMValueRef sign;
1910
1911 /* sign = a < 0 ? 0 : ~0 */
1912 sign = LLVMBuildBitCast(builder, a, int_vec_type, "");
1913 sign = LLVMBuildAnd(builder, sign, mask, "");
1914 sign = LLVMBuildAShr(builder, sign,
1915 lp_build_const_int_vec(bld->gallivm, type,
1916 type.width - 1),
1917 "iceil.sign");
1918 sign = LLVMBuildNot(builder, sign, "iceil.not");
1919
1920 /* offset = a < 0 ? 0.0 : offset */
1921 offset = LLVMConstBitCast(offset, int_vec_type);
1922 offset = LLVMBuildAnd(builder, offset, sign, "");
1923 offset = LLVMBuildBitCast(builder, offset, vec_type, "iceil.offset");
1924 }
1925
1926 res = LLVMBuildFAdd(builder, a, offset, "iceil.res");
1927 }
1928
1929 /* round to nearest (toward zero) */
1930 res = LLVMBuildFPToSI(builder, res, int_vec_type, "iceil.res");
1931
1932 return res;
1933 }
1934
1935
1936 /**
1937 * Combined ifloor() & fract().
1938 *
1939 * Preferred to calling the functions separately, as it will ensure that the
1940 * strategy (floor() vs ifloor()) that results in less redundant work is used.
1941 */
1942 void
1943 lp_build_ifloor_fract(struct lp_build_context *bld,
1944 LLVMValueRef a,
1945 LLVMValueRef *out_ipart,
1946 LLVMValueRef *out_fpart)
1947 {
1948 LLVMBuilderRef builder = bld->gallivm->builder;
1949 const struct lp_type type = bld->type;
1950 LLVMValueRef ipart;
1951
1952 assert(type.floating);
1953 assert(lp_check_value(type, a));
1954
1955 if (arch_rounding_available(type)) {
1956 /*
1957 * floor() is easier.
1958 */
1959
1960 ipart = lp_build_floor(bld, a);
1961 *out_fpart = LLVMBuildFSub(builder, a, ipart, "fpart");
1962 *out_ipart = LLVMBuildFPToSI(builder, ipart, bld->int_vec_type, "ipart");
1963 }
1964 else {
1965 /*
1966 * ifloor() is easier.
1967 */
1968
1969 *out_ipart = lp_build_ifloor(bld, a);
1970 ipart = LLVMBuildSIToFP(builder, *out_ipart, bld->vec_type, "ipart");
1971 *out_fpart = LLVMBuildFSub(builder, a, ipart, "fpart");
1972 }
1973 }
1974
1975
1976 /**
1977 * Same as lp_build_ifloor_fract, but guarantees that the fractional part is
1978 * always smaller than one.
1979 */
1980 void
1981 lp_build_ifloor_fract_safe(struct lp_build_context *bld,
1982 LLVMValueRef a,
1983 LLVMValueRef *out_ipart,
1984 LLVMValueRef *out_fpart)
1985 {
1986 lp_build_ifloor_fract(bld, a, out_ipart, out_fpart);
1987 *out_fpart = clamp_fract(bld, *out_fpart);
1988 }
1989
1990
1991 LLVMValueRef
1992 lp_build_sqrt(struct lp_build_context *bld,
1993 LLVMValueRef a)
1994 {
1995 LLVMBuilderRef builder = bld->gallivm->builder;
1996 const struct lp_type type = bld->type;
1997 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
1998 char intrinsic[32];
1999
2000 assert(lp_check_value(type, a));
2001
2002 /* TODO: optimize the constant case */
2003
2004 assert(type.floating);
2005 if (type.length == 1) {
2006 util_snprintf(intrinsic, sizeof intrinsic, "llvm.sqrt.f%u", type.width);
2007 }
2008 else {
2009 util_snprintf(intrinsic, sizeof intrinsic, "llvm.sqrt.v%uf%u", type.length, type.width);
2010 }
2011
2012 return lp_build_intrinsic_unary(builder, intrinsic, vec_type, a);
2013 }
2014
2015
2016 /**
2017  * Do one Newton-Raphson step to improve reciprocal precision:
2018 *
2019 * x_{i+1} = x_i * (2 - a * x_i)
2020 *
2021  * XXX: Unfortunately this won't give IEEE-754 conformant results for 0 or
2022  * +/-Inf, yielding NaN instead.  Certain applications rely on the conformant
2023  * behavior, such as Google Earth, which does RCP(RSQRT(0.0)) when drawing the
2024  * Earth's halo. It would be necessary to clamp the argument to prevent this.
2025 *
2026 * See also:
2027 * - http://en.wikipedia.org/wiki/Division_(digital)#Newton.E2.80.93Raphson_division
2028 * - http://softwarecommunity.intel.com/articles/eng/1818.htm
2029 */
2030 static INLINE LLVMValueRef
2031 lp_build_rcp_refine(struct lp_build_context *bld,
2032 LLVMValueRef a,
2033 LLVMValueRef rcp_a)
2034 {
2035 LLVMBuilderRef builder = bld->gallivm->builder;
2036 LLVMValueRef two = lp_build_const_vec(bld->gallivm, bld->type, 2.0);
2037 LLVMValueRef res;
2038
2039 res = LLVMBuildFMul(builder, a, rcp_a, "");
2040 res = LLVMBuildFSub(builder, two, res, "");
2041 res = LLVMBuildFMul(builder, rcp_a, res, "");
2042
2043 return res;
2044 }
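

/*
 * Scalar form of the step above (reference only, hypothetical helper):
 * each iteration roughly doubles the number of correct bits of the
 * reciprocal estimate, e.g. a = 3, x0 = 0.33: x1 = 0.33*(2 - 0.99) = 0.3333.
 */
static INLINE float
rcp_refine_scalar_reference(float a, float rcp_a)
{
   return rcp_a * (2.0f - a * rcp_a);
}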
2045
2046
2047 LLVMValueRef
2048 lp_build_rcp(struct lp_build_context *bld,
2049 LLVMValueRef a)
2050 {
2051 LLVMBuilderRef builder = bld->gallivm->builder;
2052 const struct lp_type type = bld->type;
2053
2054 assert(lp_check_value(type, a));
2055
2056 if(a == bld->zero)
2057 return bld->undef;
2058 if(a == bld->one)
2059 return bld->one;
2060 if(a == bld->undef)
2061 return bld->undef;
2062
2063 assert(type.floating);
2064
2065 if(LLVMIsConstant(a))
2066 return LLVMConstFDiv(bld->one, a);
2067
2068 /*
2069 * We don't use RCPPS because:
2070  *   - it only has 10 bits of precision
2071  *   - it doesn't even get the reciprocal of 1.0 exactly
2072  *   - doing Newton-Raphson steps yields wrong (NaN) values for 0.0 or Inf
2073  *   - for recent processors the benefit over DIVPS is marginal, and case
2074  *     dependent
2075  *
2076  * We could still use it on certain processors if benchmarks show that
2077  * RCPPS plus the necessary workarounds is still preferable to DIVPS; or for
2078  * particular uses that require fewer workarounds.
2079 */
2080
2081 if (FALSE && ((util_cpu_caps.has_sse && type.width == 32 && type.length == 4) ||
2082 (util_cpu_caps.has_avx && type.width == 32 && type.length == 8))){
2083 const unsigned num_iterations = 0;
2084 LLVMValueRef res;
2085 unsigned i;
2086 const char *intrinsic = NULL;
2087
2088 if (type.length == 4) {
2089 intrinsic = "llvm.x86.sse.rcp.ps";
2090 }
2091 else {
2092 intrinsic = "llvm.x86.avx.rcp.ps.256";
2093 }
2094
2095 res = lp_build_intrinsic_unary(builder, intrinsic, bld->vec_type, a);
2096
2097 for (i = 0; i < num_iterations; ++i) {
2098 res = lp_build_rcp_refine(bld, a, res);
2099 }
2100
2101 return res;
2102 }
2103
2104 return LLVMBuildFDiv(builder, bld->one, a, "");
2105 }
2106
2107
2108 /**
2109 * Do one Newton-Raphson step to improve rsqrt precision:
2110 *
2111 * x_{i+1} = 0.5 * x_i * (3.0 - a * x_i * x_i)
2112 *
2113 * See also Intel 64 and IA-32 Architectures Optimization Manual.
2114 */
2115 static INLINE LLVMValueRef
2116 lp_build_rsqrt_refine(struct lp_build_context *bld,
2117 LLVMValueRef a,
2118 LLVMValueRef rsqrt_a)
2119 {
2120 LLVMBuilderRef builder = bld->gallivm->builder;
2121 LLVMValueRef half = lp_build_const_vec(bld->gallivm, bld->type, 0.5);
2122 LLVMValueRef three = lp_build_const_vec(bld->gallivm, bld->type, 3.0);
2123 LLVMValueRef res;
2124
2125 res = LLVMBuildFMul(builder, rsqrt_a, rsqrt_a, "");
2126 res = LLVMBuildFMul(builder, a, res, "");
2127 res = LLVMBuildFSub(builder, three, res, "");
2128 res = LLVMBuildFMul(builder, rsqrt_a, res, "");
2129 res = LLVMBuildFMul(builder, half, res, "");
2130
2131 return res;
2132 }
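

/*
 * Scalar form of the step above (reference only, hypothetical helper):
 * x1 = 0.5 * x0 * (3 - a * x0 * x0).
 */
static INLINE float
rsqrt_refine_scalar_reference(float a, float rsqrt_a)
{
   return 0.5f * rsqrt_a * (3.0f - a * rsqrt_a * rsqrt_a);
}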
2133
2134
2135 /**
2136 * Generate 1/sqrt(a).
2137 * Result is undefined for values < 0, infinity for +0.
2138 */
2139 LLVMValueRef
2140 lp_build_rsqrt(struct lp_build_context *bld,
2141 LLVMValueRef a)
2142 {
2143 LLVMBuilderRef builder = bld->gallivm->builder;
2144 const struct lp_type type = bld->type;
2145
2146 assert(lp_check_value(type, a));
2147
2148 assert(type.floating);
2149
2150 /*
2151 * This should be faster but all denormals will end up as infinity.
2152 */
2153 if (0 && ((util_cpu_caps.has_sse && type.width == 32 && type.length == 4) ||
2154 (util_cpu_caps.has_avx && type.width == 32 && type.length == 8))) {
2155 const unsigned num_iterations = 1;
2156 LLVMValueRef res;
2157 unsigned i;
2158 const char *intrinsic = NULL;
2159
2160 if (type.length == 4) {
2161 intrinsic = "llvm.x86.sse.rsqrt.ps";
2162 }
2163 else {
2164 intrinsic = "llvm.x86.avx.rsqrt.ps.256";
2165 }
2166 if (num_iterations) {
2167 /*
2168 * Newton-Raphson will result in NaN instead of infinity for zero,
2169 * and NaN instead of zero for infinity.
2170 * Also, need to ensure rsqrt(1.0) == 1.0.
2171 * All numbers smaller than FLT_MIN will result in +infinity
2172 * (rsqrtps treats all denormals as zero).
2173 */
2174 /*
2175 * Certain non-c99 compilers don't know INFINITY and might not support
2176 * hacks to evaluate it at compile time neither.
2177 */
2178 const unsigned posinf_int = 0x7F800000;
2179 LLVMValueRef cmp;
2180 LLVMValueRef flt_min = lp_build_const_vec(bld->gallivm, type, FLT_MIN);
2181 LLVMValueRef inf = lp_build_const_int_vec(bld->gallivm, type, posinf_int);
2182
2183 inf = LLVMBuildBitCast(builder, inf, lp_build_vec_type(bld->gallivm, type), "");
2184
2185 res = lp_build_intrinsic_unary(builder, intrinsic, bld->vec_type, a);
2186
2187 for (i = 0; i < num_iterations; ++i) {
2188 res = lp_build_rsqrt_refine(bld, a, res);
2189 }
2190 cmp = lp_build_compare(bld->gallivm, type, PIPE_FUNC_LESS, a, flt_min);
2191 res = lp_build_select(bld, cmp, inf, res);
2192 cmp = lp_build_compare(bld->gallivm, type, PIPE_FUNC_EQUAL, a, inf);
2193 res = lp_build_select(bld, cmp, bld->zero, res);
2194 cmp = lp_build_compare(bld->gallivm, type, PIPE_FUNC_EQUAL, a, bld->one);
2195 res = lp_build_select(bld, cmp, bld->one, res);
2196 }
2197 else {
2198 /* rsqrt(1.0) != 1.0 here */
2199 res = lp_build_intrinsic_unary(builder, intrinsic, bld->vec_type, a);
2200
2201 }
2202
2203 return res;
2204 }
2205
2206 return lp_build_rcp(bld, lp_build_sqrt(bld, a));
2207 }
2208
2209
2210 /**
2211 * Generate sin(a) using SSE2
2212 */
2213 LLVMValueRef
2214 lp_build_sin(struct lp_build_context *bld,
2215 LLVMValueRef a)
2216 {
2217 struct gallivm_state *gallivm = bld->gallivm;
2218 LLVMBuilderRef builder = gallivm->builder;
2219 struct lp_type int_type = lp_int_type(bld->type);
2220 LLVMBuilderRef b = builder;
2221
2222 /*
2223 * take the absolute value,
2224 * x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
2225 */
2226
2227 LLVMValueRef inv_sig_mask = lp_build_const_int_vec(gallivm, bld->type, ~0x80000000);
2228 LLVMValueRef a_v4si = LLVMBuildBitCast(b, a, bld->int_vec_type, "a_v4si");
2229
2230 LLVMValueRef absi = LLVMBuildAnd(b, a_v4si, inv_sig_mask, "absi");
2231 LLVMValueRef x_abs = LLVMBuildBitCast(b, absi, bld->vec_type, "x_abs");
2232
2233 /*
2234 * extract the sign bit (upper one)
2235 * sign_bit = _mm_and_ps(sign_bit, *(v4sf*)_ps_sign_mask);
2236 */
2237 LLVMValueRef sig_mask = lp_build_const_int_vec(gallivm, bld->type, 0x80000000);
2238 LLVMValueRef sign_bit_i = LLVMBuildAnd(b, a_v4si, sig_mask, "sign_bit_i");
2239
2240 /*
2241 * scale by 4/Pi
2242 * y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
2243 */
2244
2245 LLVMValueRef FOPi = lp_build_const_vec(gallivm, bld->type, 1.27323954473516);
2246 LLVMValueRef scale_y = LLVMBuildFMul(b, x_abs, FOPi, "scale_y");
2247
2248 /*
2249 * store the integer part of y in mm0
2250 * emm2 = _mm_cvttps_epi32(y);
2251 */
2252
2253 LLVMValueRef emm2_i = LLVMBuildFPToSI(b, scale_y, bld->int_vec_type, "emm2_i");
2254
2255 /*
2256 * j=(j+1) & (~1) (see the cephes sources)
2257 * emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
2258 */
2259
2260 LLVMValueRef all_one = lp_build_const_int_vec(gallivm, bld->type, 1);
2261 LLVMValueRef emm2_add = LLVMBuildAdd(b, emm2_i, all_one, "emm2_add");
2262 /*
2263 * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
2264 */
2265 LLVMValueRef inv_one = lp_build_const_int_vec(gallivm, bld->type, ~1);
2266 LLVMValueRef emm2_and = LLVMBuildAnd(b, emm2_add, inv_one, "emm2_and");
2267
2268 /*
2269 * y = _mm_cvtepi32_ps(emm2);
2270 */
2271 LLVMValueRef y_2 = LLVMBuildSIToFP(b, emm2_and, bld->vec_type, "y_2");
2272
2273 /* get the swap sign flag
2274 * emm0 = _mm_and_si128(emm2, *(v4si*)_pi32_4);
2275 */
2276 LLVMValueRef pi32_4 = lp_build_const_int_vec(gallivm, bld->type, 4);
2277 LLVMValueRef emm0_and = LLVMBuildAnd(b, emm2_add, pi32_4, "emm0_and");
2278
2279 /*
2280 * emm2 = _mm_slli_epi32(emm0, 29);
2281 */
2282 LLVMValueRef const_29 = lp_build_const_int_vec(gallivm, bld->type, 29);
2283 LLVMValueRef swap_sign_bit = LLVMBuildShl(b, emm0_and, const_29, "swap_sign_bit");
2284
2285 /*
2286    * get the polynomial selection mask:
2287    * there is one polynomial for 0 <= x <= Pi/4
2288    * and another one for Pi/4 < x <= Pi/2
2289 * Both branches will be computed.
2290 *
2291 * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
2292 * emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
2293 */
2294
2295 LLVMValueRef pi32_2 = lp_build_const_int_vec(gallivm, bld->type, 2);
2296 LLVMValueRef emm2_3 = LLVMBuildAnd(b, emm2_and, pi32_2, "emm2_3");
2297 LLVMValueRef poly_mask = lp_build_compare(gallivm,
2298 int_type, PIPE_FUNC_EQUAL,
2299 emm2_3, lp_build_const_int_vec(gallivm, bld->type, 0));
2300 /*
2301 * sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
2302 */
2303 LLVMValueRef sign_bit_1 = LLVMBuildXor(b, sign_bit_i, swap_sign_bit, "sign_bit");
2304
2305 /*
2306 * _PS_CONST(minus_cephes_DP1, -0.78515625);
2307 * _PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
2308 * _PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
2309 */
2310 LLVMValueRef DP1 = lp_build_const_vec(gallivm, bld->type, -0.78515625);
2311 LLVMValueRef DP2 = lp_build_const_vec(gallivm, bld->type, -2.4187564849853515625e-4);
2312 LLVMValueRef DP3 = lp_build_const_vec(gallivm, bld->type, -3.77489497744594108e-8);
2313
2314 /*
2315 * The magic pass: "Extended precision modular arithmetic"
2316 * x = ((x - y * DP1) - y * DP2) - y * DP3;
2317 * xmm1 = _mm_mul_ps(y, xmm1);
2318 * xmm2 = _mm_mul_ps(y, xmm2);
2319 * xmm3 = _mm_mul_ps(y, xmm3);
2320 */
2321 LLVMValueRef xmm1 = LLVMBuildFMul(b, y_2, DP1, "xmm1");
2322 LLVMValueRef xmm2 = LLVMBuildFMul(b, y_2, DP2, "xmm2");
2323 LLVMValueRef xmm3 = LLVMBuildFMul(b, y_2, DP3, "xmm3");
2324
2325 /*
2326 * x = _mm_add_ps(x, xmm1);
2327 * x = _mm_add_ps(x, xmm2);
2328 * x = _mm_add_ps(x, xmm3);
2329 */
2330
2331 LLVMValueRef x_1 = LLVMBuildFAdd(b, x_abs, xmm1, "x_1");
2332 LLVMValueRef x_2 = LLVMBuildFAdd(b, x_1, xmm2, "x_2");
2333 LLVMValueRef x_3 = LLVMBuildFAdd(b, x_2, xmm3, "x_3");
2334
2335 /*
2336    * Evaluate the first polynomial (0 <= x <= Pi/4)
2337 *
2338 * z = _mm_mul_ps(x,x);
2339 */
2340 LLVMValueRef z = LLVMBuildFMul(b, x_3, x_3, "z");
2341
2342 /*
2343 * _PS_CONST(coscof_p0, 2.443315711809948E-005);
2344 * _PS_CONST(coscof_p1, -1.388731625493765E-003);
2345 * _PS_CONST(coscof_p2, 4.166664568298827E-002);
2346 */
2347 LLVMValueRef coscof_p0 = lp_build_const_vec(gallivm, bld->type, 2.443315711809948E-005);
2348 LLVMValueRef coscof_p1 = lp_build_const_vec(gallivm, bld->type, -1.388731625493765E-003);
2349 LLVMValueRef coscof_p2 = lp_build_const_vec(gallivm, bld->type, 4.166664568298827E-002);
2350
2351 /*
2352 * y = *(v4sf*)_ps_coscof_p0;
2353 * y = _mm_mul_ps(y, z);
2354 */
2355 LLVMValueRef y_3 = LLVMBuildFMul(b, z, coscof_p0, "y_3");
2356 LLVMValueRef y_4 = LLVMBuildFAdd(b, y_3, coscof_p1, "y_4");
2357 LLVMValueRef y_5 = LLVMBuildFMul(b, y_4, z, "y_5");
2358 LLVMValueRef y_6 = LLVMBuildFAdd(b, y_5, coscof_p2, "y_6");
2359 LLVMValueRef y_7 = LLVMBuildFMul(b, y_6, z, "y_7");
2360 LLVMValueRef y_8 = LLVMBuildFMul(b, y_7, z, "y_8");
2361
2362
2363 /*
2364 * tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
2365 * y = _mm_sub_ps(y, tmp);
2366 * y = _mm_add_ps(y, *(v4sf*)_ps_1);
2367 */
2368 LLVMValueRef half = lp_build_const_vec(gallivm, bld->type, 0.5);
2369 LLVMValueRef tmp = LLVMBuildFMul(b, z, half, "tmp");
2370 LLVMValueRef y_9 = LLVMBuildFSub(b, y_8, tmp, "y_8");
2371 LLVMValueRef one = lp_build_const_vec(gallivm, bld->type, 1.0);
2372 LLVMValueRef y_10 = LLVMBuildFAdd(b, y_9, one, "y_9");
2373
2374 /*
2375 * _PS_CONST(sincof_p0, -1.9515295891E-4);
2376 * _PS_CONST(sincof_p1, 8.3321608736E-3);
2377 * _PS_CONST(sincof_p2, -1.6666654611E-1);
2378 */
2379 LLVMValueRef sincof_p0 = lp_build_const_vec(gallivm, bld->type, -1.9515295891E-4);
2380 LLVMValueRef sincof_p1 = lp_build_const_vec(gallivm, bld->type, 8.3321608736E-3);
2381 LLVMValueRef sincof_p2 = lp_build_const_vec(gallivm, bld->type, -1.6666654611E-1);
2382
2383 /*
2384    * Evaluate the second polynomial (Pi/4 < x <= Pi/2)
2385 *
2386 * y2 = *(v4sf*)_ps_sincof_p0;
2387 * y2 = _mm_mul_ps(y2, z);
2388 * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
2389 * y2 = _mm_mul_ps(y2, z);
2390 * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
2391 * y2 = _mm_mul_ps(y2, z);
2392 * y2 = _mm_mul_ps(y2, x);
2393 * y2 = _mm_add_ps(y2, x);
2394 */
2395
2396 LLVMValueRef y2_3 = LLVMBuildFMul(b, z, sincof_p0, "y2_3");
2397 LLVMValueRef y2_4 = LLVMBuildFAdd(b, y2_3, sincof_p1, "y2_4");
2398 LLVMValueRef y2_5 = LLVMBuildFMul(b, y2_4, z, "y2_5");
2399 LLVMValueRef y2_6 = LLVMBuildFAdd(b, y2_5, sincof_p2, "y2_6");
2400 LLVMValueRef y2_7 = LLVMBuildFMul(b, y2_6, z, "y2_7");
2401 LLVMValueRef y2_8 = LLVMBuildFMul(b, y2_7, x_3, "y2_8");
2402 LLVMValueRef y2_9 = LLVMBuildFAdd(b, y2_8, x_3, "y2_9");
2403
2404 /*
2405    * select the correct result from the two polynomials
2406 * xmm3 = poly_mask;
2407 * y2 = _mm_and_ps(xmm3, y2); //, xmm3);
2408 * y = _mm_andnot_ps(xmm3, y);
2409 * y = _mm_add_ps(y,y2);
2410 */
2411 LLVMValueRef y2_i = LLVMBuildBitCast(b, y2_9, bld->int_vec_type, "y2_i");
2412 LLVMValueRef y_i = LLVMBuildBitCast(b, y_10, bld->int_vec_type, "y_i");
2413 LLVMValueRef y2_and = LLVMBuildAnd(b, y2_i, poly_mask, "y2_and");
2414 LLVMValueRef inv = lp_build_const_int_vec(gallivm, bld->type, ~0);
2415 LLVMValueRef poly_mask_inv = LLVMBuildXor(b, poly_mask, inv, "poly_mask_inv");
2416 LLVMValueRef y_and = LLVMBuildAnd(b, y_i, poly_mask_inv, "y_and");
2417 LLVMValueRef y_combine = LLVMBuildAdd(b, y_and, y2_and, "y_combine");
2418
2419 /*
2420 * update the sign
2421 * y = _mm_xor_ps(y, sign_bit);
2422 */
2423 LLVMValueRef y_sign = LLVMBuildXor(b, y_combine, sign_bit_1, "y_sin");
2424 LLVMValueRef y_result = LLVMBuildBitCast(b, y_sign, bld->vec_type, "y_result");
2425 return y_result;
2426 }
2427
2428
2429 /**
2430 * Generate cos(a) using SSE2
2431 */
2432 LLVMValueRef
2433 lp_build_cos(struct lp_build_context *bld,
2434 LLVMValueRef a)
2435 {
2436 struct gallivm_state *gallivm = bld->gallivm;
2437 LLVMBuilderRef builder = gallivm->builder;
2438 struct lp_type int_type = lp_int_type(bld->type);
2439 LLVMBuilderRef b = builder;
2440
2441 /*
2442 * take the absolute value,
2443 * x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
2444 */
2445
2446 LLVMValueRef inv_sig_mask = lp_build_const_int_vec(gallivm, bld->type, ~0x80000000);
2447 LLVMValueRef a_v4si = LLVMBuildBitCast(b, a, bld->int_vec_type, "a_v4si");
2448
2449 LLVMValueRef absi = LLVMBuildAnd(b, a_v4si, inv_sig_mask, "absi");
2450 LLVMValueRef x_abs = LLVMBuildBitCast(b, absi, bld->vec_type, "x_abs");
2451
2452 /*
2453 * scale by 4/Pi
2454 * y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
2455 */
2456
2457 LLVMValueRef FOPi = lp_build_const_vec(gallivm, bld->type, 1.27323954473516);
2458 LLVMValueRef scale_y = LLVMBuildFMul(b, x_abs, FOPi, "scale_y");
2459
2460 /*
2461 * store the integer part of y in mm0
2462 * emm2 = _mm_cvttps_epi32(y);
2463 */
2464
2465 LLVMValueRef emm2_i = LLVMBuildFPToSI(b, scale_y, bld->int_vec_type, "emm2_i");
2466
2467 /*
2468 * j=(j+1) & (~1) (see the cephes sources)
2469 * emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
2470 */
2471
2472 LLVMValueRef all_one = lp_build_const_int_vec(gallivm, bld->type, 1);
2473 LLVMValueRef emm2_add = LLVMBuildAdd(b, emm2_i, all_one, "emm2_add");
2474 /*
2475 * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
2476 */
2477 LLVMValueRef inv_one = lp_build_const_int_vec(gallivm, bld->type, ~1);
2478 LLVMValueRef emm2_and = LLVMBuildAnd(b, emm2_add, inv_one, "emm2_and");
2479
2480 /*
2481 * y = _mm_cvtepi32_ps(emm2);
2482 */
2483 LLVMValueRef y_2 = LLVMBuildSIToFP(b, emm2_and, bld->vec_type, "y_2");
2484
2485
2486 /*
2487 * emm2 = _mm_sub_epi32(emm2, *(v4si*)_pi32_2);
2488 */
2489 LLVMValueRef const_2 = lp_build_const_int_vec(gallivm, bld->type, 2);
2490 LLVMValueRef emm2_2 = LLVMBuildSub(b, emm2_and, const_2, "emm2_2");
2491
2492
2493 /* get the swap sign flag
2494 * emm0 = _mm_andnot_si128(emm2, *(v4si*)_pi32_4);
2495 */
2496 LLVMValueRef inv = lp_build_const_int_vec(gallivm, bld->type, ~0);
2497 LLVMValueRef emm0_not = LLVMBuildXor(b, emm2_2, inv, "emm0_not");
2498 LLVMValueRef pi32_4 = lp_build_const_int_vec(gallivm, bld->type, 4);
2499 LLVMValueRef emm0_and = LLVMBuildAnd(b, emm0_not, pi32_4, "emm0_and");
2500
2501 /*
2502 * emm2 = _mm_slli_epi32(emm0, 29);
2503 */
2504 LLVMValueRef const_29 = lp_build_const_int_vec(gallivm, bld->type, 29);
2505 LLVMValueRef sign_bit = LLVMBuildShl(b, emm0_and, const_29, "sign_bit");
2506
2507 /*
2508    * get the polynomial selection mask:
2509    * there is one polynomial for 0 <= x <= Pi/4
2510    * and another one for Pi/4 < x <= Pi/2
2511 * Both branches will be computed.
2512 *
2513 * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
2514 * emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
2515 */
2516
2517 LLVMValueRef pi32_2 = lp_build_const_int_vec(gallivm, bld->type, 2);
2518 LLVMValueRef emm2_3 = LLVMBuildAnd(b, emm2_2, pi32_2, "emm2_3");
2519 LLVMValueRef poly_mask = lp_build_compare(gallivm,
2520 int_type, PIPE_FUNC_EQUAL,
2521 emm2_3, lp_build_const_int_vec(gallivm, bld->type, 0));
2522
2523 /*
2524 * _PS_CONST(minus_cephes_DP1, -0.78515625);
2525 * _PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
2526 * _PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
2527 */
2528 LLVMValueRef DP1 = lp_build_const_vec(gallivm, bld->type, -0.78515625);
2529 LLVMValueRef DP2 = lp_build_const_vec(gallivm, bld->type, -2.4187564849853515625e-4);
2530 LLVMValueRef DP3 = lp_build_const_vec(gallivm, bld->type, -3.77489497744594108e-8);
2531
2532 /*
2533 * The magic pass: "Extended precision modular arithmetic"
2534 * x = ((x - y * DP1) - y * DP2) - y * DP3;
2535 * xmm1 = _mm_mul_ps(y, xmm1);
2536 * xmm2 = _mm_mul_ps(y, xmm2);
2537 * xmm3 = _mm_mul_ps(y, xmm3);
2538 */
2539 LLVMValueRef xmm1 = LLVMBuildFMul(b, y_2, DP1, "xmm1");
2540 LLVMValueRef xmm2 = LLVMBuildFMul(b, y_2, DP2, "xmm2");
2541 LLVMValueRef xmm3 = LLVMBuildFMul(b, y_2, DP3, "xmm3");
2542
2543 /*
2544 * x = _mm_add_ps(x, xmm1);
2545 * x = _mm_add_ps(x, xmm2);
2546 * x = _mm_add_ps(x, xmm3);
2547 */
2548
2549 LLVMValueRef x_1 = LLVMBuildFAdd(b, x_abs, xmm1, "x_1");
2550 LLVMValueRef x_2 = LLVMBuildFAdd(b, x_1, xmm2, "x_2");
2551 LLVMValueRef x_3 = LLVMBuildFAdd(b, x_2, xmm3, "x_3");
2552
2553 /*
2554    * Evaluate the first polynomial (0 <= x <= Pi/4)
2555 *
2556 * z = _mm_mul_ps(x,x);
2557 */
2558 LLVMValueRef z = LLVMBuildFMul(b, x_3, x_3, "z");
2559
2560 /*
2561 * _PS_CONST(coscof_p0, 2.443315711809948E-005);
2562 * _PS_CONST(coscof_p1, -1.388731625493765E-003);
2563 * _PS_CONST(coscof_p2, 4.166664568298827E-002);
2564 */
2565 LLVMValueRef coscof_p0 = lp_build_const_vec(gallivm, bld->type, 2.443315711809948E-005);
2566 LLVMValueRef coscof_p1 = lp_build_const_vec(gallivm, bld->type, -1.388731625493765E-003);
2567 LLVMValueRef coscof_p2 = lp_build_const_vec(gallivm, bld->type, 4.166664568298827E-002);
2568
2569 /*
2570 * y = *(v4sf*)_ps_coscof_p0;
2571 * y = _mm_mul_ps(y, z);
2572 */
2573 LLVMValueRef y_3 = LLVMBuildFMul(b, z, coscof_p0, "y_3");
2574 LLVMValueRef y_4 = LLVMBuildFAdd(b, y_3, coscof_p1, "y_4");
2575 LLVMValueRef y_5 = LLVMBuildFMul(b, y_4, z, "y_5");
2576 LLVMValueRef y_6 = LLVMBuildFAdd(b, y_5, coscof_p2, "y_6");
2577 LLVMValueRef y_7 = LLVMBuildFMul(b, y_6, z, "y_7");
2578 LLVMValueRef y_8 = LLVMBuildFMul(b, y_7, z, "y_8");
2579
2580
2581 /*
2582 * tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
2583 * y = _mm_sub_ps(y, tmp);
2584 * y = _mm_add_ps(y, *(v4sf*)_ps_1);
2585 */
2586 LLVMValueRef half = lp_build_const_vec(gallivm, bld->type, 0.5);
2587 LLVMValueRef tmp = LLVMBuildFMul(b, z, half, "tmp");
2588 LLVMValueRef y_9 = LLVMBuildFSub(b, y_8, tmp, "y_8");
2589 LLVMValueRef one = lp_build_const_vec(gallivm, bld->type, 1.0);
2590 LLVMValueRef y_10 = LLVMBuildFAdd(b, y_9, one, "y_9");
2591
2592 /*
2593 * _PS_CONST(sincof_p0, -1.9515295891E-4);
2594 * _PS_CONST(sincof_p1, 8.3321608736E-3);
2595 * _PS_CONST(sincof_p2, -1.6666654611E-1);
2596 */
2597 LLVMValueRef sincof_p0 = lp_build_const_vec(gallivm, bld->type, -1.9515295891E-4);
2598 LLVMValueRef sincof_p1 = lp_build_const_vec(gallivm, bld->type, 8.3321608736E-3);
2599 LLVMValueRef sincof_p2 = lp_build_const_vec(gallivm, bld->type, -1.6666654611E-1);
2600
2601 /*
2602    * Evaluate the second polynomial (Pi/4 < x <= Pi/2)
2603 *
2604 * y2 = *(v4sf*)_ps_sincof_p0;
2605 * y2 = _mm_mul_ps(y2, z);
2606 * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
2607 * y2 = _mm_mul_ps(y2, z);
2608 * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
2609 * y2 = _mm_mul_ps(y2, z);
2610 * y2 = _mm_mul_ps(y2, x);
2611 * y2 = _mm_add_ps(y2, x);
2612 */
2613
2614 LLVMValueRef y2_3 = LLVMBuildFMul(b, z, sincof_p0, "y2_3");
2615 LLVMValueRef y2_4 = LLVMBuildFAdd(b, y2_3, sincof_p1, "y2_4");
2616 LLVMValueRef y2_5 = LLVMBuildFMul(b, y2_4, z, "y2_5");
2617 LLVMValueRef y2_6 = LLVMBuildFAdd(b, y2_5, sincof_p2, "y2_6");
2618 LLVMValueRef y2_7 = LLVMBuildFMul(b, y2_6, z, "y2_7");
2619 LLVMValueRef y2_8 = LLVMBuildFMul(b, y2_7, x_3, "y2_8");
2620 LLVMValueRef y2_9 = LLVMBuildFAdd(b, y2_8, x_3, "y2_9");
2621
2622 /*
2623    * select the correct result from the two polynomials
2624 * xmm3 = poly_mask;
2625 * y2 = _mm_and_ps(xmm3, y2); //, xmm3);
2626 * y = _mm_andnot_ps(xmm3, y);
2627 * y = _mm_add_ps(y,y2);
2628 */
2629 LLVMValueRef y2_i = LLVMBuildBitCast(b, y2_9, bld->int_vec_type, "y2_i");
2630 LLVMValueRef y_i = LLVMBuildBitCast(b, y_10, bld->int_vec_type, "y_i");
2631 LLVMValueRef y2_and = LLVMBuildAnd(b, y2_i, poly_mask, "y2_and");
2632 LLVMValueRef poly_mask_inv = LLVMBuildXor(b, poly_mask, inv, "poly_mask_inv");
2633 LLVMValueRef y_and = LLVMBuildAnd(b, y_i, poly_mask_inv, "y_and");
2634 LLVMValueRef y_combine = LLVMBuildAdd(b, y_and, y2_and, "y_combine");
2635
2636 /*
2637 * update the sign
2638 * y = _mm_xor_ps(y, sign_bit);
2639 */
2640 LLVMValueRef y_sign = LLVMBuildXor(b, y_combine, sign_bit, "y_sin");
2641 LLVMValueRef y_result = LLVMBuildBitCast(b, y_sign, bld->vec_type, "y_result");
2642 return y_result;
2643 }
2644
2645
2646 /**
2647 * Generate pow(x, y)
2648 */
2649 LLVMValueRef
2650 lp_build_pow(struct lp_build_context *bld,
2651 LLVMValueRef x,
2652 LLVMValueRef y)
2653 {
2654 /* TODO: optimize the constant case */
2655 if (gallivm_debug & GALLIVM_DEBUG_PERF &&
2656 LLVMIsConstant(x) && LLVMIsConstant(y)) {
2657 debug_printf("%s: inefficient/imprecise constant arithmetic\n",
2658 __FUNCTION__);
2659 }
2660
2661 return lp_build_exp2(bld, lp_build_mul(bld, lp_build_log2(bld, x), y));
2662 }
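

/*
 * The identity used above, in scalar form (reference only, hypothetical
 * helper; assumes a C99 libm): x^y == 2^(y * log2(x)) for x > 0.
 */
static INLINE float
pow_via_exp2_log2_reference(float x, float y)
{
   return exp2f(y * log2f(x));
}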
2663
2664
2665 /**
2666 * Generate exp(x)
2667 */
2668 LLVMValueRef
2669 lp_build_exp(struct lp_build_context *bld,
2670 LLVMValueRef x)
2671 {
2672 /* log2(e) = 1/log(2) */
2673 LLVMValueRef log2e = lp_build_const_vec(bld->gallivm, bld->type,
2674 1.4426950408889634);
2675
2676 assert(lp_check_value(bld->type, x));
2677
2678 return lp_build_exp2(bld, lp_build_mul(bld, log2e, x));
2679 }
2680
2681
2682 /**
2683 * Generate log(x)
2684 */
2685 LLVMValueRef
2686 lp_build_log(struct lp_build_context *bld,
2687 LLVMValueRef x)
2688 {
2689 /* log(2) */
2690 LLVMValueRef log2 = lp_build_const_vec(bld->gallivm, bld->type,
2691 0.69314718055994529);
2692
2693 assert(lp_check_value(bld->type, x));
2694
2695 return lp_build_mul(bld, log2, lp_build_log2(bld, x));
2696 }
2697
2698
2699 /**
2700 * Generate polynomial.
2701 * Ex: coeffs[0] + x * coeffs[1] + x^2 * coeffs[2].
2702 */
2703 static LLVMValueRef
2704 lp_build_polynomial(struct lp_build_context *bld,
2705 LLVMValueRef x,
2706 const double *coeffs,
2707 unsigned num_coeffs)
2708 {
2709 const struct lp_type type = bld->type;
2710 LLVMValueRef even = NULL, odd = NULL;
2711 LLVMValueRef x2;
2712 unsigned i;
2713
2714 assert(lp_check_value(bld->type, x));
2715
2716 /* TODO: optimize the constant case */
2717 if (gallivm_debug & GALLIVM_DEBUG_PERF &&
2718 LLVMIsConstant(x)) {
2719 debug_printf("%s: inefficient/imprecise constant arithmetic\n",
2720 __FUNCTION__);
2721 }
2722
2723 /*
2724    * Calculate odd and even terms separately to decrease data dependency
2725 * Ex:
2726 * c[0] + x^2 * c[2] + x^4 * c[4] ...
2727 * + x * (c[1] + x^2 * c[3] + x^4 * c[5]) ...
2728 */
2729 x2 = lp_build_mul(bld, x, x);
2730
2731 for (i = num_coeffs; i--; ) {
2732 LLVMValueRef coeff;
2733
2734 coeff = lp_build_const_vec(bld->gallivm, type, coeffs[i]);
2735
2736 if (i % 2 == 0) {
2737 if (even)
2738 even = lp_build_add(bld, coeff, lp_build_mul(bld, x2, even));
2739 else
2740 even = coeff;
2741 } else {
2742 if (odd)
2743 odd = lp_build_add(bld, coeff, lp_build_mul(bld, x2, odd));
2744 else
2745 odd = coeff;
2746 }
2747 }
2748
2749 if (odd)
2750 return lp_build_add(bld, lp_build_mul(bld, odd, x), even);
2751 else if (even)
2752 return even;
2753 else
2754 return bld->undef;
2755 }
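

/*
 * Scalar reference (hypothetical helper) of the even/odd split used above:
 * both halves are Horner chains in x^2 that can be evaluated independently
 * and recombined as odd(x^2) * x + even(x^2).
 */
static INLINE double
polynomial_scalar_reference(double x, const double *coeffs, unsigned num_coeffs)
{
   double x2 = x * x;
   double even = 0.0, odd = 0.0;
   unsigned i;

   for (i = num_coeffs; i--; ) {
      if (i % 2 == 0)
         even = coeffs[i] + x2 * even;
      else
         odd = coeffs[i] + x2 * odd;
   }

   return odd * x + even;
}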
2756
2757
2758 /**
2759 * Minimax polynomial fit of 2**x, in range [0, 1[
2760 */
2761 const double lp_build_exp2_polynomial[] = {
2762 #if EXP_POLY_DEGREE == 5
2763 0.999999925063526176901,
2764 0.693153073200168932794,
2765 0.240153617044375388211,
2766 0.0558263180532956664775,
2767 0.00898934009049466391101,
2768 0.00187757667519147912699
2769 #elif EXP_POLY_DEGREE == 4
2770 1.00000259337069434683,
2771 0.693003834469974940458,
2772 0.24144275689150793076,
2773 0.0520114606103070150235,
2774 0.0135341679161270268764
2775 #elif EXP_POLY_DEGREE == 3
2776 0.999925218562710312959,
2777 0.695833540494823811697,
2778 0.226067155427249155588,
2779 0.0780245226406372992967
2780 #elif EXP_POLY_DEGREE == 2
2781 1.00172476321474503578,
2782 0.657636275736077639316,
2783 0.33718943461968720704
2784 #else
2785 #error
2786 #endif
2787 };
2788
2789
2790 void
2791 lp_build_exp2_approx(struct lp_build_context *bld,
2792 LLVMValueRef x,
2793 LLVMValueRef *p_exp2_int_part,
2794 LLVMValueRef *p_frac_part,
2795 LLVMValueRef *p_exp2)
2796 {
2797 LLVMBuilderRef builder = bld->gallivm->builder;
2798 const struct lp_type type = bld->type;
2799 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
2800 LLVMValueRef ipart = NULL;
2801 LLVMValueRef fpart = NULL;
2802 LLVMValueRef expipart = NULL;
2803 LLVMValueRef expfpart = NULL;
2804 LLVMValueRef res = NULL;
2805
2806 assert(lp_check_value(bld->type, x));
2807
2808 if(p_exp2_int_part || p_frac_part || p_exp2) {
2809 /* TODO: optimize the constant case */
2810 if (gallivm_debug & GALLIVM_DEBUG_PERF &&
2811 LLVMIsConstant(x)) {
2812 debug_printf("%s: inefficient/imprecise constant arithmetic\n",
2813 __FUNCTION__);
2814 }
2815
2816 assert(type.floating && type.width == 32);
2817
2818 x = lp_build_min(bld, x, lp_build_const_vec(bld->gallivm, type, 129.0));
2819 x = lp_build_max(bld, x, lp_build_const_vec(bld->gallivm, type, -126.99999));
2820
2821 /* ipart = floor(x) */
2822 /* fpart = x - ipart */
2823 lp_build_ifloor_fract(bld, x, &ipart, &fpart);
2824 }
2825
2826 if(p_exp2_int_part || p_exp2) {
2827 /* expipart = (float) (1 << ipart) */
2828 expipart = LLVMBuildAdd(builder, ipart,
2829 lp_build_const_int_vec(bld->gallivm, type, 127), "");
2830 expipart = LLVMBuildShl(builder, expipart,
2831 lp_build_const_int_vec(bld->gallivm, type, 23), "");
2832 expipart = LLVMBuildBitCast(builder, expipart, vec_type, "");
2833 }
2834
2835 if(p_exp2) {
2836 expfpart = lp_build_polynomial(bld, fpart, lp_build_exp2_polynomial,
2837 Elements(lp_build_exp2_polynomial));
2838
2839 res = LLVMBuildFMul(builder, expipart, expfpart, "");
2840 }
2841
2842 if(p_exp2_int_part)
2843 *p_exp2_int_part = expipart;
2844
2845 if(p_frac_part)
2846 *p_frac_part = fpart;
2847
2848 if(p_exp2)
2849 *p_exp2 = res;
2850 }
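

/*
 * Scalar sketch of the decomposition above (reference only, hypothetical
 * helper; assumes floorf() and x already clamped as done above):
 * 2^x = 2^ipart * 2^fpart, with 2^ipart built directly in the exponent
 * bits and 2^fpart taken from the minimax polynomial (using the scalar
 * polynomial sketch above).
 */
static INLINE float
exp2_scalar_reference(float x)
{
   union { float f; unsigned i; } u;
   int ipart = (int)floorf(x);
   float fpart = x - (float)ipart;

   u.i = (unsigned)(ipart + 127) << 23;   /* (float)(1 << ipart) */
   return u.f * (float)polynomial_scalar_reference(fpart, lp_build_exp2_polynomial,
                                                   Elements(lp_build_exp2_polynomial));
}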
2851
2852
2853 LLVMValueRef
2854 lp_build_exp2(struct lp_build_context *bld,
2855 LLVMValueRef x)
2856 {
2857 LLVMValueRef res;
2858 lp_build_exp2_approx(bld, x, NULL, NULL, &res);
2859 return res;
2860 }
2861
2862
2863 /**
2864  * Extract the exponent of an IEEE-754 floating point value.
2865 *
2866 * Optionally apply an integer bias.
2867 *
2868 * Result is an integer value with
2869 *
2870 * ifloor(log2(x)) + bias
2871 */
2872 LLVMValueRef
2873 lp_build_extract_exponent(struct lp_build_context *bld,
2874 LLVMValueRef x,
2875 int bias)
2876 {
2877 LLVMBuilderRef builder = bld->gallivm->builder;
2878 const struct lp_type type = bld->type;
2879 unsigned mantissa = lp_mantissa(type);
2880 LLVMValueRef res;
2881
2882 assert(type.floating);
2883
2884 assert(lp_check_value(bld->type, x));
2885
2886 x = LLVMBuildBitCast(builder, x, bld->int_vec_type, "");
2887
2888 res = LLVMBuildLShr(builder, x,
2889 lp_build_const_int_vec(bld->gallivm, type, mantissa), "");
2890 res = LLVMBuildAnd(builder, res,
2891 lp_build_const_int_vec(bld->gallivm, type, 255), "");
2892 res = LLVMBuildSub(builder, res,
2893 lp_build_const_int_vec(bld->gallivm, type, 127 - bias), "");
2894
2895 return res;
2896 }
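

/*
 * Scalar equivalent of the extraction above (reference only, hypothetical
 * helper), for 32-bit floats.
 */
static INLINE int
extract_exponent_scalar_reference(float x, int bias)
{
   union { float f; unsigned i; } u;
   u.f = x;
   return (int)((u.i >> 23) & 0xff) - (127 - bias);
}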
2897
2898
2899 /**
2900  * Extract the mantissa of a floating point value.
2901  *
2902  * Result is a floating point value with
2903  *
2904  *   x / 2**floor(log2(x))
2905 */
2906 LLVMValueRef
2907 lp_build_extract_mantissa(struct lp_build_context *bld,
2908 LLVMValueRef x)
2909 {
2910 LLVMBuilderRef builder = bld->gallivm->builder;
2911 const struct lp_type type = bld->type;
2912 unsigned mantissa = lp_mantissa(type);
2913 LLVMValueRef mantmask = lp_build_const_int_vec(bld->gallivm, type,
2914 (1ULL << mantissa) - 1);
2915 LLVMValueRef one = LLVMConstBitCast(bld->one, bld->int_vec_type);
2916 LLVMValueRef res;
2917
2918 assert(lp_check_value(bld->type, x));
2919
2920 assert(type.floating);
2921
2922 x = LLVMBuildBitCast(builder, x, bld->int_vec_type, "");
2923
2924 /* res = x / 2**ipart */
2925 res = LLVMBuildAnd(builder, x, mantmask, "");
2926 res = LLVMBuildOr(builder, res, one, "");
2927 res = LLVMBuildBitCast(builder, res, bld->vec_type, "");
2928
2929 return res;
2930 }
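

/*
 * Scalar equivalent of the extraction above (reference only, hypothetical
 * helper): keep the mantissa bits and force the exponent of 1.0, yielding
 * x / 2**floor(log2(x)) in [1, 2).
 */
static INLINE float
extract_mantissa_scalar_reference(float x)
{
   union { float f; unsigned i; } u;
   u.f = x;
   u.i = (u.i & 0x007fffff) | 0x3f800000;
   return u.f;
}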
2931
2932
2933
2934 /**
2935  * Minimax polynomial fit of log2((1.0 + sqrt(x))/(1.0 - sqrt(x)))/sqrt(x), for x in range [0, 1/9[
2936  * These coefficients can be generated with
2937 * http://www.boost.org/doc/libs/1_36_0/libs/math/doc/sf_and_dist/html/math_toolkit/toolkit/internals2/minimax.html
2938 */
2939 const double lp_build_log2_polynomial[] = {
2940 #if LOG_POLY_DEGREE == 5
2941 2.88539008148777786488L,
2942 0.961796878841293367824L,
2943 0.577058946784739859012L,
2944 0.412914355135828735411L,
2945 0.308591899232910175289L,
2946 0.352376952300281371868L,
2947 #elif LOG_POLY_DEGREE == 4
2948 2.88539009343309178325L,
2949 0.961791550404184197881L,
2950 0.577440339438736392009L,
2951 0.403343858251329912514L,
2952 0.406718052498846252698L,
2953 #elif LOG_POLY_DEGREE == 3
2954 2.88538959748872753838L,
2955 0.961932915889597772928L,
2956 0.571118517972136195241L,
2957 0.493997535084709500285L,
2958 #else
2959 #error
2960 #endif
2961 };
2962
2963 /**
2964 * See http://www.devmaster.net/forums/showthread.php?p=43580
2965 * http://en.wikipedia.org/wiki/Logarithm#Calculation
2966 * http://www.nezumi.demon.co.uk/consult/logx.htm
2967 */
2968 void
2969 lp_build_log2_approx(struct lp_build_context *bld,
2970 LLVMValueRef x,
2971 LLVMValueRef *p_exp,
2972 LLVMValueRef *p_floor_log2,
2973 LLVMValueRef *p_log2)
2974 {
2975 LLVMBuilderRef builder = bld->gallivm->builder;
2976 const struct lp_type type = bld->type;
2977 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
2978 LLVMTypeRef int_vec_type = lp_build_int_vec_type(bld->gallivm, type);
2979
2980 LLVMValueRef expmask = lp_build_const_int_vec(bld->gallivm, type, 0x7f800000);
2981 LLVMValueRef mantmask = lp_build_const_int_vec(bld->gallivm, type, 0x007fffff);
2982 LLVMValueRef one = LLVMConstBitCast(bld->one, int_vec_type);
2983
2984 LLVMValueRef i = NULL;
2985 LLVMValueRef y = NULL;
2986 LLVMValueRef z = NULL;
2987 LLVMValueRef exp = NULL;
2988 LLVMValueRef mant = NULL;
2989 LLVMValueRef logexp = NULL;
2990 LLVMValueRef logmant = NULL;
2991 LLVMValueRef res = NULL;
2992
2993 assert(lp_check_value(bld->type, x));
2994
2995 if(p_exp || p_floor_log2 || p_log2) {
2996 /* TODO: optimize the constant case */
2997 if (gallivm_debug & GALLIVM_DEBUG_PERF &&
2998 LLVMIsConstant(x)) {
2999 debug_printf("%s: inefficient/imprecise constant arithmetic\n",
3000 __FUNCTION__);
3001 }
3002
3003 assert(type.floating && type.width == 32);
3004
3005 /*
3006 * We don't explicitly handle denormalized numbers. They will yield a
3007        * result in the neighbourhood of -127, which appears to be
3008        * adequate.
3009 */
3010
3011 i = LLVMBuildBitCast(builder, x, int_vec_type, "");
3012
3013 /* exp = (float) exponent(x) */
3014 exp = LLVMBuildAnd(builder, i, expmask, "");
3015 }
3016
3017 if(p_floor_log2 || p_log2) {
3018 logexp = LLVMBuildLShr(builder, exp, lp_build_const_int_vec(bld->gallivm, type, 23), "");
3019 logexp = LLVMBuildSub(builder, logexp, lp_build_const_int_vec(bld->gallivm, type, 127), "");
3020 logexp = LLVMBuildSIToFP(builder, logexp, vec_type, "");
3021 }
3022
3023 if(p_log2) {
3024 /* mant = 1 + (float) mantissa(x) */
3025 mant = LLVMBuildAnd(builder, i, mantmask, "");
3026 mant = LLVMBuildOr(builder, mant, one, "");
3027 mant = LLVMBuildBitCast(builder, mant, vec_type, "");
3028
3029 /* y = (mant - 1) / (mant + 1) */
3030 y = lp_build_div(bld,
3031 lp_build_sub(bld, mant, bld->one),
3032 lp_build_add(bld, mant, bld->one)
3033 );
3034
3035 /* z = y^2 */
3036 z = lp_build_mul(bld, y, y);
3037
3038 /* compute P(z) */
3039 logmant = lp_build_polynomial(bld, z, lp_build_log2_polynomial,
3040 Elements(lp_build_log2_polynomial));
3041
3042 /* logmant = y * P(z) */
3043 logmant = lp_build_mul(bld, y, logmant);
3044
3045 res = lp_build_add(bld, logmant, logexp);
3046 }
3047
3048 if(p_exp) {
3049 exp = LLVMBuildBitCast(builder, exp, vec_type, "");
3050 *p_exp = exp;
3051 }
3052
3053 if(p_floor_log2)
3054 *p_floor_log2 = logexp;
3055
3056 if(p_log2)
3057 *p_log2 = res;
3058 }
3059
3060
3061 LLVMValueRef
3062 lp_build_log2(struct lp_build_context *bld,
3063 LLVMValueRef x)
3064 {
3065 LLVMValueRef res;
3066 lp_build_log2_approx(bld, x, NULL, NULL, &res);
3067 return res;
3068 }
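

/*
 * Scalar sketch of the reduction performed by lp_build_log2_approx()
 * (reference only, built on the hypothetical scalar helpers above):
 *
 *    log2(x) = exponent(x) + log2(mant)
 *    log2(mant) = y * P(y^2),  y = (mant - 1) / (mant + 1)
 *
 * where y * P(y^2) approximates 2*atanh(y)/ln(2) over the reduced range.
 */
static INLINE double
log2_scalar_reference(float x)
{
   int exponent = extract_exponent_scalar_reference(x, 0);
   float mant = extract_mantissa_scalar_reference(x);
   double y = (mant - 1.0) / (mant + 1.0);
   double p = polynomial_scalar_reference(y * y, lp_build_log2_polynomial,
                                          Elements(lp_build_log2_polynomial));
   return (double)exponent + y * p;
}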
3069
3070
3071 /**
3072 * Faster (and less accurate) log2.
3073 *
3074 * log2(x) = floor(log2(x)) - 1 + x / 2**floor(log2(x))
3075 *
3076 * Piece-wise linear approximation, with exact results when x is a
3077 * power of two.
3078 *
3079 * See http://www.flipcode.com/archives/Fast_log_Function.shtml
3080 */
3081 LLVMValueRef
3082 lp_build_fast_log2(struct lp_build_context *bld,
3083 LLVMValueRef x)
3084 {
3085 LLVMBuilderRef builder = bld->gallivm->builder;
3086 LLVMValueRef ipart;
3087 LLVMValueRef fpart;
3088
3089 assert(lp_check_value(bld->type, x));
3090
3091 assert(bld->type.floating);
3092
3093 /* ipart = floor(log2(x)) - 1 */
3094 ipart = lp_build_extract_exponent(bld, x, -1);
3095 ipart = LLVMBuildSIToFP(builder, ipart, bld->vec_type, "");
3096
3097 /* fpart = x / 2**ipart */
3098 fpart = lp_build_extract_mantissa(bld, x);
3099
3100 /* ipart + fpart */
3101 return LLVMBuildFAdd(builder, ipart, fpart, "");
3102 }
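

/*
 * Scalar picture of the piece-wise linear approximation above (reference
 * only, hypothetical helper): the exponent supplies floor(log2(x)) - 1 and
 * the mantissa supplies x / 2**floor(log2(x)) in [1, 2).
 */
static INLINE float
fast_log2_scalar_reference(float x)
{
   union { float f; unsigned i; } u;
   float ipart, fpart;

   u.f = x;
   ipart = (float)((int)((u.i >> 23) & 0xff) - 128);   /* floor(log2(x)) - 1 */
   u.i = (u.i & 0x007fffff) | 0x3f800000;              /* x / 2**floor(log2(x)) */
   fpart = u.f;

   return ipart + fpart;
}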
3103
3104
3105 /**
3106 * Fast implementation of iround(log2(x)).
3107 *
3108 * Not an approximation -- it should give accurate results all the time.
3109 */
3110 LLVMValueRef
3111 lp_build_ilog2(struct lp_build_context *bld,
3112 LLVMValueRef x)
3113 {
3114 LLVMBuilderRef builder = bld->gallivm->builder;
3115 LLVMValueRef sqrt2 = lp_build_const_vec(bld->gallivm, bld->type, M_SQRT2);
3116 LLVMValueRef ipart;
3117
3118 assert(bld->type.floating);
3119
3120 assert(lp_check_value(bld->type, x));
3121
3122    /* x * 2^0.5, i.e., add 0.5 to log2(x) */
3123 x = LLVMBuildFMul(builder, x, sqrt2, "");
3124
3125 /* ipart = floor(log2(x) + 0.5) */
3126 ipart = lp_build_extract_exponent(bld, x, 0);
3127
3128 return ipart;
3129 }
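

/*
 * Scalar picture of the trick above (reference only, hypothetical helper):
 * multiplying by 2^0.5 shifts log2(x) by +0.5, so truncating the exponent
 * afterwards yields round-to-nearest of log2(x).
 */
static INLINE int
ilog2_scalar_reference(float x)
{
   union { float f; unsigned i; } u;
   u.f = x * 1.4142135623730951f;   /* x * sqrt(2) */
   return (int)((u.i >> 23) & 0xff) - 127;
}
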
3130
3131 LLVMValueRef
3132 lp_build_mod(struct lp_build_context *bld,
3133 LLVMValueRef x,
3134 LLVMValueRef y)
3135 {
3136 LLVMBuilderRef builder = bld->gallivm->builder;
3137 LLVMValueRef res;
3138 const struct lp_type type = bld->type;
3139
3140 assert(lp_check_value(type, x));
3141 assert(lp_check_value(type, y));
3142
3143 if (type.floating)
3144 res = LLVMBuildFRem(builder, x, y, "");
3145 else if (type.sign)
3146 res = LLVMBuildSRem(builder, x, y, "");
3147 else
3148 res = LLVMBuildURem(builder, x, y, "");
3149 return res;
3150 }