gallivm: Altivec vector max/min intrisics
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_arit.c
1 /**************************************************************************
2 *
3 * Copyright 2009-2010 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 /**
30 * @file
31 * Helper
32 *
33 * LLVM IR doesn't support all basic arithmetic operations we care about (most
34 * notably min/max and saturated operations), and it is often necessary to
35 * resort machine-specific intrinsics directly. The functions here hide all
36 * these implementation details from the other modules.
37 *
38 * We also do simple expressions simplification here. Reasons are:
39 * - it is very easy given we have all necessary information readily available
40 * - LLVM optimization passes fail to simplify several vector expressions
41 * - We often know value constraints which the optimization passes have no way
42 * of knowing, such as when source arguments are known to be in [0, 1] range.
43 *
44 * @author Jose Fonseca <jfonseca@vmware.com>
45 */
46
47
48 #include "util/u_memory.h"
49 #include "util/u_debug.h"
50 #include "util/u_math.h"
51 #include "util/u_string.h"
52 #include "util/u_cpu_detect.h"
53
54 #include "lp_bld_type.h"
55 #include "lp_bld_const.h"
56 #include "lp_bld_init.h"
57 #include "lp_bld_intr.h"
58 #include "lp_bld_logic.h"
59 #include "lp_bld_pack.h"
60 #include "lp_bld_debug.h"
61 #include "lp_bld_arit.h"
62
63 #include "float.h"
64
65 #define EXP_POLY_DEGREE 5
66
67 #define LOG_POLY_DEGREE 4
68
69
70 /**
71 * Generate min(a, b)
72 * No checks for special case values of a or b = 1 or 0 are done.
73 */
74 static LLVMValueRef
75 lp_build_min_simple(struct lp_build_context *bld,
76 LLVMValueRef a,
77 LLVMValueRef b)
78 {
79 const struct lp_type type = bld->type;
80 const char *intrinsic = NULL;
81 unsigned intr_size = 0;
82 LLVMValueRef cond;
83
84 assert(lp_check_value(type, a));
85 assert(lp_check_value(type, b));
86
87 /* TODO: optimize the constant case */
88
89 if (type.floating && util_cpu_caps.has_sse) {
90 if (type.width == 32) {
91 if (type.length == 1) {
92 intrinsic = "llvm.x86.sse.min.ss";
93 intr_size = 128;
94 }
95 else if (type.length <= 4 || !util_cpu_caps.has_avx) {
96 intrinsic = "llvm.x86.sse.min.ps";
97 intr_size = 128;
98 }
99 else {
100 intrinsic = "llvm.x86.avx.min.ps.256";
101 intr_size = 256;
102 }
103 }
104 if (type.width == 64 && util_cpu_caps.has_sse2) {
105 if (type.length == 1) {
106 intrinsic = "llvm.x86.sse2.min.sd";
107 intr_size = 128;
108 }
109 else if (type.length == 2 || !util_cpu_caps.has_avx) {
110 intrinsic = "llvm.x86.sse2.min.pd";
111 intr_size = 128;
112 }
113 else {
114 intrinsic = "llvm.x86.avx.min.pd.256";
115 intr_size = 256;
116 }
117 }
118 }
119 else if (type.floating && util_cpu_caps.has_altivec) {
120 if (type.width == 32 && type.length == 4) {
121 intrinsic = "llvm.ppc.altivec.vminfp";
122 intr_size = 128;
123 }
124 } else if (util_cpu_caps.has_sse2 && type.length >= 2) {
125 intr_size = 128;
126 if ((type.width == 8 || type.width == 16) &&
127 (type.width * type.length <= 64) &&
128 (gallivm_debug & GALLIVM_DEBUG_PERF)) {
129 debug_printf("%s: inefficient code, bogus shuffle due to packing\n",
130 __FUNCTION__);
131 }
132 if (type.width == 8 && !type.sign) {
133 intrinsic = "llvm.x86.sse2.pminu.b";
134 }
135 else if (type.width == 16 && type.sign) {
136 intrinsic = "llvm.x86.sse2.pmins.w";
137 }
138 if (util_cpu_caps.has_sse4_1) {
139 if (type.width == 8 && type.sign) {
140 intrinsic = "llvm.x86.sse41.pminsb";
141 }
142 if (type.width == 16 && !type.sign) {
143 intrinsic = "llvm.x86.sse41.pminuw";
144 }
145 if (type.width == 32 && !type.sign) {
146 intrinsic = "llvm.x86.sse41.pminud";
147 }
148 if (type.width == 32 && type.sign) {
149 intrinsic = "llvm.x86.sse41.pminsd";
150 }
151 }
152 } else if (util_cpu_caps.has_altivec) {
153 intr_size = 128;
154 if (type.width == 8) {
155 if (!type.sign) {
156 intrinsic = "llvm.ppc.altivec.vminub";
157 } else {
158 intrinsic = "llvm.ppc.altivec.vminsb";
159 }
160 } else if (type.width == 16) {
161 if (!type.sign) {
162 intrinsic = "llvm.ppc.altivec.vminuh";
163 } else {
164 intrinsic = "llvm.ppc.altivec.vminsh";
165 }
166 } else if (type.width == 32) {
167 if (!type.sign) {
168 intrinsic = "llvm.ppc.altivec.vminuw";
169 } else {
170 intrinsic = "llvm.ppc.altivec.vminsw";
171 }
172 }
173 }
174
175 if(intrinsic) {
176 return lp_build_intrinsic_binary_anylength(bld->gallivm, intrinsic,
177 type,
178 intr_size, a, b);
179 }
180
181 cond = lp_build_cmp(bld, PIPE_FUNC_LESS, a, b);
182 return lp_build_select(bld, cond, a, b);
183 }
184
185
186 /**
187 * Generate max(a, b)
188 * No checks for special case values of a or b = 1 or 0 are done.
189 */
190 static LLVMValueRef
191 lp_build_max_simple(struct lp_build_context *bld,
192 LLVMValueRef a,
193 LLVMValueRef b)
194 {
195 const struct lp_type type = bld->type;
196 const char *intrinsic = NULL;
197 unsigned intr_size = 0;
198 LLVMValueRef cond;
199
200 assert(lp_check_value(type, a));
201 assert(lp_check_value(type, b));
202
203 /* TODO: optimize the constant case */
204
205 if (type.floating && util_cpu_caps.has_sse) {
206 if (type.width == 32) {
207 if (type.length == 1) {
208 intrinsic = "llvm.x86.sse.max.ss";
209 intr_size = 128;
210 }
211 else if (type.length <= 4 || !util_cpu_caps.has_avx) {
212 intrinsic = "llvm.x86.sse.max.ps";
213 intr_size = 128;
214 }
215 else {
216 intrinsic = "llvm.x86.avx.max.ps.256";
217 intr_size = 256;
218 }
219 }
220 if (type.width == 64 && util_cpu_caps.has_sse2) {
221 if (type.length == 1) {
222 intrinsic = "llvm.x86.sse2.max.sd";
223 intr_size = 128;
224 }
225 else if (type.length == 2 || !util_cpu_caps.has_avx) {
226 intrinsic = "llvm.x86.sse2.max.pd";
227 intr_size = 128;
228 }
229 else {
230 intrinsic = "llvm.x86.avx.max.pd.256";
231 intr_size = 256;
232 }
233 }
234 }
235 else if (type.floating && util_cpu_caps.has_altivec) {
236 if (type.width == 32 || type.length == 4) {
237 intrinsic = "llvm.ppc.altivec.vmaxfp";
238 intr_size = 128;
239 }
240 } else if (util_cpu_caps.has_sse2 && type.length >= 2) {
241 intr_size = 128;
242 if ((type.width == 8 || type.width == 16) &&
243 (type.width * type.length <= 64) &&
244 (gallivm_debug & GALLIVM_DEBUG_PERF)) {
245 debug_printf("%s: inefficient code, bogus shuffle due to packing\n",
246 __FUNCTION__);
247 }
248 if (type.width == 8 && !type.sign) {
249 intrinsic = "llvm.x86.sse2.pmaxu.b";
250 intr_size = 128;
251 }
252 else if (type.width == 16 && type.sign) {
253 intrinsic = "llvm.x86.sse2.pmaxs.w";
254 }
255 if (util_cpu_caps.has_sse4_1) {
256 if (type.width == 8 && type.sign) {
257 intrinsic = "llvm.x86.sse41.pmaxsb";
258 }
259 if (type.width == 16 && !type.sign) {
260 intrinsic = "llvm.x86.sse41.pmaxuw";
261 }
262 if (type.width == 32 && !type.sign) {
263 intrinsic = "llvm.x86.sse41.pmaxud";
264 }
265 if (type.width == 32 && type.sign) {
266 intrinsic = "llvm.x86.sse41.pmaxsd";
267 }
268 }
269 } else if (util_cpu_caps.has_altivec) {
270 intr_size = 128;
271 if (type.width == 8) {
272 if (!type.sign) {
273 intrinsic = "llvm.ppc.altivec.vmaxub";
274 } else {
275 intrinsic = "llvm.ppc.altivec.vmaxsb";
276 }
277 } else if (type.width == 16) {
278 if (!type.sign) {
279 intrinsic = "llvm.ppc.altivec.vmaxuh";
280 } else {
281 intrinsic = "llvm.ppc.altivec.vmaxsh";
282 }
283 } else if (type.width == 32) {
284 if (!type.sign) {
285 intrinsic = "llvm.ppc.altivec.vmaxuw";
286 } else {
287 intrinsic = "llvm.ppc.altivec.vmaxsw";
288 }
289 }
290 }
291
292 if(intrinsic) {
293 return lp_build_intrinsic_binary_anylength(bld->gallivm, intrinsic,
294 type,
295 intr_size, a, b);
296 }
297
298 cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, b);
299 return lp_build_select(bld, cond, a, b);
300 }
301
302
303 /**
304 * Generate 1 - a, or ~a depending on bld->type.
305 */
306 LLVMValueRef
307 lp_build_comp(struct lp_build_context *bld,
308 LLVMValueRef a)
309 {
310 LLVMBuilderRef builder = bld->gallivm->builder;
311 const struct lp_type type = bld->type;
312
313 assert(lp_check_value(type, a));
314
315 if(a == bld->one)
316 return bld->zero;
317 if(a == bld->zero)
318 return bld->one;
319
320 if(type.norm && !type.floating && !type.fixed && !type.sign) {
321 if(LLVMIsConstant(a))
322 return LLVMConstNot(a);
323 else
324 return LLVMBuildNot(builder, a, "");
325 }
326
327 if(LLVMIsConstant(a))
328 if (type.floating)
329 return LLVMConstFSub(bld->one, a);
330 else
331 return LLVMConstSub(bld->one, a);
332 else
333 if (type.floating)
334 return LLVMBuildFSub(builder, bld->one, a, "");
335 else
336 return LLVMBuildSub(builder, bld->one, a, "");
337 }
338
339
340 /**
341 * Generate a + b
342 */
343 LLVMValueRef
344 lp_build_add(struct lp_build_context *bld,
345 LLVMValueRef a,
346 LLVMValueRef b)
347 {
348 LLVMBuilderRef builder = bld->gallivm->builder;
349 const struct lp_type type = bld->type;
350 LLVMValueRef res;
351
352 assert(lp_check_value(type, a));
353 assert(lp_check_value(type, b));
354
355 if(a == bld->zero)
356 return b;
357 if(b == bld->zero)
358 return a;
359 if(a == bld->undef || b == bld->undef)
360 return bld->undef;
361
362 if(bld->type.norm) {
363 const char *intrinsic = NULL;
364
365 if(a == bld->one || b == bld->one)
366 return bld->one;
367
368 if(util_cpu_caps.has_sse2 &&
369 type.width * type.length == 128 &&
370 !type.floating && !type.fixed) {
371 if(type.width == 8)
372 intrinsic = type.sign ? "llvm.x86.sse2.padds.b" : "llvm.x86.sse2.paddus.b";
373 if(type.width == 16)
374 intrinsic = type.sign ? "llvm.x86.sse2.padds.w" : "llvm.x86.sse2.paddus.w";
375 }
376
377 if(intrinsic)
378 return lp_build_intrinsic_binary(builder, intrinsic, lp_build_vec_type(bld->gallivm, bld->type), a, b);
379 }
380
381 if(LLVMIsConstant(a) && LLVMIsConstant(b))
382 if (type.floating)
383 res = LLVMConstFAdd(a, b);
384 else
385 res = LLVMConstAdd(a, b);
386 else
387 if (type.floating)
388 res = LLVMBuildFAdd(builder, a, b, "");
389 else
390 res = LLVMBuildAdd(builder, a, b, "");
391
392 /* clamp to ceiling of 1.0 */
393 if(bld->type.norm && (bld->type.floating || bld->type.fixed))
394 res = lp_build_min_simple(bld, res, bld->one);
395
396 /* XXX clamp to floor of -1 or 0??? */
397
398 return res;
399 }
400
401
402 /** Return the scalar sum of the elements of a.
403 * Should avoid this operation whenever possible.
404 */
405 LLVMValueRef
406 lp_build_horizontal_add(struct lp_build_context *bld,
407 LLVMValueRef a)
408 {
409 LLVMBuilderRef builder = bld->gallivm->builder;
410 const struct lp_type type = bld->type;
411 LLVMValueRef index, res;
412 unsigned i, length;
413 LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH / 2];
414 LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH / 2];
415 LLVMValueRef vecres, elem2;
416
417 assert(lp_check_value(type, a));
418
419 if (type.length == 1) {
420 return a;
421 }
422
423 assert(!bld->type.norm);
424
425 /*
426 * for byte vectors can do much better with psadbw.
427 * Using repeated shuffle/adds here. Note with multiple vectors
428 * this can be done more efficiently as outlined in the intel
429 * optimization manual.
430 * Note: could cause data rearrangement if used with smaller element
431 * sizes.
432 */
433
434 vecres = a;
435 length = type.length / 2;
436 while (length > 1) {
437 LLVMValueRef vec1, vec2;
438 for (i = 0; i < length; i++) {
439 shuffles1[i] = lp_build_const_int32(bld->gallivm, i);
440 shuffles2[i] = lp_build_const_int32(bld->gallivm, i + length);
441 }
442 vec1 = LLVMBuildShuffleVector(builder, vecres, vecres,
443 LLVMConstVector(shuffles1, length), "");
444 vec2 = LLVMBuildShuffleVector(builder, vecres, vecres,
445 LLVMConstVector(shuffles2, length), "");
446 if (type.floating) {
447 vecres = LLVMBuildFAdd(builder, vec1, vec2, "");
448 }
449 else {
450 vecres = LLVMBuildAdd(builder, vec1, vec2, "");
451 }
452 length = length >> 1;
453 }
454
455 /* always have vector of size 2 here */
456 assert(length == 1);
457
458 index = lp_build_const_int32(bld->gallivm, 0);
459 res = LLVMBuildExtractElement(builder, vecres, index, "");
460 index = lp_build_const_int32(bld->gallivm, 1);
461 elem2 = LLVMBuildExtractElement(builder, vecres, index, "");
462
463 if (type.floating)
464 res = LLVMBuildFAdd(builder, res, elem2, "");
465 else
466 res = LLVMBuildAdd(builder, res, elem2, "");
467
468 return res;
469 }
470
471 /**
472 * Return the horizontal sums of 4 float vectors as a float4 vector.
473 * This uses the technique as outlined in Intel Optimization Manual.
474 */
475 static LLVMValueRef
476 lp_build_horizontal_add4x4f(struct lp_build_context *bld,
477 LLVMValueRef src[4])
478 {
479 struct gallivm_state *gallivm = bld->gallivm;
480 LLVMBuilderRef builder = gallivm->builder;
481 LLVMValueRef shuffles[4];
482 LLVMValueRef tmp[4];
483 LLVMValueRef sumtmp[2], shuftmp[2];
484
485 /* lower half of regs */
486 shuffles[0] = lp_build_const_int32(gallivm, 0);
487 shuffles[1] = lp_build_const_int32(gallivm, 1);
488 shuffles[2] = lp_build_const_int32(gallivm, 4);
489 shuffles[3] = lp_build_const_int32(gallivm, 5);
490 tmp[0] = LLVMBuildShuffleVector(builder, src[0], src[1],
491 LLVMConstVector(shuffles, 4), "");
492 tmp[2] = LLVMBuildShuffleVector(builder, src[2], src[3],
493 LLVMConstVector(shuffles, 4), "");
494
495 /* upper half of regs */
496 shuffles[0] = lp_build_const_int32(gallivm, 2);
497 shuffles[1] = lp_build_const_int32(gallivm, 3);
498 shuffles[2] = lp_build_const_int32(gallivm, 6);
499 shuffles[3] = lp_build_const_int32(gallivm, 7);
500 tmp[1] = LLVMBuildShuffleVector(builder, src[0], src[1],
501 LLVMConstVector(shuffles, 4), "");
502 tmp[3] = LLVMBuildShuffleVector(builder, src[2], src[3],
503 LLVMConstVector(shuffles, 4), "");
504
505 sumtmp[0] = LLVMBuildFAdd(builder, tmp[0], tmp[1], "");
506 sumtmp[1] = LLVMBuildFAdd(builder, tmp[2], tmp[3], "");
507
508 shuffles[0] = lp_build_const_int32(gallivm, 0);
509 shuffles[1] = lp_build_const_int32(gallivm, 2);
510 shuffles[2] = lp_build_const_int32(gallivm, 4);
511 shuffles[3] = lp_build_const_int32(gallivm, 6);
512 shuftmp[0] = LLVMBuildShuffleVector(builder, sumtmp[0], sumtmp[1],
513 LLVMConstVector(shuffles, 4), "");
514
515 shuffles[0] = lp_build_const_int32(gallivm, 1);
516 shuffles[1] = lp_build_const_int32(gallivm, 3);
517 shuffles[2] = lp_build_const_int32(gallivm, 5);
518 shuffles[3] = lp_build_const_int32(gallivm, 7);
519 shuftmp[1] = LLVMBuildShuffleVector(builder, sumtmp[0], sumtmp[1],
520 LLVMConstVector(shuffles, 4), "");
521
522 return LLVMBuildFAdd(builder, shuftmp[0], shuftmp[1], "");
523 }
524
525
526 /*
527 * partially horizontally add 2-4 float vectors with length nx4,
528 * i.e. only four adjacent values in each vector will be added,
529 * assuming values are really grouped in 4 which also determines
530 * output order.
531 *
532 * Return a vector of the same length as the initial vectors,
533 * with the excess elements (if any) being undefined.
534 * The element order is independent of number of input vectors.
535 * For 3 vectors x0x1x2x3x4x5x6x7, y0y1y2y3y4y5y6y7, z0z1z2z3z4z5z6z7
536 * the output order thus will be
537 * sumx0-x3,sumy0-y3,sumz0-z3,undef,sumx4-x7,sumy4-y7,sumz4z7,undef
538 */
539 LLVMValueRef
540 lp_build_hadd_partial4(struct lp_build_context *bld,
541 LLVMValueRef vectors[],
542 unsigned num_vecs)
543 {
544 struct gallivm_state *gallivm = bld->gallivm;
545 LLVMBuilderRef builder = gallivm->builder;
546 LLVMValueRef ret_vec;
547 LLVMValueRef tmp[4];
548 const char *intrinsic = NULL;
549
550 assert(num_vecs >= 2 && num_vecs <= 4);
551 assert(bld->type.floating);
552
553 /* only use this with at least 2 vectors, as it is sort of expensive
554 * (depending on cpu) and we always need two horizontal adds anyway,
555 * so a shuffle/add approach might be better.
556 */
557
558 tmp[0] = vectors[0];
559 tmp[1] = vectors[1];
560
561 tmp[2] = num_vecs > 2 ? vectors[2] : vectors[0];
562 tmp[3] = num_vecs > 3 ? vectors[3] : vectors[0];
563
564 if (util_cpu_caps.has_sse3 && bld->type.width == 32 &&
565 bld->type.length == 4) {
566 intrinsic = "llvm.x86.sse3.hadd.ps";
567 }
568 else if (util_cpu_caps.has_avx && bld->type.width == 32 &&
569 bld->type.length == 8) {
570 intrinsic = "llvm.x86.avx.hadd.ps.256";
571 }
572 if (intrinsic) {
573 tmp[0] = lp_build_intrinsic_binary(builder, intrinsic,
574 lp_build_vec_type(gallivm, bld->type),
575 tmp[0], tmp[1]);
576 if (num_vecs > 2) {
577 tmp[1] = lp_build_intrinsic_binary(builder, intrinsic,
578 lp_build_vec_type(gallivm, bld->type),
579 tmp[2], tmp[3]);
580 }
581 else {
582 tmp[1] = tmp[0];
583 }
584 return lp_build_intrinsic_binary(builder, intrinsic,
585 lp_build_vec_type(gallivm, bld->type),
586 tmp[0], tmp[1]);
587 }
588
589 if (bld->type.length == 4) {
590 ret_vec = lp_build_horizontal_add4x4f(bld, tmp);
591 }
592 else {
593 LLVMValueRef partres[LP_MAX_VECTOR_LENGTH/4];
594 unsigned j;
595 unsigned num_iter = bld->type.length / 4;
596 struct lp_type parttype = bld->type;
597 parttype.length = 4;
598 for (j = 0; j < num_iter; j++) {
599 LLVMValueRef partsrc[4];
600 unsigned i;
601 for (i = 0; i < 4; i++) {
602 partsrc[i] = lp_build_extract_range(gallivm, tmp[i], j*4, 4);
603 }
604 partres[j] = lp_build_horizontal_add4x4f(bld, partsrc);
605 }
606 ret_vec = lp_build_concat(gallivm, partres, parttype, num_iter);
607 }
608 return ret_vec;
609 }
610
611 /**
612 * Generate a - b
613 */
614 LLVMValueRef
615 lp_build_sub(struct lp_build_context *bld,
616 LLVMValueRef a,
617 LLVMValueRef b)
618 {
619 LLVMBuilderRef builder = bld->gallivm->builder;
620 const struct lp_type type = bld->type;
621 LLVMValueRef res;
622
623 assert(lp_check_value(type, a));
624 assert(lp_check_value(type, b));
625
626 if(b == bld->zero)
627 return a;
628 if(a == bld->undef || b == bld->undef)
629 return bld->undef;
630 if(a == b)
631 return bld->zero;
632
633 if(bld->type.norm) {
634 const char *intrinsic = NULL;
635
636 if(b == bld->one)
637 return bld->zero;
638
639 if(util_cpu_caps.has_sse2 &&
640 type.width * type.length == 128 &&
641 !type.floating && !type.fixed) {
642 if(type.width == 8)
643 intrinsic = type.sign ? "llvm.x86.sse2.psubs.b" : "llvm.x86.sse2.psubus.b";
644 if(type.width == 16)
645 intrinsic = type.sign ? "llvm.x86.sse2.psubs.w" : "llvm.x86.sse2.psubus.w";
646 }
647
648 if(intrinsic)
649 return lp_build_intrinsic_binary(builder, intrinsic, lp_build_vec_type(bld->gallivm, bld->type), a, b);
650 }
651
652 if(LLVMIsConstant(a) && LLVMIsConstant(b))
653 if (type.floating)
654 res = LLVMConstFSub(a, b);
655 else
656 res = LLVMConstSub(a, b);
657 else
658 if (type.floating)
659 res = LLVMBuildFSub(builder, a, b, "");
660 else
661 res = LLVMBuildSub(builder, a, b, "");
662
663 if(bld->type.norm && (bld->type.floating || bld->type.fixed))
664 res = lp_build_max_simple(bld, res, bld->zero);
665
666 return res;
667 }
668
669
670 /**
671 * Normalized 8bit multiplication.
672 *
673 * - alpha plus one
674 *
675 * makes the following approximation to the division (Sree)
676 *
677 * a*b/255 ~= (a*(b + 1)) >> 256
678 *
679 * which is the fastest method that satisfies the following OpenGL criteria
680 *
681 * 0*0 = 0 and 255*255 = 255
682 *
683 * - geometric series
684 *
685 * takes the geometric series approximation to the division
686 *
687 * t/255 = (t >> 8) + (t >> 16) + (t >> 24) ..
688 *
689 * in this case just the first two terms to fit in 16bit arithmetic
690 *
691 * t/255 ~= (t + (t >> 8)) >> 8
692 *
693 * note that just by itself it doesn't satisfies the OpenGL criteria, as
694 * 255*255 = 254, so the special case b = 255 must be accounted or roundoff
695 * must be used
696 *
697 * - geometric series plus rounding
698 *
699 * when using a geometric series division instead of truncating the result
700 * use roundoff in the approximation (Jim Blinn)
701 *
702 * t/255 ~= (t + (t >> 8) + 0x80) >> 8
703 *
704 * achieving the exact results
705 *
706 * @sa Alvy Ray Smith, Image Compositing Fundamentals, Tech Memo 4, Aug 15, 1995,
707 * ftp://ftp.alvyray.com/Acrobat/4_Comp.pdf
708 * @sa Michael Herf, The "double blend trick", May 2000,
709 * http://www.stereopsis.com/doubleblend.html
710 */
711 static LLVMValueRef
712 lp_build_mul_u8n(struct gallivm_state *gallivm,
713 struct lp_type i16_type,
714 LLVMValueRef a, LLVMValueRef b)
715 {
716 LLVMBuilderRef builder = gallivm->builder;
717 LLVMValueRef c8;
718 LLVMValueRef ab;
719
720 assert(!i16_type.floating);
721 assert(lp_check_value(i16_type, a));
722 assert(lp_check_value(i16_type, b));
723
724 c8 = lp_build_const_int_vec(gallivm, i16_type, 8);
725
726 #if 0
727
728 /* a*b/255 ~= (a*(b + 1)) >> 256 */
729 b = LLVMBuildAdd(builder, b, lp_build_const_int_vec(gallium, i16_type, 1), "");
730 ab = LLVMBuildMul(builder, a, b, "");
731
732 #else
733
734 /* ab/255 ~= (ab + (ab >> 8) + 0x80) >> 8 */
735 ab = LLVMBuildMul(builder, a, b, "");
736 ab = LLVMBuildAdd(builder, ab, LLVMBuildLShr(builder, ab, c8, ""), "");
737 ab = LLVMBuildAdd(builder, ab, lp_build_const_int_vec(gallivm, i16_type, 0x80), "");
738
739 #endif
740
741 ab = LLVMBuildLShr(builder, ab, c8, "");
742
743 return ab;
744 }
745
746 /**
747 * Normalized 16bit multiplication.
748 *
749 * Utilises same principle as above code.
750 */
751 static LLVMValueRef
752 lp_build_mul_u16n(struct gallivm_state *gallivm,
753 struct lp_type i32_type,
754 LLVMValueRef a, LLVMValueRef b)
755 {
756 LLVMBuilderRef builder = gallivm->builder;
757 LLVMValueRef c16;
758 LLVMValueRef ab;
759
760 assert(!i32_type.floating);
761 assert(lp_check_value(i32_type, a));
762 assert(lp_check_value(i32_type, b));
763
764 c16 = lp_build_const_int_vec(gallivm, i32_type, 16);
765
766 /* ab/65535 ~= (ab + (ab >> 16) + 0x8000) >> 16 */
767 ab = LLVMBuildMul(builder, a, b, "");
768 ab = LLVMBuildAdd(builder, ab, LLVMBuildLShr(builder, ab, c16, ""), "");
769 ab = LLVMBuildAdd(builder, ab, lp_build_const_int_vec(gallivm, i32_type, 0x8000), "");
770
771 ab = LLVMBuildLShr(builder, ab, c16, "");
772
773 return ab;
774 }
775
776 /**
777 * Generate a * b
778 */
779 LLVMValueRef
780 lp_build_mul(struct lp_build_context *bld,
781 LLVMValueRef a,
782 LLVMValueRef b)
783 {
784 LLVMBuilderRef builder = bld->gallivm->builder;
785 const struct lp_type type = bld->type;
786 LLVMValueRef shift;
787 LLVMValueRef res;
788
789 assert(lp_check_value(type, a));
790 assert(lp_check_value(type, b));
791
792 if(a == bld->zero)
793 return bld->zero;
794 if(a == bld->one)
795 return b;
796 if(b == bld->zero)
797 return bld->zero;
798 if(b == bld->one)
799 return a;
800 if(a == bld->undef || b == bld->undef)
801 return bld->undef;
802
803 if(!type.floating && !type.fixed && type.norm) {
804 if(type.width == 8) {
805 struct lp_type i16_type = lp_wider_type(type);
806 LLVMValueRef al, ah, bl, bh, abl, abh, ab;
807
808 lp_build_unpack2(bld->gallivm, type, i16_type, a, &al, &ah);
809 lp_build_unpack2(bld->gallivm, type, i16_type, b, &bl, &bh);
810
811 /* PMULLW, PSRLW, PADDW */
812 abl = lp_build_mul_u8n(bld->gallivm, i16_type, al, bl);
813 abh = lp_build_mul_u8n(bld->gallivm, i16_type, ah, bh);
814
815 ab = lp_build_pack2(bld->gallivm, i16_type, type, abl, abh);
816
817 return ab;
818 }
819
820 if(type.width == 16) {
821 struct lp_type i32_type = lp_wider_type(type);
822 LLVMValueRef al, ah, bl, bh, abl, abh, ab;
823
824 lp_build_unpack2(bld->gallivm, type, i32_type, a, &al, &ah);
825 lp_build_unpack2(bld->gallivm, type, i32_type, b, &bl, &bh);
826
827 /* PMULLW, PSRLW, PADDW */
828 abl = lp_build_mul_u16n(bld->gallivm, i32_type, al, bl);
829 abh = lp_build_mul_u16n(bld->gallivm, i32_type, ah, bh);
830
831 ab = lp_build_pack2(bld->gallivm, i32_type, type, abl, abh);
832
833 return ab;
834 }
835
836 /* FIXME */
837 assert(0);
838 }
839
840 if(type.fixed)
841 shift = lp_build_const_int_vec(bld->gallivm, type, type.width/2);
842 else
843 shift = NULL;
844
845 if(LLVMIsConstant(a) && LLVMIsConstant(b)) {
846 if (type.floating)
847 res = LLVMConstFMul(a, b);
848 else
849 res = LLVMConstMul(a, b);
850 if(shift) {
851 if(type.sign)
852 res = LLVMConstAShr(res, shift);
853 else
854 res = LLVMConstLShr(res, shift);
855 }
856 }
857 else {
858 if (type.floating)
859 res = LLVMBuildFMul(builder, a, b, "");
860 else
861 res = LLVMBuildMul(builder, a, b, "");
862 if(shift) {
863 if(type.sign)
864 res = LLVMBuildAShr(builder, res, shift, "");
865 else
866 res = LLVMBuildLShr(builder, res, shift, "");
867 }
868 }
869
870 return res;
871 }
872
873
874 /**
875 * Small vector x scale multiplication optimization.
876 */
877 LLVMValueRef
878 lp_build_mul_imm(struct lp_build_context *bld,
879 LLVMValueRef a,
880 int b)
881 {
882 LLVMBuilderRef builder = bld->gallivm->builder;
883 LLVMValueRef factor;
884
885 assert(lp_check_value(bld->type, a));
886
887 if(b == 0)
888 return bld->zero;
889
890 if(b == 1)
891 return a;
892
893 if(b == -1)
894 return lp_build_negate(bld, a);
895
896 if(b == 2 && bld->type.floating)
897 return lp_build_add(bld, a, a);
898
899 if(util_is_power_of_two(b)) {
900 unsigned shift = ffs(b) - 1;
901
902 if(bld->type.floating) {
903 #if 0
904 /*
905 * Power of two multiplication by directly manipulating the exponent.
906 *
907 * XXX: This might not be always faster, it will introduce a small error
908 * for multiplication by zero, and it will produce wrong results
909 * for Inf and NaN.
910 */
911 unsigned mantissa = lp_mantissa(bld->type);
912 factor = lp_build_const_int_vec(bld->gallivm, bld->type, (unsigned long long)shift << mantissa);
913 a = LLVMBuildBitCast(builder, a, lp_build_int_vec_type(bld->type), "");
914 a = LLVMBuildAdd(builder, a, factor, "");
915 a = LLVMBuildBitCast(builder, a, lp_build_vec_type(bld->gallivm, bld->type), "");
916 return a;
917 #endif
918 }
919 else {
920 factor = lp_build_const_vec(bld->gallivm, bld->type, shift);
921 return LLVMBuildShl(builder, a, factor, "");
922 }
923 }
924
925 factor = lp_build_const_vec(bld->gallivm, bld->type, (double)b);
926 return lp_build_mul(bld, a, factor);
927 }
928
929
930 /**
931 * Generate a / b
932 */
933 LLVMValueRef
934 lp_build_div(struct lp_build_context *bld,
935 LLVMValueRef a,
936 LLVMValueRef b)
937 {
938 LLVMBuilderRef builder = bld->gallivm->builder;
939 const struct lp_type type = bld->type;
940
941 assert(lp_check_value(type, a));
942 assert(lp_check_value(type, b));
943
944 if(a == bld->zero)
945 return bld->zero;
946 if(a == bld->one)
947 return lp_build_rcp(bld, b);
948 if(b == bld->zero)
949 return bld->undef;
950 if(b == bld->one)
951 return a;
952 if(a == bld->undef || b == bld->undef)
953 return bld->undef;
954
955 if(LLVMIsConstant(a) && LLVMIsConstant(b)) {
956 if (type.floating)
957 return LLVMConstFDiv(a, b);
958 else if (type.sign)
959 return LLVMConstSDiv(a, b);
960 else
961 return LLVMConstUDiv(a, b);
962 }
963
964 if(((util_cpu_caps.has_sse && type.width == 32 && type.length == 4) ||
965 (util_cpu_caps.has_avx && type.width == 32 && type.length == 8)) &&
966 type.floating)
967 return lp_build_mul(bld, a, lp_build_rcp(bld, b));
968
969 if (type.floating)
970 return LLVMBuildFDiv(builder, a, b, "");
971 else if (type.sign)
972 return LLVMBuildSDiv(builder, a, b, "");
973 else
974 return LLVMBuildUDiv(builder, a, b, "");
975 }
976
977
978 /**
979 * Linear interpolation -- without any checks.
980 *
981 * @sa http://www.stereopsis.com/doubleblend.html
982 */
983 static INLINE LLVMValueRef
984 lp_build_lerp_simple(struct lp_build_context *bld,
985 LLVMValueRef x,
986 LLVMValueRef v0,
987 LLVMValueRef v1)
988 {
989 LLVMBuilderRef builder = bld->gallivm->builder;
990 LLVMValueRef delta;
991 LLVMValueRef res;
992
993 assert(lp_check_value(bld->type, x));
994 assert(lp_check_value(bld->type, v0));
995 assert(lp_check_value(bld->type, v1));
996
997 delta = lp_build_sub(bld, v1, v0);
998
999 res = lp_build_mul(bld, x, delta);
1000
1001 res = lp_build_add(bld, v0, res);
1002
1003 if (bld->type.fixed) {
1004 /* XXX: This step is necessary for lerping 8bit colors stored on 16bits,
1005 * but it will be wrong for other uses. Basically we need a more
1006 * powerful lp_type, capable of further distinguishing the values
1007 * interpretation from the value storage. */
1008 res = LLVMBuildAnd(builder, res, lp_build_const_int_vec(bld->gallivm, bld->type, (1 << bld->type.width/2) - 1), "");
1009 }
1010
1011 return res;
1012 }
1013
1014
1015 /**
1016 * Linear interpolation.
1017 */
1018 LLVMValueRef
1019 lp_build_lerp(struct lp_build_context *bld,
1020 LLVMValueRef x,
1021 LLVMValueRef v0,
1022 LLVMValueRef v1)
1023 {
1024 LLVMBuilderRef builder = bld->gallivm->builder;
1025 const struct lp_type type = bld->type;
1026 LLVMValueRef res;
1027
1028 assert(lp_check_value(type, x));
1029 assert(lp_check_value(type, v0));
1030 assert(lp_check_value(type, v1));
1031
1032 if (type.norm) {
1033 struct lp_type wide_type;
1034 struct lp_build_context wide_bld;
1035 LLVMValueRef xl, xh, v0l, v0h, v1l, v1h, resl, resh;
1036 LLVMValueRef shift;
1037
1038 assert(type.length >= 2);
1039 assert(!type.sign);
1040
1041 /*
1042 * Create a wider type, enough to hold the intermediate result of the
1043 * multiplication.
1044 */
1045 memset(&wide_type, 0, sizeof wide_type);
1046 wide_type.fixed = TRUE;
1047 wide_type.width = type.width*2;
1048 wide_type.length = type.length/2;
1049
1050 lp_build_context_init(&wide_bld, bld->gallivm, wide_type);
1051
1052 lp_build_unpack2(bld->gallivm, type, wide_type, x, &xl, &xh);
1053 lp_build_unpack2(bld->gallivm, type, wide_type, v0, &v0l, &v0h);
1054 lp_build_unpack2(bld->gallivm, type, wide_type, v1, &v1l, &v1h);
1055
1056 /*
1057 * Scale x from [0, 255] to [0, 256]
1058 */
1059
1060 shift = lp_build_const_int_vec(bld->gallivm, wide_type, type.width - 1);
1061
1062 xl = lp_build_add(&wide_bld, xl,
1063 LLVMBuildAShr(builder, xl, shift, ""));
1064 xh = lp_build_add(&wide_bld, xh,
1065 LLVMBuildAShr(builder, xh, shift, ""));
1066
1067 /*
1068 * Lerp both halves.
1069 */
1070
1071 resl = lp_build_lerp_simple(&wide_bld, xl, v0l, v1l);
1072 resh = lp_build_lerp_simple(&wide_bld, xh, v0h, v1h);
1073
1074 res = lp_build_pack2(bld->gallivm, wide_type, type, resl, resh);
1075 } else {
1076 res = lp_build_lerp_simple(bld, x, v0, v1);
1077 }
1078
1079 return res;
1080 }
1081
1082
1083 LLVMValueRef
1084 lp_build_lerp_2d(struct lp_build_context *bld,
1085 LLVMValueRef x,
1086 LLVMValueRef y,
1087 LLVMValueRef v00,
1088 LLVMValueRef v01,
1089 LLVMValueRef v10,
1090 LLVMValueRef v11)
1091 {
1092 LLVMValueRef v0 = lp_build_lerp(bld, x, v00, v01);
1093 LLVMValueRef v1 = lp_build_lerp(bld, x, v10, v11);
1094 return lp_build_lerp(bld, y, v0, v1);
1095 }
1096
1097
1098 /**
1099 * Generate min(a, b)
1100 * Do checks for special cases.
1101 */
1102 LLVMValueRef
1103 lp_build_min(struct lp_build_context *bld,
1104 LLVMValueRef a,
1105 LLVMValueRef b)
1106 {
1107 assert(lp_check_value(bld->type, a));
1108 assert(lp_check_value(bld->type, b));
1109
1110 if(a == bld->undef || b == bld->undef)
1111 return bld->undef;
1112
1113 if(a == b)
1114 return a;
1115
1116 if (bld->type.norm) {
1117 if (!bld->type.sign) {
1118 if (a == bld->zero || b == bld->zero) {
1119 return bld->zero;
1120 }
1121 }
1122 if(a == bld->one)
1123 return b;
1124 if(b == bld->one)
1125 return a;
1126 }
1127
1128 return lp_build_min_simple(bld, a, b);
1129 }
1130
1131
1132 /**
1133 * Generate max(a, b)
1134 * Do checks for special cases.
1135 */
1136 LLVMValueRef
1137 lp_build_max(struct lp_build_context *bld,
1138 LLVMValueRef a,
1139 LLVMValueRef b)
1140 {
1141 assert(lp_check_value(bld->type, a));
1142 assert(lp_check_value(bld->type, b));
1143
1144 if(a == bld->undef || b == bld->undef)
1145 return bld->undef;
1146
1147 if(a == b)
1148 return a;
1149
1150 if(bld->type.norm) {
1151 if(a == bld->one || b == bld->one)
1152 return bld->one;
1153 if (!bld->type.sign) {
1154 if (a == bld->zero) {
1155 return b;
1156 }
1157 if (b == bld->zero) {
1158 return a;
1159 }
1160 }
1161 }
1162
1163 return lp_build_max_simple(bld, a, b);
1164 }
1165
1166
1167 /**
1168 * Generate clamp(a, min, max)
1169 * Do checks for special cases.
1170 */
1171 LLVMValueRef
1172 lp_build_clamp(struct lp_build_context *bld,
1173 LLVMValueRef a,
1174 LLVMValueRef min,
1175 LLVMValueRef max)
1176 {
1177 assert(lp_check_value(bld->type, a));
1178 assert(lp_check_value(bld->type, min));
1179 assert(lp_check_value(bld->type, max));
1180
1181 a = lp_build_min(bld, a, max);
1182 a = lp_build_max(bld, a, min);
1183 return a;
1184 }
1185
1186
1187 /**
1188 * Generate abs(a)
1189 */
1190 LLVMValueRef
1191 lp_build_abs(struct lp_build_context *bld,
1192 LLVMValueRef a)
1193 {
1194 LLVMBuilderRef builder = bld->gallivm->builder;
1195 const struct lp_type type = bld->type;
1196 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
1197
1198 assert(lp_check_value(type, a));
1199
1200 if(!type.sign)
1201 return a;
1202
1203 if(type.floating) {
1204 /* Mask out the sign bit */
1205 LLVMTypeRef int_vec_type = lp_build_int_vec_type(bld->gallivm, type);
1206 unsigned long long absMask = ~(1ULL << (type.width - 1));
1207 LLVMValueRef mask = lp_build_const_int_vec(bld->gallivm, type, ((unsigned long long) absMask));
1208 a = LLVMBuildBitCast(builder, a, int_vec_type, "");
1209 a = LLVMBuildAnd(builder, a, mask, "");
1210 a = LLVMBuildBitCast(builder, a, vec_type, "");
1211 return a;
1212 }
1213
1214 if(type.width*type.length == 128 && util_cpu_caps.has_ssse3) {
1215 switch(type.width) {
1216 case 8:
1217 return lp_build_intrinsic_unary(builder, "llvm.x86.ssse3.pabs.b.128", vec_type, a);
1218 case 16:
1219 return lp_build_intrinsic_unary(builder, "llvm.x86.ssse3.pabs.w.128", vec_type, a);
1220 case 32:
1221 return lp_build_intrinsic_unary(builder, "llvm.x86.ssse3.pabs.d.128", vec_type, a);
1222 }
1223 }
1224 else if (type.width*type.length == 256 && util_cpu_caps.has_ssse3 &&
1225 (gallivm_debug & GALLIVM_DEBUG_PERF) &&
1226 (type.width == 8 || type.width == 16 || type.width == 32)) {
1227 debug_printf("%s: inefficient code, should split vectors manually\n",
1228 __FUNCTION__);
1229 }
1230
1231 return lp_build_max(bld, a, LLVMBuildNeg(builder, a, ""));
1232 }
1233
1234
1235 LLVMValueRef
1236 lp_build_negate(struct lp_build_context *bld,
1237 LLVMValueRef a)
1238 {
1239 LLVMBuilderRef builder = bld->gallivm->builder;
1240
1241 assert(lp_check_value(bld->type, a));
1242
1243 #if HAVE_LLVM >= 0x0207
1244 if (bld->type.floating)
1245 a = LLVMBuildFNeg(builder, a, "");
1246 else
1247 #endif
1248 a = LLVMBuildNeg(builder, a, "");
1249
1250 return a;
1251 }
1252
1253
1254 /** Return -1, 0 or +1 depending on the sign of a */
1255 LLVMValueRef
1256 lp_build_sgn(struct lp_build_context *bld,
1257 LLVMValueRef a)
1258 {
1259 LLVMBuilderRef builder = bld->gallivm->builder;
1260 const struct lp_type type = bld->type;
1261 LLVMValueRef cond;
1262 LLVMValueRef res;
1263
1264 assert(lp_check_value(type, a));
1265
1266 /* Handle non-zero case */
1267 if(!type.sign) {
1268 /* if not zero then sign must be positive */
1269 res = bld->one;
1270 }
1271 else if(type.floating) {
1272 LLVMTypeRef vec_type;
1273 LLVMTypeRef int_type;
1274 LLVMValueRef mask;
1275 LLVMValueRef sign;
1276 LLVMValueRef one;
1277 unsigned long long maskBit = (unsigned long long)1 << (type.width - 1);
1278
1279 int_type = lp_build_int_vec_type(bld->gallivm, type);
1280 vec_type = lp_build_vec_type(bld->gallivm, type);
1281 mask = lp_build_const_int_vec(bld->gallivm, type, maskBit);
1282
1283 /* Take the sign bit and add it to 1 constant */
1284 sign = LLVMBuildBitCast(builder, a, int_type, "");
1285 sign = LLVMBuildAnd(builder, sign, mask, "");
1286 one = LLVMConstBitCast(bld->one, int_type);
1287 res = LLVMBuildOr(builder, sign, one, "");
1288 res = LLVMBuildBitCast(builder, res, vec_type, "");
1289 }
1290 else
1291 {
1292 /* signed int/norm/fixed point */
1293 /* could use psign with sse3 and appropriate vectors here */
1294 LLVMValueRef minus_one = lp_build_const_vec(bld->gallivm, type, -1.0);
1295 cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, bld->zero);
1296 res = lp_build_select(bld, cond, bld->one, minus_one);
1297 }
1298
1299 /* Handle zero */
1300 cond = lp_build_cmp(bld, PIPE_FUNC_EQUAL, a, bld->zero);
1301 res = lp_build_select(bld, cond, bld->zero, res);
1302
1303 return res;
1304 }
1305
1306
1307 /**
1308 * Set the sign of float vector 'a' according to 'sign'.
1309 * If sign==0, return abs(a).
1310 * If sign==1, return -abs(a);
1311 * Other values for sign produce undefined results.
1312 */
1313 LLVMValueRef
1314 lp_build_set_sign(struct lp_build_context *bld,
1315 LLVMValueRef a, LLVMValueRef sign)
1316 {
1317 LLVMBuilderRef builder = bld->gallivm->builder;
1318 const struct lp_type type = bld->type;
1319 LLVMTypeRef int_vec_type = lp_build_int_vec_type(bld->gallivm, type);
1320 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
1321 LLVMValueRef shift = lp_build_const_int_vec(bld->gallivm, type, type.width - 1);
1322 LLVMValueRef mask = lp_build_const_int_vec(bld->gallivm, type,
1323 ~((unsigned long long) 1 << (type.width - 1)));
1324 LLVMValueRef val, res;
1325
1326 assert(type.floating);
1327 assert(lp_check_value(type, a));
1328
1329 /* val = reinterpret_cast<int>(a) */
1330 val = LLVMBuildBitCast(builder, a, int_vec_type, "");
1331 /* val = val & mask */
1332 val = LLVMBuildAnd(builder, val, mask, "");
1333 /* sign = sign << shift */
1334 sign = LLVMBuildShl(builder, sign, shift, "");
1335 /* res = val | sign */
1336 res = LLVMBuildOr(builder, val, sign, "");
1337 /* res = reinterpret_cast<float>(res) */
1338 res = LLVMBuildBitCast(builder, res, vec_type, "");
1339
1340 return res;
1341 }
1342
1343
1344 /**
1345 * Convert vector of (or scalar) int to vector of (or scalar) float.
1346 */
1347 LLVMValueRef
1348 lp_build_int_to_float(struct lp_build_context *bld,
1349 LLVMValueRef a)
1350 {
1351 LLVMBuilderRef builder = bld->gallivm->builder;
1352 const struct lp_type type = bld->type;
1353 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
1354
1355 assert(type.floating);
1356
1357 return LLVMBuildSIToFP(builder, a, vec_type, "");
1358 }
1359
1360 static boolean
1361 sse41_rounding_available(const struct lp_type type)
1362 {
1363 if ((util_cpu_caps.has_sse4_1 &&
1364 (type.length == 1 || type.width*type.length == 128)) ||
1365 (util_cpu_caps.has_avx && type.width*type.length == 256))
1366 return TRUE;
1367
1368 return FALSE;
1369 }
1370
1371 enum lp_build_round_sse41_mode
1372 {
1373 LP_BUILD_ROUND_SSE41_NEAREST = 0,
1374 LP_BUILD_ROUND_SSE41_FLOOR = 1,
1375 LP_BUILD_ROUND_SSE41_CEIL = 2,
1376 LP_BUILD_ROUND_SSE41_TRUNCATE = 3
1377 };
1378
1379
1380 /**
1381 * Helper for SSE4.1's ROUNDxx instructions.
1382 *
1383 * NOTE: In the SSE4.1's nearest mode, if two values are equally close, the
1384 * result is the even value. That is, rounding 2.5 will be 2.0, and not 3.0.
1385 */
1386 static INLINE LLVMValueRef
1387 lp_build_round_sse41(struct lp_build_context *bld,
1388 LLVMValueRef a,
1389 enum lp_build_round_sse41_mode mode)
1390 {
1391 LLVMBuilderRef builder = bld->gallivm->builder;
1392 const struct lp_type type = bld->type;
1393 LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
1394 const char *intrinsic;
1395 LLVMValueRef res;
1396
1397 assert(type.floating);
1398
1399 assert(lp_check_value(type, a));
1400 assert(util_cpu_caps.has_sse4_1);
1401
1402 if (type.length == 1) {
1403 LLVMTypeRef vec_type;
1404 LLVMValueRef undef;
1405 LLVMValueRef args[3];
1406 LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
1407
1408 switch(type.width) {
1409 case 32:
1410 intrinsic = "llvm.x86.sse41.round.ss";
1411 break;
1412 case 64:
1413 intrinsic = "llvm.x86.sse41.round.sd";
1414 break;
1415 default:
1416 assert(0);
1417 return bld->undef;
1418 }
1419
1420 vec_type = LLVMVectorType(bld->elem_type, 4);
1421
1422 undef = LLVMGetUndef(vec_type);
1423
1424 args[0] = undef;
1425 args[1] = LLVMBuildInsertElement(builder, undef, a, index0, "");
1426 args[2] = LLVMConstInt(i32t, mode, 0);
1427
1428 res = lp_build_intrinsic(builder, intrinsic,
1429 vec_type, args, Elements(args));
1430
1431 res = LLVMBuildExtractElement(builder, res, index0, "");
1432 }
1433 else {
1434 if (type.width * type.length == 128) {
1435 switch(type.width) {
1436 case 32:
1437 intrinsic = "llvm.x86.sse41.round.ps";
1438 break;
1439 case 64:
1440 intrinsic = "llvm.x86.sse41.round.pd";
1441 break;
1442 default:
1443 assert(0);
1444 return bld->undef;
1445 }
1446 }
1447 else {
1448 assert(type.width * type.length == 256);
1449 assert(util_cpu_caps.has_avx);
1450
1451 switch(type.width) {
1452 case 32:
1453 intrinsic = "llvm.x86.avx.round.ps.256";
1454 break;
1455 case 64:
1456 intrinsic = "llvm.x86.avx.round.pd.256";
1457 break;
1458 default:
1459 assert(0);
1460 return bld->undef;
1461 }
1462 }
1463
1464 res = lp_build_intrinsic_binary(builder, intrinsic,
1465 bld->vec_type, a,
1466 LLVMConstInt(i32t, mode, 0));
1467 }
1468
1469 return res;
1470 }
1471
1472
1473 static INLINE LLVMValueRef
1474 lp_build_iround_nearest_sse2(struct lp_build_context *bld,
1475 LLVMValueRef a)
1476 {
1477 LLVMBuilderRef builder = bld->gallivm->builder;
1478 const struct lp_type type = bld->type;
1479 LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
1480 LLVMTypeRef ret_type = lp_build_int_vec_type(bld->gallivm, type);
1481 const char *intrinsic;
1482 LLVMValueRef res;
1483
1484 assert(type.floating);
1485 /* using the double precision conversions is a bit more complicated */
1486 assert(type.width == 32);
1487
1488 assert(lp_check_value(type, a));
1489 assert(util_cpu_caps.has_sse2);
1490
1491 /* This is relying on MXCSR rounding mode, which should always be nearest. */
1492 if (type.length == 1) {
1493 LLVMTypeRef vec_type;
1494 LLVMValueRef undef;
1495 LLVMValueRef arg;
1496 LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
1497
1498 vec_type = LLVMVectorType(bld->elem_type, 4);
1499
1500 intrinsic = "llvm.x86.sse.cvtss2si";
1501
1502 undef = LLVMGetUndef(vec_type);
1503
1504 arg = LLVMBuildInsertElement(builder, undef, a, index0, "");
1505
1506 res = lp_build_intrinsic_unary(builder, intrinsic,
1507 ret_type, arg);
1508 }
1509 else {
1510 if (type.width* type.length == 128) {
1511 intrinsic = "llvm.x86.sse2.cvtps2dq";
1512 }
1513 else {
1514 assert(type.width*type.length == 256);
1515 assert(util_cpu_caps.has_avx);
1516
1517 intrinsic = "llvm.x86.avx.cvt.ps2dq.256";
1518 }
1519 res = lp_build_intrinsic_unary(builder, intrinsic,
1520 ret_type, a);
1521 }
1522
1523 return res;
1524 }
1525
1526
1527 /**
1528 * Return the integer part of a float (vector) value (== round toward zero).
1529 * The returned value is a float (vector).
1530 * Ex: trunc(-1.5) = -1.0
1531 */
1532 LLVMValueRef
1533 lp_build_trunc(struct lp_build_context *bld,
1534 LLVMValueRef a)
1535 {
1536 LLVMBuilderRef builder = bld->gallivm->builder;
1537 const struct lp_type type = bld->type;
1538
1539 assert(type.floating);
1540 assert(lp_check_value(type, a));
1541
1542 if (sse41_rounding_available(type)) {
1543 return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_TRUNCATE);
1544 }
1545 else {
1546 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
1547 LLVMTypeRef int_vec_type = lp_build_int_vec_type(bld->gallivm, type);
1548 LLVMValueRef res;
1549 res = LLVMBuildFPToSI(builder, a, int_vec_type, "");
1550 res = LLVMBuildSIToFP(builder, res, vec_type, "");
1551 return res;
1552 }
1553 }
1554
1555
1556 /**
1557 * Return float (vector) rounded to nearest integer (vector). The returned
1558 * value is a float (vector).
1559 * Ex: round(0.9) = 1.0
1560 * Ex: round(-1.5) = -2.0
1561 */
1562 LLVMValueRef
1563 lp_build_round(struct lp_build_context *bld,
1564 LLVMValueRef a)
1565 {
1566 LLVMBuilderRef builder = bld->gallivm->builder;
1567 const struct lp_type type = bld->type;
1568
1569 assert(type.floating);
1570 assert(lp_check_value(type, a));
1571
1572 if (sse41_rounding_available(type)) {
1573 return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_NEAREST);
1574 }
1575 else {
1576 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
1577 LLVMValueRef res;
1578 res = lp_build_iround(bld, a);
1579 res = LLVMBuildSIToFP(builder, res, vec_type, "");
1580 return res;
1581 }
1582 }
1583
1584
1585 /**
1586 * Return floor of float (vector), result is a float (vector)
1587 * Ex: floor(1.1) = 1.0
1588 * Ex: floor(-1.1) = -2.0
1589 */
1590 LLVMValueRef
1591 lp_build_floor(struct lp_build_context *bld,
1592 LLVMValueRef a)
1593 {
1594 LLVMBuilderRef builder = bld->gallivm->builder;
1595 const struct lp_type type = bld->type;
1596
1597 assert(type.floating);
1598 assert(lp_check_value(type, a));
1599
1600 if (sse41_rounding_available(type)) {
1601 return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
1602 }
1603 else {
1604 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
1605 LLVMValueRef res;
1606 res = lp_build_ifloor(bld, a);
1607 res = LLVMBuildSIToFP(builder, res, vec_type, "");
1608 return res;
1609 }
1610 }
1611
1612
1613 /**
1614 * Return ceiling of float (vector), returning float (vector).
1615 * Ex: ceil( 1.1) = 2.0
1616 * Ex: ceil(-1.1) = -1.0
1617 */
1618 LLVMValueRef
1619 lp_build_ceil(struct lp_build_context *bld,
1620 LLVMValueRef a)
1621 {
1622 LLVMBuilderRef builder = bld->gallivm->builder;
1623 const struct lp_type type = bld->type;
1624
1625 assert(type.floating);
1626 assert(lp_check_value(type, a));
1627
1628 if (sse41_rounding_available(type)) {
1629 return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_CEIL);
1630 }
1631 else {
1632 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
1633 LLVMValueRef res;
1634 res = lp_build_iceil(bld, a);
1635 res = LLVMBuildSIToFP(builder, res, vec_type, "");
1636 return res;
1637 }
1638 }
1639
1640
1641 /**
1642 * Return fractional part of 'a' computed as a - floor(a)
1643 * Typically used in texture coord arithmetic.
1644 */
1645 LLVMValueRef
1646 lp_build_fract(struct lp_build_context *bld,
1647 LLVMValueRef a)
1648 {
1649 assert(bld->type.floating);
1650 return lp_build_sub(bld, a, lp_build_floor(bld, a));
1651 }
1652
1653
1654 /**
1655 * Prevent returning a fractional part of 1.0 for very small negative values of
1656 * 'a' by clamping against 0.99999(9).
1657 */
1658 static inline LLVMValueRef
1659 clamp_fract(struct lp_build_context *bld, LLVMValueRef fract)
1660 {
1661 LLVMValueRef max;
1662
1663 /* this is the largest number smaller than 1.0 representable as float */
1664 max = lp_build_const_vec(bld->gallivm, bld->type,
1665 1.0 - 1.0/(1LL << (lp_mantissa(bld->type) + 1)));
1666 return lp_build_min(bld, fract, max);
1667 }
1668
1669
1670 /**
1671 * Same as lp_build_fract, but guarantees that the result is always smaller
1672 * than one.
1673 */
1674 LLVMValueRef
1675 lp_build_fract_safe(struct lp_build_context *bld,
1676 LLVMValueRef a)
1677 {
1678 return clamp_fract(bld, lp_build_fract(bld, a));
1679 }
1680
1681
1682 /**
1683 * Return the integer part of a float (vector) value (== round toward zero).
1684 * The returned value is an integer (vector).
1685 * Ex: itrunc(-1.5) = -1
1686 */
1687 LLVMValueRef
1688 lp_build_itrunc(struct lp_build_context *bld,
1689 LLVMValueRef a)
1690 {
1691 LLVMBuilderRef builder = bld->gallivm->builder;
1692 const struct lp_type type = bld->type;
1693 LLVMTypeRef int_vec_type = lp_build_int_vec_type(bld->gallivm, type);
1694
1695 assert(type.floating);
1696 assert(lp_check_value(type, a));
1697
1698 return LLVMBuildFPToSI(builder, a, int_vec_type, "");
1699 }
1700
1701
1702 /**
1703 * Return float (vector) rounded to nearest integer (vector). The returned
1704 * value is an integer (vector).
1705 * Ex: iround(0.9) = 1
1706 * Ex: iround(-1.5) = -2
1707 */
1708 LLVMValueRef
1709 lp_build_iround(struct lp_build_context *bld,
1710 LLVMValueRef a)
1711 {
1712 LLVMBuilderRef builder = bld->gallivm->builder;
1713 const struct lp_type type = bld->type;
1714 LLVMTypeRef int_vec_type = bld->int_vec_type;
1715 LLVMValueRef res;
1716
1717 assert(type.floating);
1718
1719 assert(lp_check_value(type, a));
1720
1721 if ((util_cpu_caps.has_sse2 &&
1722 ((type.width == 32) && (type.length == 1 || type.length == 4))) ||
1723 (util_cpu_caps.has_avx && type.width == 32 && type.length == 8)) {
1724 return lp_build_iround_nearest_sse2(bld, a);
1725 }
1726 if (sse41_rounding_available(type)) {
1727 res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_NEAREST);
1728 }
1729 else {
1730 LLVMValueRef half;
1731
1732 half = lp_build_const_vec(bld->gallivm, type, 0.5);
1733
1734 if (type.sign) {
1735 LLVMTypeRef vec_type = bld->vec_type;
1736 LLVMValueRef mask = lp_build_const_int_vec(bld->gallivm, type,
1737 (unsigned long long)1 << (type.width - 1));
1738 LLVMValueRef sign;
1739
1740 /* get sign bit */
1741 sign = LLVMBuildBitCast(builder, a, int_vec_type, "");
1742 sign = LLVMBuildAnd(builder, sign, mask, "");
1743
1744 /* sign * 0.5 */
1745 half = LLVMBuildBitCast(builder, half, int_vec_type, "");
1746 half = LLVMBuildOr(builder, sign, half, "");
1747 half = LLVMBuildBitCast(builder, half, vec_type, "");
1748 }
1749
1750 res = LLVMBuildFAdd(builder, a, half, "");
1751 }
1752
1753 res = LLVMBuildFPToSI(builder, res, int_vec_type, "");
1754
1755 return res;
1756 }
1757
1758
1759 /**
1760 * Return floor of float (vector), result is an int (vector)
1761 * Ex: ifloor(1.1) = 1.0
1762 * Ex: ifloor(-1.1) = -2.0
1763 */
1764 LLVMValueRef
1765 lp_build_ifloor(struct lp_build_context *bld,
1766 LLVMValueRef a)
1767 {
1768 LLVMBuilderRef builder = bld->gallivm->builder;
1769 const struct lp_type type = bld->type;
1770 LLVMTypeRef int_vec_type = bld->int_vec_type;
1771 LLVMValueRef res;
1772
1773 assert(type.floating);
1774 assert(lp_check_value(type, a));
1775
1776 res = a;
1777 if (type.sign) {
1778 if (sse41_rounding_available(type)) {
1779 res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
1780 }
1781 else {
1782 /* Take the sign bit and add it to 1 constant */
1783 LLVMTypeRef vec_type = bld->vec_type;
1784 unsigned mantissa = lp_mantissa(type);
1785 LLVMValueRef mask = lp_build_const_int_vec(bld->gallivm, type,
1786 (unsigned long long)1 << (type.width - 1));
1787 LLVMValueRef sign;
1788 LLVMValueRef offset;
1789
1790 /* sign = a < 0 ? ~0 : 0 */
1791 sign = LLVMBuildBitCast(builder, a, int_vec_type, "");
1792 sign = LLVMBuildAnd(builder, sign, mask, "");
1793 sign = LLVMBuildAShr(builder, sign,
1794 lp_build_const_int_vec(bld->gallivm, type,
1795 type.width - 1),
1796 "ifloor.sign");
1797
1798 /* offset = -0.99999(9)f */
1799 offset = lp_build_const_vec(bld->gallivm, type,
1800 -(double)(((unsigned long long)1 << mantissa) - 10)/((unsigned long long)1 << mantissa));
1801 offset = LLVMConstBitCast(offset, int_vec_type);
1802
1803 /* offset = a < 0 ? offset : 0.0f */
1804 offset = LLVMBuildAnd(builder, offset, sign, "");
1805 offset = LLVMBuildBitCast(builder, offset, vec_type, "ifloor.offset");
1806
1807 res = LLVMBuildFAdd(builder, res, offset, "ifloor.res");
1808 }
1809 }
1810
1811 /* round to nearest (toward zero) */
1812 res = LLVMBuildFPToSI(builder, res, int_vec_type, "ifloor.res");
1813
1814 return res;
1815 }
1816
1817
1818 /**
1819 * Return ceiling of float (vector), returning int (vector).
1820 * Ex: iceil( 1.1) = 2
1821 * Ex: iceil(-1.1) = -1
1822 */
1823 LLVMValueRef
1824 lp_build_iceil(struct lp_build_context *bld,
1825 LLVMValueRef a)
1826 {
1827 LLVMBuilderRef builder = bld->gallivm->builder;
1828 const struct lp_type type = bld->type;
1829 LLVMTypeRef int_vec_type = bld->int_vec_type;
1830 LLVMValueRef res;
1831
1832 assert(type.floating);
1833 assert(lp_check_value(type, a));
1834
1835 if (sse41_rounding_available(type)) {
1836 res = lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_CEIL);
1837 }
1838 else {
1839 LLVMTypeRef vec_type = bld->vec_type;
1840 unsigned mantissa = lp_mantissa(type);
1841 LLVMValueRef offset;
1842
1843 /* offset = 0.99999(9)f */
1844 offset = lp_build_const_vec(bld->gallivm, type,
1845 (double)(((unsigned long long)1 << mantissa) - 10)/((unsigned long long)1 << mantissa));
1846
1847 if (type.sign) {
1848 LLVMValueRef mask = lp_build_const_int_vec(bld->gallivm, type,
1849 (unsigned long long)1 << (type.width - 1));
1850 LLVMValueRef sign;
1851
1852 /* sign = a < 0 ? 0 : ~0 */
1853 sign = LLVMBuildBitCast(builder, a, int_vec_type, "");
1854 sign = LLVMBuildAnd(builder, sign, mask, "");
1855 sign = LLVMBuildAShr(builder, sign,
1856 lp_build_const_int_vec(bld->gallivm, type,
1857 type.width - 1),
1858 "iceil.sign");
1859 sign = LLVMBuildNot(builder, sign, "iceil.not");
1860
1861 /* offset = a < 0 ? 0.0 : offset */
1862 offset = LLVMConstBitCast(offset, int_vec_type);
1863 offset = LLVMBuildAnd(builder, offset, sign, "");
1864 offset = LLVMBuildBitCast(builder, offset, vec_type, "iceil.offset");
1865 }
1866
1867 res = LLVMBuildFAdd(builder, a, offset, "iceil.res");
1868 }
1869
1870 /* round to nearest (toward zero) */
1871 res = LLVMBuildFPToSI(builder, res, int_vec_type, "iceil.res");
1872
1873 return res;
1874 }
1875
1876
1877 /**
1878 * Combined ifloor() & fract().
1879 *
1880 * Preferred to calling the functions separately, as it will ensure that the
1881 * strategy (floor() vs ifloor()) that results in less redundant work is used.
1882 */
1883 void
1884 lp_build_ifloor_fract(struct lp_build_context *bld,
1885 LLVMValueRef a,
1886 LLVMValueRef *out_ipart,
1887 LLVMValueRef *out_fpart)
1888 {
1889 LLVMBuilderRef builder = bld->gallivm->builder;
1890 const struct lp_type type = bld->type;
1891 LLVMValueRef ipart;
1892
1893 assert(type.floating);
1894 assert(lp_check_value(type, a));
1895
1896 if (sse41_rounding_available(type)) {
1897 /*
1898 * floor() is easier.
1899 */
1900
1901 ipart = lp_build_floor(bld, a);
1902 *out_fpart = LLVMBuildFSub(builder, a, ipart, "fpart");
1903 *out_ipart = LLVMBuildFPToSI(builder, ipart, bld->int_vec_type, "ipart");
1904 }
1905 else {
1906 /*
1907 * ifloor() is easier.
1908 */
1909
1910 *out_ipart = lp_build_ifloor(bld, a);
1911 ipart = LLVMBuildSIToFP(builder, *out_ipart, bld->vec_type, "ipart");
1912 *out_fpart = LLVMBuildFSub(builder, a, ipart, "fpart");
1913 }
1914 }
1915
1916
1917 /**
1918 * Same as lp_build_ifloor_fract, but guarantees that the fractional part is
1919 * always smaller than one.
1920 */
1921 void
1922 lp_build_ifloor_fract_safe(struct lp_build_context *bld,
1923 LLVMValueRef a,
1924 LLVMValueRef *out_ipart,
1925 LLVMValueRef *out_fpart)
1926 {
1927 lp_build_ifloor_fract(bld, a, out_ipart, out_fpart);
1928 *out_fpart = clamp_fract(bld, *out_fpart);
1929 }
1930
1931
1932 LLVMValueRef
1933 lp_build_sqrt(struct lp_build_context *bld,
1934 LLVMValueRef a)
1935 {
1936 LLVMBuilderRef builder = bld->gallivm->builder;
1937 const struct lp_type type = bld->type;
1938 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
1939 char intrinsic[32];
1940
1941 assert(lp_check_value(type, a));
1942
1943 /* TODO: optimize the constant case */
1944
1945 assert(type.floating);
1946 if (type.length == 1) {
1947 util_snprintf(intrinsic, sizeof intrinsic, "llvm.sqrt.f%u", type.width);
1948 }
1949 else {
1950 util_snprintf(intrinsic, sizeof intrinsic, "llvm.sqrt.v%uf%u", type.length, type.width);
1951 }
1952
1953 return lp_build_intrinsic_unary(builder, intrinsic, vec_type, a);
1954 }
1955
1956
1957 /**
1958 * Do one Newton-Raphson step to improve reciprocate precision:
1959 *
1960 * x_{i+1} = x_i * (2 - a * x_i)
1961 *
1962 * XXX: Unfortunately this won't give IEEE-754 conformant results for 0 or
1963 * +/-Inf, giving NaN instead. Certain applications rely on this behavior,
1964 * such as Google Earth, which does RCP(RSQRT(0.0) when drawing the Earth's
1965 * halo. It would be necessary to clamp the argument to prevent this.
1966 *
1967 * See also:
1968 * - http://en.wikipedia.org/wiki/Division_(digital)#Newton.E2.80.93Raphson_division
1969 * - http://softwarecommunity.intel.com/articles/eng/1818.htm
1970 */
1971 static INLINE LLVMValueRef
1972 lp_build_rcp_refine(struct lp_build_context *bld,
1973 LLVMValueRef a,
1974 LLVMValueRef rcp_a)
1975 {
1976 LLVMBuilderRef builder = bld->gallivm->builder;
1977 LLVMValueRef two = lp_build_const_vec(bld->gallivm, bld->type, 2.0);
1978 LLVMValueRef res;
1979
1980 res = LLVMBuildFMul(builder, a, rcp_a, "");
1981 res = LLVMBuildFSub(builder, two, res, "");
1982 res = LLVMBuildFMul(builder, rcp_a, res, "");
1983
1984 return res;
1985 }
1986
1987
1988 LLVMValueRef
1989 lp_build_rcp(struct lp_build_context *bld,
1990 LLVMValueRef a)
1991 {
1992 LLVMBuilderRef builder = bld->gallivm->builder;
1993 const struct lp_type type = bld->type;
1994
1995 assert(lp_check_value(type, a));
1996
1997 if(a == bld->zero)
1998 return bld->undef;
1999 if(a == bld->one)
2000 return bld->one;
2001 if(a == bld->undef)
2002 return bld->undef;
2003
2004 assert(type.floating);
2005
2006 if(LLVMIsConstant(a))
2007 return LLVMConstFDiv(bld->one, a);
2008
2009 /*
2010 * We don't use RCPPS because:
2011 * - it only has 10bits of precision
2012 * - it doesn't even get the reciprocate of 1.0 exactly
2013 * - doing Newton-Rapshon steps yields wrong (NaN) values for 0.0 or Inf
2014 * - for recent processors the benefit over DIVPS is marginal, a case
2015 * dependent
2016 *
2017 * We could still use it on certain processors if benchmarks show that the
2018 * RCPPS plus necessary workarounds are still preferrable to DIVPS; or for
2019 * particular uses that require less workarounds.
2020 */
2021
2022 if (FALSE && ((util_cpu_caps.has_sse && type.width == 32 && type.length == 4) ||
2023 (util_cpu_caps.has_avx && type.width == 32 && type.length == 8))){
2024 const unsigned num_iterations = 0;
2025 LLVMValueRef res;
2026 unsigned i;
2027 const char *intrinsic = NULL;
2028
2029 if (type.length == 4) {
2030 intrinsic = "llvm.x86.sse.rcp.ps";
2031 }
2032 else {
2033 intrinsic = "llvm.x86.avx.rcp.ps.256";
2034 }
2035
2036 res = lp_build_intrinsic_unary(builder, intrinsic, bld->vec_type, a);
2037
2038 for (i = 0; i < num_iterations; ++i) {
2039 res = lp_build_rcp_refine(bld, a, res);
2040 }
2041
2042 return res;
2043 }
2044
2045 return LLVMBuildFDiv(builder, bld->one, a, "");
2046 }
2047
2048
2049 /**
2050 * Do one Newton-Raphson step to improve rsqrt precision:
2051 *
2052 * x_{i+1} = 0.5 * x_i * (3.0 - a * x_i * x_i)
2053 *
2054 * See also Intel 64 and IA-32 Architectures Optimization Manual.
2055 */
2056 static INLINE LLVMValueRef
2057 lp_build_rsqrt_refine(struct lp_build_context *bld,
2058 LLVMValueRef a,
2059 LLVMValueRef rsqrt_a)
2060 {
2061 LLVMBuilderRef builder = bld->gallivm->builder;
2062 LLVMValueRef half = lp_build_const_vec(bld->gallivm, bld->type, 0.5);
2063 LLVMValueRef three = lp_build_const_vec(bld->gallivm, bld->type, 3.0);
2064 LLVMValueRef res;
2065
2066 res = LLVMBuildFMul(builder, rsqrt_a, rsqrt_a, "");
2067 res = LLVMBuildFMul(builder, a, res, "");
2068 res = LLVMBuildFSub(builder, three, res, "");
2069 res = LLVMBuildFMul(builder, rsqrt_a, res, "");
2070 res = LLVMBuildFMul(builder, half, res, "");
2071
2072 return res;
2073 }
2074
2075
2076 /**
2077 * Generate 1/sqrt(a).
2078 * Result is undefined for values < 0, infinity for +0.
2079 */
2080 LLVMValueRef
2081 lp_build_rsqrt(struct lp_build_context *bld,
2082 LLVMValueRef a)
2083 {
2084 LLVMBuilderRef builder = bld->gallivm->builder;
2085 const struct lp_type type = bld->type;
2086
2087 assert(lp_check_value(type, a));
2088
2089 assert(type.floating);
2090
2091 /*
2092 * This should be faster but all denormals will end up as infinity.
2093 */
2094 if (0 && ((util_cpu_caps.has_sse && type.width == 32 && type.length == 4) ||
2095 (util_cpu_caps.has_avx && type.width == 32 && type.length == 8))) {
2096 const unsigned num_iterations = 1;
2097 LLVMValueRef res;
2098 unsigned i;
2099 const char *intrinsic = NULL;
2100
2101 if (type.length == 4) {
2102 intrinsic = "llvm.x86.sse.rsqrt.ps";
2103 }
2104 else {
2105 intrinsic = "llvm.x86.avx.rsqrt.ps.256";
2106 }
2107 if (num_iterations) {
2108 /*
2109 * Newton-Raphson will result in NaN instead of infinity for zero,
2110 * and NaN instead of zero for infinity.
2111 * Also, need to ensure rsqrt(1.0) == 1.0.
2112 * All numbers smaller than FLT_MIN will result in +infinity
2113 * (rsqrtps treats all denormals as zero).
2114 */
2115 /*
2116 * Certain non-c99 compilers don't know INFINITY and might not support
2117 * hacks to evaluate it at compile time neither.
2118 */
2119 const unsigned posinf_int = 0x7F800000;
2120 LLVMValueRef cmp;
2121 LLVMValueRef flt_min = lp_build_const_vec(bld->gallivm, type, FLT_MIN);
2122 LLVMValueRef inf = lp_build_const_int_vec(bld->gallivm, type, posinf_int);
2123
2124 inf = LLVMBuildBitCast(builder, inf, lp_build_vec_type(bld->gallivm, type), "");
2125
2126 res = lp_build_intrinsic_unary(builder, intrinsic, bld->vec_type, a);
2127
2128 for (i = 0; i < num_iterations; ++i) {
2129 res = lp_build_rsqrt_refine(bld, a, res);
2130 }
2131 cmp = lp_build_compare(bld->gallivm, type, PIPE_FUNC_LESS, a, flt_min);
2132 res = lp_build_select(bld, cmp, inf, res);
2133 cmp = lp_build_compare(bld->gallivm, type, PIPE_FUNC_EQUAL, a, inf);
2134 res = lp_build_select(bld, cmp, bld->zero, res);
2135 cmp = lp_build_compare(bld->gallivm, type, PIPE_FUNC_EQUAL, a, bld->one);
2136 res = lp_build_select(bld, cmp, bld->one, res);
2137 }
2138 else {
2139 /* rsqrt(1.0) != 1.0 here */
2140 res = lp_build_intrinsic_unary(builder, intrinsic, bld->vec_type, a);
2141
2142 }
2143
2144 return res;
2145 }
2146
2147 return lp_build_rcp(bld, lp_build_sqrt(bld, a));
2148 }
2149
2150
2151 /**
2152 * Generate sin(a) using SSE2
2153 */
2154 LLVMValueRef
2155 lp_build_sin(struct lp_build_context *bld,
2156 LLVMValueRef a)
2157 {
2158 struct gallivm_state *gallivm = bld->gallivm;
2159 LLVMBuilderRef builder = gallivm->builder;
2160 struct lp_type int_type = lp_int_type(bld->type);
2161 LLVMBuilderRef b = builder;
2162
2163 /*
2164 * take the absolute value,
2165 * x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
2166 */
2167
2168 LLVMValueRef inv_sig_mask = lp_build_const_int_vec(gallivm, bld->type, ~0x80000000);
2169 LLVMValueRef a_v4si = LLVMBuildBitCast(b, a, bld->int_vec_type, "a_v4si");
2170
2171 LLVMValueRef absi = LLVMBuildAnd(b, a_v4si, inv_sig_mask, "absi");
2172 LLVMValueRef x_abs = LLVMBuildBitCast(b, absi, bld->vec_type, "x_abs");
2173
2174 /*
2175 * extract the sign bit (upper one)
2176 * sign_bit = _mm_and_ps(sign_bit, *(v4sf*)_ps_sign_mask);
2177 */
2178 LLVMValueRef sig_mask = lp_build_const_int_vec(gallivm, bld->type, 0x80000000);
2179 LLVMValueRef sign_bit_i = LLVMBuildAnd(b, a_v4si, sig_mask, "sign_bit_i");
2180
2181 /*
2182 * scale by 4/Pi
2183 * y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
2184 */
2185
2186 LLVMValueRef FOPi = lp_build_const_vec(gallivm, bld->type, 1.27323954473516);
2187 LLVMValueRef scale_y = LLVMBuildFMul(b, x_abs, FOPi, "scale_y");
2188
2189 /*
2190 * store the integer part of y in mm0
2191 * emm2 = _mm_cvttps_epi32(y);
2192 */
2193
2194 LLVMValueRef emm2_i = LLVMBuildFPToSI(b, scale_y, bld->int_vec_type, "emm2_i");
2195
2196 /*
2197 * j=(j+1) & (~1) (see the cephes sources)
2198 * emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
2199 */
2200
2201 LLVMValueRef all_one = lp_build_const_int_vec(gallivm, bld->type, 1);
2202 LLVMValueRef emm2_add = LLVMBuildAdd(b, emm2_i, all_one, "emm2_add");
2203 /*
2204 * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
2205 */
2206 LLVMValueRef inv_one = lp_build_const_int_vec(gallivm, bld->type, ~1);
2207 LLVMValueRef emm2_and = LLVMBuildAnd(b, emm2_add, inv_one, "emm2_and");
2208
2209 /*
2210 * y = _mm_cvtepi32_ps(emm2);
2211 */
2212 LLVMValueRef y_2 = LLVMBuildSIToFP(b, emm2_and, bld->vec_type, "y_2");
2213
2214 /* get the swap sign flag
2215 * emm0 = _mm_and_si128(emm2, *(v4si*)_pi32_4);
2216 */
2217 LLVMValueRef pi32_4 = lp_build_const_int_vec(gallivm, bld->type, 4);
2218 LLVMValueRef emm0_and = LLVMBuildAnd(b, emm2_add, pi32_4, "emm0_and");
2219
2220 /*
2221 * emm2 = _mm_slli_epi32(emm0, 29);
2222 */
2223 LLVMValueRef const_29 = lp_build_const_int_vec(gallivm, bld->type, 29);
2224 LLVMValueRef swap_sign_bit = LLVMBuildShl(b, emm0_and, const_29, "swap_sign_bit");
2225
2226 /*
2227 * get the polynom selection mask
2228 * there is one polynom for 0 <= x <= Pi/4
2229 * and another one for Pi/4<x<=Pi/2
2230 * Both branches will be computed.
2231 *
2232 * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
2233 * emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
2234 */
2235
2236 LLVMValueRef pi32_2 = lp_build_const_int_vec(gallivm, bld->type, 2);
2237 LLVMValueRef emm2_3 = LLVMBuildAnd(b, emm2_and, pi32_2, "emm2_3");
2238 LLVMValueRef poly_mask = lp_build_compare(gallivm,
2239 int_type, PIPE_FUNC_EQUAL,
2240 emm2_3, lp_build_const_int_vec(gallivm, bld->type, 0));
2241 /*
2242 * sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
2243 */
2244 LLVMValueRef sign_bit_1 = LLVMBuildXor(b, sign_bit_i, swap_sign_bit, "sign_bit");
2245
2246 /*
2247 * _PS_CONST(minus_cephes_DP1, -0.78515625);
2248 * _PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
2249 * _PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
2250 */
2251 LLVMValueRef DP1 = lp_build_const_vec(gallivm, bld->type, -0.78515625);
2252 LLVMValueRef DP2 = lp_build_const_vec(gallivm, bld->type, -2.4187564849853515625e-4);
2253 LLVMValueRef DP3 = lp_build_const_vec(gallivm, bld->type, -3.77489497744594108e-8);
2254
2255 /*
2256 * The magic pass: "Extended precision modular arithmetic"
2257 * x = ((x - y * DP1) - y * DP2) - y * DP3;
2258 * xmm1 = _mm_mul_ps(y, xmm1);
2259 * xmm2 = _mm_mul_ps(y, xmm2);
2260 * xmm3 = _mm_mul_ps(y, xmm3);
2261 */
2262 LLVMValueRef xmm1 = LLVMBuildFMul(b, y_2, DP1, "xmm1");
2263 LLVMValueRef xmm2 = LLVMBuildFMul(b, y_2, DP2, "xmm2");
2264 LLVMValueRef xmm3 = LLVMBuildFMul(b, y_2, DP3, "xmm3");
2265
2266 /*
2267 * x = _mm_add_ps(x, xmm1);
2268 * x = _mm_add_ps(x, xmm2);
2269 * x = _mm_add_ps(x, xmm3);
2270 */
2271
2272 LLVMValueRef x_1 = LLVMBuildFAdd(b, x_abs, xmm1, "x_1");
2273 LLVMValueRef x_2 = LLVMBuildFAdd(b, x_1, xmm2, "x_2");
2274 LLVMValueRef x_3 = LLVMBuildFAdd(b, x_2, xmm3, "x_3");
2275
2276 /*
2277 * Evaluate the first polynom (0 <= x <= Pi/4)
2278 *
2279 * z = _mm_mul_ps(x,x);
2280 */
2281 LLVMValueRef z = LLVMBuildFMul(b, x_3, x_3, "z");
2282
2283 /*
2284 * _PS_CONST(coscof_p0, 2.443315711809948E-005);
2285 * _PS_CONST(coscof_p1, -1.388731625493765E-003);
2286 * _PS_CONST(coscof_p2, 4.166664568298827E-002);
2287 */
2288 LLVMValueRef coscof_p0 = lp_build_const_vec(gallivm, bld->type, 2.443315711809948E-005);
2289 LLVMValueRef coscof_p1 = lp_build_const_vec(gallivm, bld->type, -1.388731625493765E-003);
2290 LLVMValueRef coscof_p2 = lp_build_const_vec(gallivm, bld->type, 4.166664568298827E-002);
2291
2292 /*
2293 * y = *(v4sf*)_ps_coscof_p0;
2294 * y = _mm_mul_ps(y, z);
2295 */
2296 LLVMValueRef y_3 = LLVMBuildFMul(b, z, coscof_p0, "y_3");
2297 LLVMValueRef y_4 = LLVMBuildFAdd(b, y_3, coscof_p1, "y_4");
2298 LLVMValueRef y_5 = LLVMBuildFMul(b, y_4, z, "y_5");
2299 LLVMValueRef y_6 = LLVMBuildFAdd(b, y_5, coscof_p2, "y_6");
2300 LLVMValueRef y_7 = LLVMBuildFMul(b, y_6, z, "y_7");
2301 LLVMValueRef y_8 = LLVMBuildFMul(b, y_7, z, "y_8");
2302
2303
2304 /*
2305 * tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
2306 * y = _mm_sub_ps(y, tmp);
2307 * y = _mm_add_ps(y, *(v4sf*)_ps_1);
2308 */
2309 LLVMValueRef half = lp_build_const_vec(gallivm, bld->type, 0.5);
2310 LLVMValueRef tmp = LLVMBuildFMul(b, z, half, "tmp");
2311 LLVMValueRef y_9 = LLVMBuildFSub(b, y_8, tmp, "y_8");
2312 LLVMValueRef one = lp_build_const_vec(gallivm, bld->type, 1.0);
2313 LLVMValueRef y_10 = LLVMBuildFAdd(b, y_9, one, "y_9");
2314
2315 /*
2316 * _PS_CONST(sincof_p0, -1.9515295891E-4);
2317 * _PS_CONST(sincof_p1, 8.3321608736E-3);
2318 * _PS_CONST(sincof_p2, -1.6666654611E-1);
2319 */
2320 LLVMValueRef sincof_p0 = lp_build_const_vec(gallivm, bld->type, -1.9515295891E-4);
2321 LLVMValueRef sincof_p1 = lp_build_const_vec(gallivm, bld->type, 8.3321608736E-3);
2322 LLVMValueRef sincof_p2 = lp_build_const_vec(gallivm, bld->type, -1.6666654611E-1);
2323
2324 /*
2325 * Evaluate the second polynom (Pi/4 <= x <= 0)
2326 *
2327 * y2 = *(v4sf*)_ps_sincof_p0;
2328 * y2 = _mm_mul_ps(y2, z);
2329 * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
2330 * y2 = _mm_mul_ps(y2, z);
2331 * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
2332 * y2 = _mm_mul_ps(y2, z);
2333 * y2 = _mm_mul_ps(y2, x);
2334 * y2 = _mm_add_ps(y2, x);
2335 */
2336
2337 LLVMValueRef y2_3 = LLVMBuildFMul(b, z, sincof_p0, "y2_3");
2338 LLVMValueRef y2_4 = LLVMBuildFAdd(b, y2_3, sincof_p1, "y2_4");
2339 LLVMValueRef y2_5 = LLVMBuildFMul(b, y2_4, z, "y2_5");
2340 LLVMValueRef y2_6 = LLVMBuildFAdd(b, y2_5, sincof_p2, "y2_6");
2341 LLVMValueRef y2_7 = LLVMBuildFMul(b, y2_6, z, "y2_7");
2342 LLVMValueRef y2_8 = LLVMBuildFMul(b, y2_7, x_3, "y2_8");
2343 LLVMValueRef y2_9 = LLVMBuildFAdd(b, y2_8, x_3, "y2_9");
2344
2345 /*
2346 * select the correct result from the two polynoms
2347 * xmm3 = poly_mask;
2348 * y2 = _mm_and_ps(xmm3, y2); //, xmm3);
2349 * y = _mm_andnot_ps(xmm3, y);
2350 * y = _mm_add_ps(y,y2);
2351 */
2352 LLVMValueRef y2_i = LLVMBuildBitCast(b, y2_9, bld->int_vec_type, "y2_i");
2353 LLVMValueRef y_i = LLVMBuildBitCast(b, y_10, bld->int_vec_type, "y_i");
2354 LLVMValueRef y2_and = LLVMBuildAnd(b, y2_i, poly_mask, "y2_and");
2355 LLVMValueRef inv = lp_build_const_int_vec(gallivm, bld->type, ~0);
2356 LLVMValueRef poly_mask_inv = LLVMBuildXor(b, poly_mask, inv, "poly_mask_inv");
2357 LLVMValueRef y_and = LLVMBuildAnd(b, y_i, poly_mask_inv, "y_and");
2358 LLVMValueRef y_combine = LLVMBuildAdd(b, y_and, y2_and, "y_combine");
2359
2360 /*
2361 * update the sign
2362 * y = _mm_xor_ps(y, sign_bit);
2363 */
2364 LLVMValueRef y_sign = LLVMBuildXor(b, y_combine, sign_bit_1, "y_sin");
2365 LLVMValueRef y_result = LLVMBuildBitCast(b, y_sign, bld->vec_type, "y_result");
2366 return y_result;
2367 }
2368
2369
2370 /**
2371 * Generate cos(a) using SSE2
2372 */
2373 LLVMValueRef
2374 lp_build_cos(struct lp_build_context *bld,
2375 LLVMValueRef a)
2376 {
2377 struct gallivm_state *gallivm = bld->gallivm;
2378 LLVMBuilderRef builder = gallivm->builder;
2379 struct lp_type int_type = lp_int_type(bld->type);
2380 LLVMBuilderRef b = builder;
2381
2382 /*
2383 * take the absolute value,
2384 * x = _mm_and_ps(x, *(v4sf*)_ps_inv_sign_mask);
2385 */
2386
2387 LLVMValueRef inv_sig_mask = lp_build_const_int_vec(gallivm, bld->type, ~0x80000000);
2388 LLVMValueRef a_v4si = LLVMBuildBitCast(b, a, bld->int_vec_type, "a_v4si");
2389
2390 LLVMValueRef absi = LLVMBuildAnd(b, a_v4si, inv_sig_mask, "absi");
2391 LLVMValueRef x_abs = LLVMBuildBitCast(b, absi, bld->vec_type, "x_abs");
2392
2393 /*
2394 * scale by 4/Pi
2395 * y = _mm_mul_ps(x, *(v4sf*)_ps_cephes_FOPI);
2396 */
2397
2398 LLVMValueRef FOPi = lp_build_const_vec(gallivm, bld->type, 1.27323954473516);
2399 LLVMValueRef scale_y = LLVMBuildFMul(b, x_abs, FOPi, "scale_y");
2400
2401 /*
2402 * store the integer part of y in mm0
2403 * emm2 = _mm_cvttps_epi32(y);
2404 */
2405
2406 LLVMValueRef emm2_i = LLVMBuildFPToSI(b, scale_y, bld->int_vec_type, "emm2_i");
2407
2408 /*
2409 * j=(j+1) & (~1) (see the cephes sources)
2410 * emm2 = _mm_add_epi32(emm2, *(v4si*)_pi32_1);
2411 */
2412
2413 LLVMValueRef all_one = lp_build_const_int_vec(gallivm, bld->type, 1);
2414 LLVMValueRef emm2_add = LLVMBuildAdd(b, emm2_i, all_one, "emm2_add");
2415 /*
2416 * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_inv1);
2417 */
2418 LLVMValueRef inv_one = lp_build_const_int_vec(gallivm, bld->type, ~1);
2419 LLVMValueRef emm2_and = LLVMBuildAnd(b, emm2_add, inv_one, "emm2_and");
2420
2421 /*
2422 * y = _mm_cvtepi32_ps(emm2);
2423 */
2424 LLVMValueRef y_2 = LLVMBuildSIToFP(b, emm2_and, bld->vec_type, "y_2");
2425
2426
2427 /*
2428 * emm2 = _mm_sub_epi32(emm2, *(v4si*)_pi32_2);
2429 */
2430 LLVMValueRef const_2 = lp_build_const_int_vec(gallivm, bld->type, 2);
2431 LLVMValueRef emm2_2 = LLVMBuildSub(b, emm2_and, const_2, "emm2_2");
2432
2433
2434 /* get the swap sign flag
2435 * emm0 = _mm_andnot_si128(emm2, *(v4si*)_pi32_4);
2436 */
2437 LLVMValueRef inv = lp_build_const_int_vec(gallivm, bld->type, ~0);
2438 LLVMValueRef emm0_not = LLVMBuildXor(b, emm2_2, inv, "emm0_not");
2439 LLVMValueRef pi32_4 = lp_build_const_int_vec(gallivm, bld->type, 4);
2440 LLVMValueRef emm0_and = LLVMBuildAnd(b, emm0_not, pi32_4, "emm0_and");
2441
2442 /*
2443 * emm2 = _mm_slli_epi32(emm0, 29);
2444 */
2445 LLVMValueRef const_29 = lp_build_const_int_vec(gallivm, bld->type, 29);
2446 LLVMValueRef sign_bit = LLVMBuildShl(b, emm0_and, const_29, "sign_bit");
2447
2448 /*
2449 * get the polynom selection mask
2450 * there is one polynom for 0 <= x <= Pi/4
2451 * and another one for Pi/4<x<=Pi/2
2452 * Both branches will be computed.
2453 *
2454 * emm2 = _mm_and_si128(emm2, *(v4si*)_pi32_2);
2455 * emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
2456 */
2457
2458 LLVMValueRef pi32_2 = lp_build_const_int_vec(gallivm, bld->type, 2);
2459 LLVMValueRef emm2_3 = LLVMBuildAnd(b, emm2_2, pi32_2, "emm2_3");
2460 LLVMValueRef poly_mask = lp_build_compare(gallivm,
2461 int_type, PIPE_FUNC_EQUAL,
2462 emm2_3, lp_build_const_int_vec(gallivm, bld->type, 0));
2463
2464 /*
2465 * _PS_CONST(minus_cephes_DP1, -0.78515625);
2466 * _PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4);
2467 * _PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8);
2468 */
2469 LLVMValueRef DP1 = lp_build_const_vec(gallivm, bld->type, -0.78515625);
2470 LLVMValueRef DP2 = lp_build_const_vec(gallivm, bld->type, -2.4187564849853515625e-4);
2471 LLVMValueRef DP3 = lp_build_const_vec(gallivm, bld->type, -3.77489497744594108e-8);
2472
2473 /*
2474 * The magic pass: "Extended precision modular arithmetic"
2475 * x = ((x - y * DP1) - y * DP2) - y * DP3;
2476 * xmm1 = _mm_mul_ps(y, xmm1);
2477 * xmm2 = _mm_mul_ps(y, xmm2);
2478 * xmm3 = _mm_mul_ps(y, xmm3);
2479 */
2480 LLVMValueRef xmm1 = LLVMBuildFMul(b, y_2, DP1, "xmm1");
2481 LLVMValueRef xmm2 = LLVMBuildFMul(b, y_2, DP2, "xmm2");
2482 LLVMValueRef xmm3 = LLVMBuildFMul(b, y_2, DP3, "xmm3");
2483
2484 /*
2485 * x = _mm_add_ps(x, xmm1);
2486 * x = _mm_add_ps(x, xmm2);
2487 * x = _mm_add_ps(x, xmm3);
2488 */
2489
2490 LLVMValueRef x_1 = LLVMBuildFAdd(b, x_abs, xmm1, "x_1");
2491 LLVMValueRef x_2 = LLVMBuildFAdd(b, x_1, xmm2, "x_2");
2492 LLVMValueRef x_3 = LLVMBuildFAdd(b, x_2, xmm3, "x_3");
2493
2494 /*
2495 * Evaluate the first polynom (0 <= x <= Pi/4)
2496 *
2497 * z = _mm_mul_ps(x,x);
2498 */
2499 LLVMValueRef z = LLVMBuildFMul(b, x_3, x_3, "z");
2500
2501 /*
2502 * _PS_CONST(coscof_p0, 2.443315711809948E-005);
2503 * _PS_CONST(coscof_p1, -1.388731625493765E-003);
2504 * _PS_CONST(coscof_p2, 4.166664568298827E-002);
2505 */
2506 LLVMValueRef coscof_p0 = lp_build_const_vec(gallivm, bld->type, 2.443315711809948E-005);
2507 LLVMValueRef coscof_p1 = lp_build_const_vec(gallivm, bld->type, -1.388731625493765E-003);
2508 LLVMValueRef coscof_p2 = lp_build_const_vec(gallivm, bld->type, 4.166664568298827E-002);
2509
2510 /*
2511 * y = *(v4sf*)_ps_coscof_p0;
2512 * y = _mm_mul_ps(y, z);
2513 */
2514 LLVMValueRef y_3 = LLVMBuildFMul(b, z, coscof_p0, "y_3");
2515 LLVMValueRef y_4 = LLVMBuildFAdd(b, y_3, coscof_p1, "y_4");
2516 LLVMValueRef y_5 = LLVMBuildFMul(b, y_4, z, "y_5");
2517 LLVMValueRef y_6 = LLVMBuildFAdd(b, y_5, coscof_p2, "y_6");
2518 LLVMValueRef y_7 = LLVMBuildFMul(b, y_6, z, "y_7");
2519 LLVMValueRef y_8 = LLVMBuildFMul(b, y_7, z, "y_8");
2520
2521
2522 /*
2523 * tmp = _mm_mul_ps(z, *(v4sf*)_ps_0p5);
2524 * y = _mm_sub_ps(y, tmp);
2525 * y = _mm_add_ps(y, *(v4sf*)_ps_1);
2526 */
2527 LLVMValueRef half = lp_build_const_vec(gallivm, bld->type, 0.5);
2528 LLVMValueRef tmp = LLVMBuildFMul(b, z, half, "tmp");
2529 LLVMValueRef y_9 = LLVMBuildFSub(b, y_8, tmp, "y_8");
2530 LLVMValueRef one = lp_build_const_vec(gallivm, bld->type, 1.0);
2531 LLVMValueRef y_10 = LLVMBuildFAdd(b, y_9, one, "y_9");
2532
2533 /*
2534 * _PS_CONST(sincof_p0, -1.9515295891E-4);
2535 * _PS_CONST(sincof_p1, 8.3321608736E-3);
2536 * _PS_CONST(sincof_p2, -1.6666654611E-1);
2537 */
2538 LLVMValueRef sincof_p0 = lp_build_const_vec(gallivm, bld->type, -1.9515295891E-4);
2539 LLVMValueRef sincof_p1 = lp_build_const_vec(gallivm, bld->type, 8.3321608736E-3);
2540 LLVMValueRef sincof_p2 = lp_build_const_vec(gallivm, bld->type, -1.6666654611E-1);
2541
2542 /*
2543 * Evaluate the second polynom (Pi/4 <= x <= 0)
2544 *
2545 * y2 = *(v4sf*)_ps_sincof_p0;
2546 * y2 = _mm_mul_ps(y2, z);
2547 * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p1);
2548 * y2 = _mm_mul_ps(y2, z);
2549 * y2 = _mm_add_ps(y2, *(v4sf*)_ps_sincof_p2);
2550 * y2 = _mm_mul_ps(y2, z);
2551 * y2 = _mm_mul_ps(y2, x);
2552 * y2 = _mm_add_ps(y2, x);
2553 */
2554
2555 LLVMValueRef y2_3 = LLVMBuildFMul(b, z, sincof_p0, "y2_3");
2556 LLVMValueRef y2_4 = LLVMBuildFAdd(b, y2_3, sincof_p1, "y2_4");
2557 LLVMValueRef y2_5 = LLVMBuildFMul(b, y2_4, z, "y2_5");
2558 LLVMValueRef y2_6 = LLVMBuildFAdd(b, y2_5, sincof_p2, "y2_6");
2559 LLVMValueRef y2_7 = LLVMBuildFMul(b, y2_6, z, "y2_7");
2560 LLVMValueRef y2_8 = LLVMBuildFMul(b, y2_7, x_3, "y2_8");
2561 LLVMValueRef y2_9 = LLVMBuildFAdd(b, y2_8, x_3, "y2_9");
2562
2563 /*
2564 * select the correct result from the two polynoms
2565 * xmm3 = poly_mask;
2566 * y2 = _mm_and_ps(xmm3, y2); //, xmm3);
2567 * y = _mm_andnot_ps(xmm3, y);
2568 * y = _mm_add_ps(y,y2);
2569 */
2570 LLVMValueRef y2_i = LLVMBuildBitCast(b, y2_9, bld->int_vec_type, "y2_i");
2571 LLVMValueRef y_i = LLVMBuildBitCast(b, y_10, bld->int_vec_type, "y_i");
2572 LLVMValueRef y2_and = LLVMBuildAnd(b, y2_i, poly_mask, "y2_and");
2573 LLVMValueRef poly_mask_inv = LLVMBuildXor(b, poly_mask, inv, "poly_mask_inv");
2574 LLVMValueRef y_and = LLVMBuildAnd(b, y_i, poly_mask_inv, "y_and");
2575 LLVMValueRef y_combine = LLVMBuildAdd(b, y_and, y2_and, "y_combine");
2576
2577 /*
2578 * update the sign
2579 * y = _mm_xor_ps(y, sign_bit);
2580 */
2581 LLVMValueRef y_sign = LLVMBuildXor(b, y_combine, sign_bit, "y_sin");
2582 LLVMValueRef y_result = LLVMBuildBitCast(b, y_sign, bld->vec_type, "y_result");
2583 return y_result;
2584 }
2585
2586
2587 /**
2588 * Generate pow(x, y)
2589 */
2590 LLVMValueRef
2591 lp_build_pow(struct lp_build_context *bld,
2592 LLVMValueRef x,
2593 LLVMValueRef y)
2594 {
2595 /* TODO: optimize the constant case */
2596 if (gallivm_debug & GALLIVM_DEBUG_PERF &&
2597 LLVMIsConstant(x) && LLVMIsConstant(y)) {
2598 debug_printf("%s: inefficient/imprecise constant arithmetic\n",
2599 __FUNCTION__);
2600 }
2601
2602 return lp_build_exp2(bld, lp_build_mul(bld, lp_build_log2(bld, x), y));
2603 }
2604
2605
2606 /**
2607 * Generate exp(x)
2608 */
2609 LLVMValueRef
2610 lp_build_exp(struct lp_build_context *bld,
2611 LLVMValueRef x)
2612 {
2613 /* log2(e) = 1/log(2) */
2614 LLVMValueRef log2e = lp_build_const_vec(bld->gallivm, bld->type,
2615 1.4426950408889634);
2616
2617 assert(lp_check_value(bld->type, x));
2618
2619 return lp_build_exp2(bld, lp_build_mul(bld, log2e, x));
2620 }
2621
2622
2623 /**
2624 * Generate log(x)
2625 */
2626 LLVMValueRef
2627 lp_build_log(struct lp_build_context *bld,
2628 LLVMValueRef x)
2629 {
2630 /* log(2) */
2631 LLVMValueRef log2 = lp_build_const_vec(bld->gallivm, bld->type,
2632 0.69314718055994529);
2633
2634 assert(lp_check_value(bld->type, x));
2635
2636 return lp_build_mul(bld, log2, lp_build_log2(bld, x));
2637 }
2638
2639
2640 /**
2641 * Generate polynomial.
2642 * Ex: coeffs[0] + x * coeffs[1] + x^2 * coeffs[2].
2643 */
2644 static LLVMValueRef
2645 lp_build_polynomial(struct lp_build_context *bld,
2646 LLVMValueRef x,
2647 const double *coeffs,
2648 unsigned num_coeffs)
2649 {
2650 const struct lp_type type = bld->type;
2651 LLVMValueRef even = NULL, odd = NULL;
2652 LLVMValueRef x2;
2653 unsigned i;
2654
2655 assert(lp_check_value(bld->type, x));
2656
2657 /* TODO: optimize the constant case */
2658 if (gallivm_debug & GALLIVM_DEBUG_PERF &&
2659 LLVMIsConstant(x)) {
2660 debug_printf("%s: inefficient/imprecise constant arithmetic\n",
2661 __FUNCTION__);
2662 }
2663
2664 /*
2665 * Calculate odd and even terms seperately to decrease data dependency
2666 * Ex:
2667 * c[0] + x^2 * c[2] + x^4 * c[4] ...
2668 * + x * (c[1] + x^2 * c[3] + x^4 * c[5]) ...
2669 */
2670 x2 = lp_build_mul(bld, x, x);
2671
2672 for (i = num_coeffs; i--; ) {
2673 LLVMValueRef coeff;
2674
2675 coeff = lp_build_const_vec(bld->gallivm, type, coeffs[i]);
2676
2677 if (i % 2 == 0) {
2678 if (even)
2679 even = lp_build_add(bld, coeff, lp_build_mul(bld, x2, even));
2680 else
2681 even = coeff;
2682 } else {
2683 if (odd)
2684 odd = lp_build_add(bld, coeff, lp_build_mul(bld, x2, odd));
2685 else
2686 odd = coeff;
2687 }
2688 }
2689
2690 if (odd)
2691 return lp_build_add(bld, lp_build_mul(bld, odd, x), even);
2692 else if (even)
2693 return even;
2694 else
2695 return bld->undef;
2696 }
2697
2698
2699 /**
2700 * Minimax polynomial fit of 2**x, in range [0, 1[
2701 */
2702 const double lp_build_exp2_polynomial[] = {
2703 #if EXP_POLY_DEGREE == 5
2704 0.999999925063526176901,
2705 0.693153073200168932794,
2706 0.240153617044375388211,
2707 0.0558263180532956664775,
2708 0.00898934009049466391101,
2709 0.00187757667519147912699
2710 #elif EXP_POLY_DEGREE == 4
2711 1.00000259337069434683,
2712 0.693003834469974940458,
2713 0.24144275689150793076,
2714 0.0520114606103070150235,
2715 0.0135341679161270268764
2716 #elif EXP_POLY_DEGREE == 3
2717 0.999925218562710312959,
2718 0.695833540494823811697,
2719 0.226067155427249155588,
2720 0.0780245226406372992967
2721 #elif EXP_POLY_DEGREE == 2
2722 1.00172476321474503578,
2723 0.657636275736077639316,
2724 0.33718943461968720704
2725 #else
2726 #error
2727 #endif
2728 };
2729
2730
2731 void
2732 lp_build_exp2_approx(struct lp_build_context *bld,
2733 LLVMValueRef x,
2734 LLVMValueRef *p_exp2_int_part,
2735 LLVMValueRef *p_frac_part,
2736 LLVMValueRef *p_exp2)
2737 {
2738 LLVMBuilderRef builder = bld->gallivm->builder;
2739 const struct lp_type type = bld->type;
2740 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
2741 LLVMValueRef ipart = NULL;
2742 LLVMValueRef fpart = NULL;
2743 LLVMValueRef expipart = NULL;
2744 LLVMValueRef expfpart = NULL;
2745 LLVMValueRef res = NULL;
2746
2747 assert(lp_check_value(bld->type, x));
2748
2749 if(p_exp2_int_part || p_frac_part || p_exp2) {
2750 /* TODO: optimize the constant case */
2751 if (gallivm_debug & GALLIVM_DEBUG_PERF &&
2752 LLVMIsConstant(x)) {
2753 debug_printf("%s: inefficient/imprecise constant arithmetic\n",
2754 __FUNCTION__);
2755 }
2756
2757 assert(type.floating && type.width == 32);
2758
2759 x = lp_build_min(bld, x, lp_build_const_vec(bld->gallivm, type, 129.0));
2760 x = lp_build_max(bld, x, lp_build_const_vec(bld->gallivm, type, -126.99999));
2761
2762 /* ipart = floor(x) */
2763 /* fpart = x - ipart */
2764 lp_build_ifloor_fract(bld, x, &ipart, &fpart);
2765 }
2766
2767 if(p_exp2_int_part || p_exp2) {
2768 /* expipart = (float) (1 << ipart) */
2769 expipart = LLVMBuildAdd(builder, ipart,
2770 lp_build_const_int_vec(bld->gallivm, type, 127), "");
2771 expipart = LLVMBuildShl(builder, expipart,
2772 lp_build_const_int_vec(bld->gallivm, type, 23), "");
2773 expipart = LLVMBuildBitCast(builder, expipart, vec_type, "");
2774 }
2775
2776 if(p_exp2) {
2777 expfpart = lp_build_polynomial(bld, fpart, lp_build_exp2_polynomial,
2778 Elements(lp_build_exp2_polynomial));
2779
2780 res = LLVMBuildFMul(builder, expipart, expfpart, "");
2781 }
2782
2783 if(p_exp2_int_part)
2784 *p_exp2_int_part = expipart;
2785
2786 if(p_frac_part)
2787 *p_frac_part = fpart;
2788
2789 if(p_exp2)
2790 *p_exp2 = res;
2791 }
2792
2793
2794 LLVMValueRef
2795 lp_build_exp2(struct lp_build_context *bld,
2796 LLVMValueRef x)
2797 {
2798 LLVMValueRef res;
2799 lp_build_exp2_approx(bld, x, NULL, NULL, &res);
2800 return res;
2801 }
2802
2803
2804 /**
2805 * Extract the exponent of a IEEE-754 floating point value.
2806 *
2807 * Optionally apply an integer bias.
2808 *
2809 * Result is an integer value with
2810 *
2811 * ifloor(log2(x)) + bias
2812 */
2813 LLVMValueRef
2814 lp_build_extract_exponent(struct lp_build_context *bld,
2815 LLVMValueRef x,
2816 int bias)
2817 {
2818 LLVMBuilderRef builder = bld->gallivm->builder;
2819 const struct lp_type type = bld->type;
2820 unsigned mantissa = lp_mantissa(type);
2821 LLVMValueRef res;
2822
2823 assert(type.floating);
2824
2825 assert(lp_check_value(bld->type, x));
2826
2827 x = LLVMBuildBitCast(builder, x, bld->int_vec_type, "");
2828
2829 res = LLVMBuildLShr(builder, x,
2830 lp_build_const_int_vec(bld->gallivm, type, mantissa), "");
2831 res = LLVMBuildAnd(builder, res,
2832 lp_build_const_int_vec(bld->gallivm, type, 255), "");
2833 res = LLVMBuildSub(builder, res,
2834 lp_build_const_int_vec(bld->gallivm, type, 127 - bias), "");
2835
2836 return res;
2837 }
2838
2839
2840 /**
2841 * Extract the mantissa of the a floating.
2842 *
2843 * Result is a floating point value with
2844 *
2845 * x / floor(log2(x))
2846 */
2847 LLVMValueRef
2848 lp_build_extract_mantissa(struct lp_build_context *bld,
2849 LLVMValueRef x)
2850 {
2851 LLVMBuilderRef builder = bld->gallivm->builder;
2852 const struct lp_type type = bld->type;
2853 unsigned mantissa = lp_mantissa(type);
2854 LLVMValueRef mantmask = lp_build_const_int_vec(bld->gallivm, type,
2855 (1ULL << mantissa) - 1);
2856 LLVMValueRef one = LLVMConstBitCast(bld->one, bld->int_vec_type);
2857 LLVMValueRef res;
2858
2859 assert(lp_check_value(bld->type, x));
2860
2861 assert(type.floating);
2862
2863 x = LLVMBuildBitCast(builder, x, bld->int_vec_type, "");
2864
2865 /* res = x / 2**ipart */
2866 res = LLVMBuildAnd(builder, x, mantmask, "");
2867 res = LLVMBuildOr(builder, res, one, "");
2868 res = LLVMBuildBitCast(builder, res, bld->vec_type, "");
2869
2870 return res;
2871 }
2872
2873
2874
2875 /**
2876 * Minimax polynomial fit of log2((1.0 + sqrt(x))/(1.0 - sqrt(x)))/sqrt(x) ,for x in range of [0, 1/9[
2877 * These coefficients can be generate with
2878 * http://www.boost.org/doc/libs/1_36_0/libs/math/doc/sf_and_dist/html/math_toolkit/toolkit/internals2/minimax.html
2879 */
2880 const double lp_build_log2_polynomial[] = {
2881 #if LOG_POLY_DEGREE == 5
2882 2.88539008148777786488L,
2883 0.961796878841293367824L,
2884 0.577058946784739859012L,
2885 0.412914355135828735411L,
2886 0.308591899232910175289L,
2887 0.352376952300281371868L,
2888 #elif LOG_POLY_DEGREE == 4
2889 2.88539009343309178325L,
2890 0.961791550404184197881L,
2891 0.577440339438736392009L,
2892 0.403343858251329912514L,
2893 0.406718052498846252698L,
2894 #elif LOG_POLY_DEGREE == 3
2895 2.88538959748872753838L,
2896 0.961932915889597772928L,
2897 0.571118517972136195241L,
2898 0.493997535084709500285L,
2899 #else
2900 #error
2901 #endif
2902 };
2903
2904 /**
2905 * See http://www.devmaster.net/forums/showthread.php?p=43580
2906 * http://en.wikipedia.org/wiki/Logarithm#Calculation
2907 * http://www.nezumi.demon.co.uk/consult/logx.htm
2908 */
2909 void
2910 lp_build_log2_approx(struct lp_build_context *bld,
2911 LLVMValueRef x,
2912 LLVMValueRef *p_exp,
2913 LLVMValueRef *p_floor_log2,
2914 LLVMValueRef *p_log2)
2915 {
2916 LLVMBuilderRef builder = bld->gallivm->builder;
2917 const struct lp_type type = bld->type;
2918 LLVMTypeRef vec_type = lp_build_vec_type(bld->gallivm, type);
2919 LLVMTypeRef int_vec_type = lp_build_int_vec_type(bld->gallivm, type);
2920
2921 LLVMValueRef expmask = lp_build_const_int_vec(bld->gallivm, type, 0x7f800000);
2922 LLVMValueRef mantmask = lp_build_const_int_vec(bld->gallivm, type, 0x007fffff);
2923 LLVMValueRef one = LLVMConstBitCast(bld->one, int_vec_type);
2924
2925 LLVMValueRef i = NULL;
2926 LLVMValueRef y = NULL;
2927 LLVMValueRef z = NULL;
2928 LLVMValueRef exp = NULL;
2929 LLVMValueRef mant = NULL;
2930 LLVMValueRef logexp = NULL;
2931 LLVMValueRef logmant = NULL;
2932 LLVMValueRef res = NULL;
2933
2934 assert(lp_check_value(bld->type, x));
2935
2936 if(p_exp || p_floor_log2 || p_log2) {
2937 /* TODO: optimize the constant case */
2938 if (gallivm_debug & GALLIVM_DEBUG_PERF &&
2939 LLVMIsConstant(x)) {
2940 debug_printf("%s: inefficient/imprecise constant arithmetic\n",
2941 __FUNCTION__);
2942 }
2943
2944 assert(type.floating && type.width == 32);
2945
2946 /*
2947 * We don't explicitly handle denormalized numbers. They will yield a
2948 * result in the neighbourhood of -127, which appears to be adequate
2949 * enough.
2950 */
2951
2952 i = LLVMBuildBitCast(builder, x, int_vec_type, "");
2953
2954 /* exp = (float) exponent(x) */
2955 exp = LLVMBuildAnd(builder, i, expmask, "");
2956 }
2957
2958 if(p_floor_log2 || p_log2) {
2959 logexp = LLVMBuildLShr(builder, exp, lp_build_const_int_vec(bld->gallivm, type, 23), "");
2960 logexp = LLVMBuildSub(builder, logexp, lp_build_const_int_vec(bld->gallivm, type, 127), "");
2961 logexp = LLVMBuildSIToFP(builder, logexp, vec_type, "");
2962 }
2963
2964 if(p_log2) {
2965 /* mant = 1 + (float) mantissa(x) */
2966 mant = LLVMBuildAnd(builder, i, mantmask, "");
2967 mant = LLVMBuildOr(builder, mant, one, "");
2968 mant = LLVMBuildBitCast(builder, mant, vec_type, "");
2969
2970 /* y = (mant - 1) / (mant + 1) */
2971 y = lp_build_div(bld,
2972 lp_build_sub(bld, mant, bld->one),
2973 lp_build_add(bld, mant, bld->one)
2974 );
2975
2976 /* z = y^2 */
2977 z = lp_build_mul(bld, y, y);
2978
2979 /* compute P(z) */
2980 logmant = lp_build_polynomial(bld, z, lp_build_log2_polynomial,
2981 Elements(lp_build_log2_polynomial));
2982
2983 /* logmant = y * P(z) */
2984 logmant = lp_build_mul(bld, y, logmant);
2985
2986 res = lp_build_add(bld, logmant, logexp);
2987 }
2988
2989 if(p_exp) {
2990 exp = LLVMBuildBitCast(builder, exp, vec_type, "");
2991 *p_exp = exp;
2992 }
2993
2994 if(p_floor_log2)
2995 *p_floor_log2 = logexp;
2996
2997 if(p_log2)
2998 *p_log2 = res;
2999 }
3000
3001
3002 LLVMValueRef
3003 lp_build_log2(struct lp_build_context *bld,
3004 LLVMValueRef x)
3005 {
3006 LLVMValueRef res;
3007 lp_build_log2_approx(bld, x, NULL, NULL, &res);
3008 return res;
3009 }
3010
3011
3012 /**
3013 * Faster (and less accurate) log2.
3014 *
3015 * log2(x) = floor(log2(x)) - 1 + x / 2**floor(log2(x))
3016 *
3017 * Piece-wise linear approximation, with exact results when x is a
3018 * power of two.
3019 *
3020 * See http://www.flipcode.com/archives/Fast_log_Function.shtml
3021 */
3022 LLVMValueRef
3023 lp_build_fast_log2(struct lp_build_context *bld,
3024 LLVMValueRef x)
3025 {
3026 LLVMBuilderRef builder = bld->gallivm->builder;
3027 LLVMValueRef ipart;
3028 LLVMValueRef fpart;
3029
3030 assert(lp_check_value(bld->type, x));
3031
3032 assert(bld->type.floating);
3033
3034 /* ipart = floor(log2(x)) - 1 */
3035 ipart = lp_build_extract_exponent(bld, x, -1);
3036 ipart = LLVMBuildSIToFP(builder, ipart, bld->vec_type, "");
3037
3038 /* fpart = x / 2**ipart */
3039 fpart = lp_build_extract_mantissa(bld, x);
3040
3041 /* ipart + fpart */
3042 return LLVMBuildFAdd(builder, ipart, fpart, "");
3043 }
3044
3045
3046 /**
3047 * Fast implementation of iround(log2(x)).
3048 *
3049 * Not an approximation -- it should give accurate results all the time.
3050 */
3051 LLVMValueRef
3052 lp_build_ilog2(struct lp_build_context *bld,
3053 LLVMValueRef x)
3054 {
3055 LLVMBuilderRef builder = bld->gallivm->builder;
3056 LLVMValueRef sqrt2 = lp_build_const_vec(bld->gallivm, bld->type, M_SQRT2);
3057 LLVMValueRef ipart;
3058
3059 assert(bld->type.floating);
3060
3061 assert(lp_check_value(bld->type, x));
3062
3063 /* x * 2^(0.5) i.e., add 0.5 to the log2(x) */
3064 x = LLVMBuildFMul(builder, x, sqrt2, "");
3065
3066 /* ipart = floor(log2(x) + 0.5) */
3067 ipart = lp_build_extract_exponent(bld, x, 0);
3068
3069 return ipart;
3070 }
3071
3072 LLVMValueRef
3073 lp_build_mod(struct lp_build_context *bld,
3074 LLVMValueRef x,
3075 LLVMValueRef y)
3076 {
3077 LLVMBuilderRef builder = bld->gallivm->builder;
3078 LLVMValueRef res;
3079 const struct lp_type type = bld->type;
3080
3081 assert(lp_check_value(type, x));
3082 assert(lp_check_value(type, y));
3083
3084 if (type.floating)
3085 res = LLVMBuildFRem(builder, x, y, "");
3086 else if (type.sign)
3087 res = LLVMBuildSRem(builder, x, y, "");
3088 else
3089 res = LLVMBuildURem(builder, x, y, "");
3090 return res;
3091 }