/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * @file
 * Helper functions for type conversions.
 *
 * We want to use the fastest type for a given computation whenever feasible.
 * The other side of this is that we need to be able to convert between
 * several types accurately and efficiently.
 *
 * Conversion between types of different bit width is quite complex, so it
 * helps to keep in mind the invariants that every type conversion must
 * preserve:
 *
 * - register width must remain constant:
 *
 *     src_type.width * src_type.length == dst_type.width * dst_type.length
 *
 * - total number of elements must remain constant:
 *
 *     src_type.length * num_srcs == dst_type.length * num_dsts
 *
 * It is not always possible to do the conversion both accurately and
 * efficiently, usually due to lack of adequate machine instructions. In these
 * cases it is important not to take shortcuts here and sacrifice accuracy, as
 * these functions can be used anywhere. In the future we might have a
 * precision parameter which can gauge the accuracy vs. efficiency compromise,
 * but for now, if the data conversion between two stages happens to be the
 * bottleneck, then most likely one should just avoid converting at all and
 * run both stages with the same type.
 *
 * Make sure to run the lp_test_conv unit test after any change to this file.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */
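
/*
 * For example, converting four vectors of 4 x float32 into one vector of
 * 16 x unorm8 (the 4x4f -> 1x16ub fast path in lp_build_conv below) respects
 * both invariants: 32 * 4 == 8 * 16 and 4 * 4 == 16 * 1.
 */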


#include "util/u_debug.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_arit.h"
#include "lp_bld_pack.h"
#include "lp_bld_conv.h"


/**
 * Special case for converting clamped IEEE-754 floats to unsigned norms.
 *
 * The mathematical voodoo below may seem excessive but it is actually
 * paramount that we do it this way for several reasons. First, there is no
 * single precision FP to unsigned integer conversion Intel SSE instruction.
 * Second, even if there were, since the FP's mantissa takes only a fraction
 * of register bits the typical scale-and-cast approach would require double
 * precision for accurate results, and therefore half the throughput.
 *
 * Although the result values can be scaled to an arbitrary bit width
 * specified by dst_width, the actual result type will have the same width as
 * the source type.
 *
 * Ex: src = { float, float, float, float }
 * return { i32, i32, i32, i32 } where each value is in [0, 2^dst_width-1].
 */
LLVMValueRef
lp_build_clamped_float_to_unsigned_norm(struct gallivm_state *gallivm,
                                        struct lp_type src_type,
                                        unsigned dst_width,
                                        LLVMValueRef src)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(gallivm, src_type);
   LLVMValueRef res;
   unsigned mantissa;

   assert(src_type.floating);
   assert(dst_width <= src_type.width);
   src_type.sign = FALSE;

   mantissa = lp_mantissa(src_type);

   if (dst_width <= mantissa) {
      /*
       * Apply magic coefficients that will make the desired result appear
       * in the least significant bits of the mantissa, with correct rounding.
       *
       * This only works if the destination width fits in the mantissa.
       */

      unsigned long long ubound;
      unsigned long long mask;
      double scale;
      double bias;

      ubound = (1ULL << dst_width);
      mask = ubound - 1;
      scale = (double)mask/ubound;
      bias = (double)(1ULL << (mantissa - dst_width));

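      /*
       * For example, with 32-bit floats (23-bit mantissa) and dst_width = 8:
       * ubound = 256, mask = 0xff, scale = 255/256 and bias = 2^15.  For x in
       * [0,1], x*255/256 + 2^15 lies in [2^15, 2^15 + 255/256], a range where
       * the float's precision is exactly 2^-8, so the 8 least significant
       * mantissa bits end up holding round(x*255); the AND below extracts
       * them (0 for 0.0, 255 for 1.0).
       */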
      res = LLVMBuildFMul(builder, src, lp_build_const_vec(gallivm, src_type, scale), "");
      res = LLVMBuildFAdd(builder, res, lp_build_const_vec(gallivm, src_type, bias), "");
      res = LLVMBuildBitCast(builder, res, int_vec_type, "");
      res = LLVMBuildAnd(builder, res,
                         lp_build_const_int_vec(gallivm, src_type, mask), "");
   }
   else if (dst_width == (mantissa + 1)) {
      /*
       * The destination width matches exactly what can be represented in
       * floating point (i.e., mantissa + 1 bits). So do a straight
       * multiplication followed by casting. No further rounding is necessary.
       */

      double scale;

      scale = (double)((1ULL << dst_width) - 1);

      res = LLVMBuildFMul(builder, src,
                          lp_build_const_vec(gallivm, src_type, scale), "");
      res = LLVMBuildFPToSI(builder, res, int_vec_type, "");
   }
   else {
      /*
       * The destination exceeds what can be represented in floating point.
       * So multiply by the largest power of two we can get away with, and
       * then subtract the most significant bit to rescale to normalized
       * values.
       *
       * The largest power of two factor we can get away with is
       * (1 << (src_type.width - 1)), because we need to use a signed
       * conversion. In theory it should be (1 << (src_type.width - 2)), but
       * IEEE 754 rules state that INT_MIN should be returned by FPToSI on
       * overflow, which happens to be the correct result for values near
       * 1.0!
       *
       * This means we get (src_type.width - 1) correct bits for values near
       * 0.0, and (mantissa + 1) correct bits for values near 1.0. Equally or
       * more important, we also get exact results for 0.0 and 1.0.
       */

      unsigned n = MIN2(src_type.width - 1, dst_width);

      double scale = (double)(1ULL << n);
      unsigned lshift = dst_width - n;
      unsigned rshift = n;
      LLVMValueRef lshifted;
      LLVMValueRef rshifted;

      res = LLVMBuildFMul(builder, src,
                          lp_build_const_vec(gallivm, src_type, scale), "");
      res = LLVMBuildFPToSI(builder, res, int_vec_type, "");

      /*
       * Align the most significant bit to its final place.
       *
       * This will cause 1.0 to overflow to 0, but the later adjustment will
       * get it right.
       */
      if (lshift) {
         lshifted = LLVMBuildShl(builder, res,
                                 lp_build_const_int_vec(gallivm, src_type,
                                                        lshift), "");
      } else {
         lshifted = res;
      }

      /*
       * Align the most significant bit to the right.
       */
      rshifted = LLVMBuildAShr(builder, res,
                               lp_build_const_int_vec(gallivm, src_type, rshift),
                               "");

      /*
       * Subtract the MSB (shifted down to the LSB position), thereby
       * rescaling from (1 << dst_width) to ((1 << dst_width) - 1).
       */

      res = LLVMBuildSub(builder, lshifted, rshifted, "");
   }

   return res;
}


/**
 * Inverse of lp_build_clamped_float_to_unsigned_norm above.
 * Ex: src = { i32, i32, i32, i32 } with values in range [0, 2^src_width-1]
 * return {float, float, float, float} with values in range [0, 1].
 */
LLVMValueRef
lp_build_unsigned_norm_to_float(struct gallivm_state *gallivm,
                                unsigned src_width,
                                struct lp_type dst_type,
                                LLVMValueRef src)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMTypeRef vec_type = lp_build_vec_type(gallivm, dst_type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(gallivm, dst_type);
   LLVMValueRef bias_;
   LLVMValueRef res;
   unsigned mantissa;
   unsigned n;
   unsigned long long ubound;
   unsigned long long mask;
   double scale;
   double bias;

   assert(dst_type.floating);

   /* Special-case int8->float, though most cases could be handled
    * this way:
    */
   if (src_width == 8) {
      scale = 1.0/255.0;
      res = LLVMBuildSIToFP(builder, src, vec_type, "");
      res = LLVMBuildFMul(builder, res,
                          lp_build_const_vec(gallivm, dst_type, scale), "");
      return res;
   }

   mantissa = lp_mantissa(dst_type);

   n = MIN2(mantissa, src_width);

   ubound = ((unsigned long long)1 << n);
   mask = ubound - 1;
   scale = (double)ubound/mask;
   bias = (double)((unsigned long long)1 << (mantissa - n));
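   /*
    * For example, with 32-bit floats (23-bit mantissa) and src_width = 16:
    * n = 16, scale = 65536/65535 and bias = 2^7 = 128.  OR-ing the 16-bit
    * source value into the low mantissa bits of 128.0 yields the float
    * 128 + src/65536; subtracting the bias and multiplying by the scale
    * below then gives src/65535, i.e. a value in [0, 1].
    */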

   res = src;

   if(src_width > mantissa) {
      int shift = src_width - mantissa;
      res = LLVMBuildLShr(builder, res,
                          lp_build_const_int_vec(gallivm, dst_type, shift), "");
   }

   bias_ = lp_build_const_vec(gallivm, dst_type, bias);

   res = LLVMBuildOr(builder,
                     res,
                     LLVMBuildBitCast(builder, bias_, int_vec_type, ""), "");

   res = LLVMBuildBitCast(builder, res, vec_type, "");

   res = LLVMBuildFSub(builder, res, bias_, "");
   res = LLVMBuildFMul(builder, res, lp_build_const_vec(gallivm, dst_type, scale), "");

   return res;
}


/**
 * Generic type conversion.
 *
 * TODO: Take a precision argument, or even better, add a new precision member
 * to the lp_type union.
 */
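
/*
 * Typical usage (a minimal sketch; gallivm, src[] and dst are assumed to be
 * set up by the caller): converting four vectors of 4 x float32 with values
 * in [0,1] into a single vector of 16 x unorm8:
 *
 *    struct lp_type src_type;
 *    struct lp_type dst_type;
 *    memset(&src_type, 0, sizeof src_type);
 *    src_type.floating = TRUE;
 *    src_type.sign = TRUE;
 *    src_type.width = 32;
 *    src_type.length = 4;
 *    memset(&dst_type, 0, sizeof dst_type);
 *    dst_type.norm = TRUE;
 *    dst_type.width = 8;
 *    dst_type.length = 16;
 *    lp_build_conv(gallivm, src_type, dst_type, src, 4, &dst, 1);
 */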
void
lp_build_conv(struct gallivm_state *gallivm,
              struct lp_type src_type,
              struct lp_type dst_type,
              const LLVMValueRef *src, unsigned num_srcs,
              LLVMValueRef *dst, unsigned num_dsts)
{
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_type tmp_type;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned num_tmps;
   unsigned i;

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
   assert(num_dsts <= LP_MAX_VECTOR_LENGTH);

   tmp_type = src_type;
   for(i = 0; i < num_srcs; ++i) {
      assert(lp_check_value(src_type, src[i]));
      tmp[i] = src[i];
   }
   num_tmps = num_srcs;


   /* Special case 4x4f --> 1x16ub
    */
   if (src_type.floating == 1 &&
       src_type.fixed == 0 &&
       src_type.sign == 1 &&
       src_type.norm == 0 &&
       src_type.width == 32 &&
       src_type.length == 4 &&

       dst_type.floating == 0 &&
       dst_type.fixed == 0 &&
       dst_type.sign == 0 &&
       dst_type.norm == 1 &&
       dst_type.width == 8 &&
       dst_type.length == 16 &&

       util_cpu_caps.has_sse2)
   {
      int i;

      for (i = 0; i < num_dsts; i++, src += 4) {
         struct lp_type int16_type = dst_type;
         struct lp_type int32_type = dst_type;
         LLVMValueRef lo, hi;
         LLVMValueRef src_int0;
         LLVMValueRef src_int1;
         LLVMValueRef src_int2;
         LLVMValueRef src_int3;
         LLVMTypeRef int16_vec_type;
         LLVMTypeRef int32_vec_type;
         LLVMTypeRef src_vec_type;
         LLVMTypeRef dst_vec_type;
         LLVMValueRef const_255f;
         LLVMValueRef a, b, c, d;

         int16_type.width *= 2;
         int16_type.length /= 2;
         int16_type.sign = 1;

         int32_type.width *= 4;
         int32_type.length /= 4;
         int32_type.sign = 1;

         src_vec_type = lp_build_vec_type(gallivm, src_type);
         dst_vec_type = lp_build_vec_type(gallivm, dst_type);
         int16_vec_type = lp_build_vec_type(gallivm, int16_type);
         int32_vec_type = lp_build_vec_type(gallivm, int32_type);

         const_255f = lp_build_const_vec(gallivm, src_type, 255.0f);

         a = LLVMBuildFMul(builder, src[0], const_255f, "");
         b = LLVMBuildFMul(builder, src[1], const_255f, "");
         c = LLVMBuildFMul(builder, src[2], const_255f, "");
         d = LLVMBuildFMul(builder, src[3], const_255f, "");

         {
            struct lp_build_context bld;

            bld.gallivm = gallivm;
            bld.type = src_type;
            bld.vec_type = src_vec_type;
            bld.int_elem_type = lp_build_elem_type(gallivm, int32_type);
            bld.int_vec_type = int32_vec_type;
            bld.undef = lp_build_undef(gallivm, src_type);
            bld.zero = lp_build_zero(gallivm, src_type);
            bld.one = lp_build_one(gallivm, src_type);

            src_int0 = lp_build_iround(&bld, a);
            src_int1 = lp_build_iround(&bld, b);
            src_int2 = lp_build_iround(&bld, c);
            src_int3 = lp_build_iround(&bld, d);
         }
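         /*
          * Narrow the four 4 x i32 vectors down to a single 16 x u8 vector
          * in two steps: i32 -> i16 (giving two 8 x i16 vectors), then
          * i16 -> u8.
          */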
         /* relying on clamping behavior of sse2 intrinsics here */
         lo = lp_build_pack2(gallivm, int32_type, int16_type, src_int0, src_int1);
         hi = lp_build_pack2(gallivm, int32_type, int16_type, src_int2, src_int3);
         dst[i] = lp_build_pack2(gallivm, int16_type, dst_type, lo, hi);
      }
      return;
   }

   /*
    * Clamp if necessary
    */

   if(memcmp(&src_type, &dst_type, sizeof src_type) != 0) {
      struct lp_build_context bld;
      double src_min = lp_const_min(src_type);
      double dst_min = lp_const_min(dst_type);
      double src_max = lp_const_max(src_type);
      double dst_max = lp_const_max(dst_type);
      LLVMValueRef thres;

      lp_build_context_init(&bld, gallivm, tmp_type);

      if(src_min < dst_min) {
         if(dst_min == 0.0)
            thres = bld.zero;
         else
            thres = lp_build_const_vec(gallivm, src_type, dst_min);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_max(&bld, tmp[i], thres);
      }

      if(src_max > dst_max) {
         if(dst_max == 1.0)
            thres = bld.one;
         else
            thres = lp_build_const_vec(gallivm, src_type, dst_max);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_min(&bld, tmp[i], thres);
      }
   }

   /*
    * Scale to the narrowest range
    */

   if(dst_type.floating) {
      /* Nothing to do */
   }
   else if(tmp_type.floating) {
      if(!dst_type.fixed && !dst_type.sign && dst_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_clamped_float_to_unsigned_norm(gallivm,
                                                             tmp_type,
                                                             dst_type.width,
                                                             tmp[i]);
         }
         tmp_type.floating = FALSE;
      }
      else {
         double dst_scale = lp_const_scale(dst_type);
         LLVMTypeRef tmp_vec_type;

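         /*
          * For a normalized destination the scale is the largest
          * representable integer value, e.g. 127 for 8-bit signed norm, so
          * floats in [-1,1] get mapped onto [-127,127] before the conversion
          * to integer below.
          */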
         if (dst_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(gallivm, tmp_type, dst_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
         }

         /* Use an equally sized integer for intermediate computations */
         tmp_type.floating = FALSE;
         tmp_vec_type = lp_build_vec_type(gallivm, tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(dst_type.sign)
               tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
            tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
#endif
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift > dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(gallivm, tmp_type,
                                                     src_shift - dst_shift);
         for(i = 0; i < num_tmps; ++i)
            if(src_type.sign)
               tmp[i] = LLVMBuildAShr(builder, tmp[i], shift, "");
            else
               tmp[i] = LLVMBuildLShr(builder, tmp[i], shift, "");
      }
   }

   /*
    * Truncate or expand bit width
    *
    * No data conversion should happen here, although the sign bits are
    * crucial to avoid bad clamping.
    */
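   /*
    * E.g. in the float -> unorm8 case, the temporaries are 4 x i32 vectors
    * whose values already fit in [0,255]; lp_build_resize merely packs them
    * into a single 16 x u8 vector without changing the values.
    */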

   {
      struct lp_type new_type;

      new_type = tmp_type;
      new_type.sign = dst_type.sign;
      new_type.width = dst_type.width;
      new_type.length = dst_type.length;

      lp_build_resize(gallivm, tmp_type, new_type, tmp, num_srcs, tmp, num_dsts);

      tmp_type = new_type;
      num_tmps = num_dsts;
   }

   /*
    * Scale to the widest range
    */

   if(src_type.floating) {
      /* Nothing to do */
   }
   else if(!src_type.floating && dst_type.floating) {
      if(!src_type.fixed && !src_type.sign && src_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_unsigned_norm_to_float(gallivm,
                                                     src_type.width,
                                                     dst_type,
                                                     tmp[i]);
         }
         tmp_type.floating = TRUE;
      }
      else {
         double src_scale = lp_const_scale(src_type);
         LLVMTypeRef tmp_vec_type;

         /* Use an equally sized float for intermediate computations */
         tmp_type.floating = TRUE;
         tmp_type.sign = TRUE;
         tmp_vec_type = lp_build_vec_type(gallivm, tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(dst_type.sign)
               tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildUIToFP(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildUIToFP */
            tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
#endif
         }

         if (src_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(gallivm, tmp_type, 1.0/src_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift < dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(gallivm, tmp_type, dst_shift - src_shift);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = LLVMBuildShl(builder, tmp[i], shift, "");
      }
   }

   for(i = 0; i < num_dsts; ++i) {
      dst[i] = tmp[i];
      assert(lp_check_value(dst_type, dst[i]));
   }
}


/**
 * Bit mask conversion.
 *
 * This will convert integer masks between the given types.
 *
 * The mask values should be 0 or -1, i.e., all bits either set to zero or
 * one. Any other value will likely cause unpredictable results.
 *
 * This is basically a very trimmed down version of lp_build_conv.
 */
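
/*
 * For example, four vectors of 4 x i32 masks can be converted into a single
 * vector of 16 x i8 masks: the register width (4*32 == 16*8) and the total
 * number of mask values (4*4 == 1*16) are both preserved.
 */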
void
lp_build_conv_mask(struct gallivm_state *gallivm,
                   struct lp_type src_type,
                   struct lp_type dst_type,
                   const LLVMValueRef *src, unsigned num_srcs,
                   LLVMValueRef *dst, unsigned num_dsts)
{
   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   /*
    * Drop the type semantics.
    *
    * We assume all values are 0 or -1, so only the bit width matters and the
    * types can be treated as plain signed integers.
    */

   src_type.floating = FALSE;
   src_type.fixed = FALSE;
   src_type.sign = TRUE;
   src_type.norm = FALSE;

   dst_type.floating = FALSE;
   dst_type.fixed = FALSE;
   dst_type.sign = TRUE;
   dst_type.norm = FALSE;

   /*
    * Truncate or expand bit width
    */

   if(src_type.width > dst_type.width) {
      assert(num_dsts == 1);
      dst[0] = lp_build_pack(gallivm, src_type, dst_type, TRUE, src, num_srcs);
   }
   else if(src_type.width < dst_type.width) {
      assert(num_srcs == 1);
      lp_build_unpack(gallivm, src_type, dst_type, src[0], dst, num_dsts);
   }
   else {
      assert(num_srcs == num_dsts);
      memcpy(dst, src, num_dsts * sizeof *dst);
   }
}