gallivm: More accurate float -> 24bit & 32bit unorm conversion.
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_conv.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 /**
30 * @file
31 * Helper functions for type conversions.
32 *
33 * We want to use the fastest type for a given computation whenever feasible.
34 * The other side of this is that we need to be able to convert between several
35 * types accurately and efficiently.
36 *
37 * Conversion between types of different bit width is quite complex, and must often be done in multiple steps.
38 *
39 * Keep in mind that there are a few invariants in type conversions:
40 *
41 * - register width must remain constant:
42 *
43 * src_type.width * src_type.length == dst_type.width * dst_type.length
44 *
45 * - total number of elements must remain constant:
46 *
47 * src_type.length * num_srcs == dst_type.length * num_dsts
48 *
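* For example (an illustrative case, mirroring the 4x4f -> 1x16ub special path
* in lp_build_conv below): converting four vectors of 4 x float32 into one
* vector of 16 x uint8 preserves both invariants, since 32 * 4 == 8 * 16 and
* 4 * 4 == 16 * 1.
*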
49 * It is not always possible to do the conversion both accurately and
50 * efficiently, usually due to lack of adequate machine instructions. In these
51 * cases it is important not to take shortcuts and sacrifice accuracy, as
52 * these functions can be used anywhere. In the future we might have a
53 * precision parameter which can gauge the accuracy vs efficiency compromise,
54 * but for now if the data conversion between two stages happens to be the
55 * bottleneck, then it is most likely best to avoid converting at all and run
56 * both stages with the same type.
57 *
58 * Make sure to run lp_test_conv unit test after any change to this file.
59 *
60 * @author Jose Fonseca <jfonseca@vmware.com>
61 */
62
63
64 #include "util/u_debug.h"
65 #include "util/u_math.h"
66 #include "util/u_cpu_detect.h"
67
68 #include "lp_bld_type.h"
69 #include "lp_bld_const.h"
70 #include "lp_bld_arit.h"
71 #include "lp_bld_pack.h"
72 #include "lp_bld_conv.h"
73
74
75 /**
76 * Special case for converting clamped IEEE-754 floats to unsigned norms.
77 *
78 * The mathematical voodoo below may seem excessive but it is actually
79 * paramount we do it this way for several reasons. First, there is no Intel
80 * SSE instruction to convert single precision floats to unsigned integers.
81 * Second, even if there were, since the float's mantissa takes up only a
82 * fraction of the register bits, the typical scale-and-cast approach would
83 * require double precision for accurate results, and therefore half the throughput.
84 *
85 * Although the result values can be scaled to an arbitrary bit width specified
86 * by dst_width, the actual result type will have the same width as the source type.
87 *
88 * Ex: src = { float, float, float, float }
89 * return { i32, i32, i32, i32 } where each value is in [0, 2^dst_width-1].
90 */
91 LLVMValueRef
92 lp_build_clamped_float_to_unsigned_norm(LLVMBuilderRef builder,
93 struct lp_type src_type,
94 unsigned dst_width,
95 LLVMValueRef src)
96 {
97 LLVMTypeRef int_vec_type = lp_build_int_vec_type(src_type);
98 LLVMValueRef res;
99 unsigned mantissa;
100
101 assert(src_type.floating);
102 assert(dst_width <= src_type.width);
103 src_type.sign = FALSE;
104
105 mantissa = lp_mantissa(src_type);
106
107 if (dst_width <= mantissa) {
108 /*
109 * Apply magic coefficients that will make the desired result appear
110 * in the least significant bits of the mantissa, with correct rounding.
111 *
112 * This only works if the destination width fits in the mantissa.
113 */
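
/*
 * Worked example (illustrative): for 32-bit floats (mantissa = 23) and
 * dst_width = 8 we get scale = 255/256 and bias = 2^(23 - 8) = 32768.0.
 * Adding the bias forces the exponent so that one mantissa ulp equals 1/256;
 * the low 8 bits of the float's bit pattern then hold round(src * 255),
 * which the final mask extracts.
 */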
114
115 unsigned long long ubound;
116 unsigned long long mask;
117 double scale;
118 double bias;
119
120 ubound = (1ULL << dst_width);
121 mask = ubound - 1;
122 scale = (double)mask/ubound;
123 bias = (double)(1ULL << (mantissa - dst_width));
124
125 res = LLVMBuildFMul(builder, src, lp_build_const_vec(src_type, scale), "");
126 res = LLVMBuildFAdd(builder, res, lp_build_const_vec(src_type, bias), "");
127 res = LLVMBuildBitCast(builder, res, int_vec_type, "");
128 res = LLVMBuildAnd(builder, res, lp_build_const_int_vec(src_type, mask), "");
129 }
130 else if (dst_width == (mantissa + 1)) {
131 /*
132 * The destination width matches exactly what can be represented in
133 * floating point (i.e., mantissa + 1 bits). So do a straight
134 * multiplication followed by casting. No further rounding is necessary.
135 */
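
/*
 * Illustrative case: for 32-bit floats (mantissa = 23) and dst_width = 24,
 * scale = 2^24 - 1 = 16777215.0, which is still exactly representable, and
 * every scaled value fits comfortably in a 32-bit signed integer, so a plain
 * FPToSI suffices.
 */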
136
137 double scale;
138
139 scale = (double)((1ULL << dst_width) - 1);
140
141 res = LLVMBuildFMul(builder, src, lp_build_const_vec(src_type, scale), "");
142 res = LLVMBuildFPToSI(builder, res, int_vec_type, "");
143 }
144 else {
145 /*
146 * The destination width exceeds what can be represented exactly in floating
147 * point. So multiply by the largest power of two we can get away with, and
148 * then subtract the most significant bit back out to rescale to normalized values.
149 *
150 * The largest power of two factor we can get away with is
151 * (1 << (src_type.width - 1)), because we need to use a signed conversion. In
152 * theory it should be (1 << (src_type.width - 2)), but overflowing FPToSI
153 * conversions return INT_MIN on x86 (SSE2's cvttps2dq), which happens to be
154 * the correct result for values near 1.0!
155 *
156 * This means we get (src_type.width - 1) correct bits for values near 0.0,
157 * and (mantissa + 1) correct bits for values near 1.0. Equally or more
158 * important, we also get exact results for 0.0 and 1.0.
159 */
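
/*
 * Illustrative case: for 32-bit floats and dst_width = 32, n = 31, so
 * scale = 2^31, lshift = 1 and rshift = 31. Then 0.0 maps to 0, 0.5 maps to
 * 0x80000000, and 1.0 overflows FPToSI to INT_MIN, whose bit pattern, shifted
 * left by one and with its top bit (moved down to the LSB) subtracted, gives
 * the exact 0xffffffff.
 */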
160
161 unsigned n = MIN2(src_type.width - 1, dst_width);
162
163 double scale = (double)(1ULL << n);
164 unsigned lshift = dst_width - n;
165 unsigned rshift = n;
166 LLVMValueRef lshifted;
167 LLVMValueRef rshifted;
168
169 res = LLVMBuildFMul(builder, src, lp_build_const_vec(src_type, scale), "");
170 res = LLVMBuildFPToSI(builder, res, int_vec_type, "");
171
172 /*
173 * Align the most significant bit to its final place.
174 *
175 * This will cause 1.0 to overflow to 0, but the later adjustment will
176 * get it right.
177 */
178 if (lshift) {
179 lshifted = LLVMBuildShl(builder, res,
180 lp_build_const_int_vec(src_type, lshift), "");
181 } else {
182 lshifted = res;
183 }
184
185 /*
186 * Align the most significant bit to the right. Use a logical shift so that the
187 * 1.0 case (INT_MIN from the FPToSI above) contributes +1 to the subtraction below. */
188 rshifted = LLVMBuildLShr(builder, res,
189 lp_build_const_int_vec(src_type, rshift), "");
190
191 /*
192 * Subtract the MSB, shifted down to the LSB, thereby re-scaling the factor
193 * from (1 << dst_width) to ((1 << dst_width) - 1).
194 */
195
196 res = LLVMBuildSub(builder, lshifted, rshifted, "");
197 }
198
199 return res;
200 }
201
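#if 0
/*
 * Illustrative usage sketch (hypothetical helper, for illustration only):
 * convert a vector of 4 pre-clamped floats to 8-bit unorm values, which are
 * returned in 32-bit integer lanes.
 */
static LLVMValueRef
example_float4_to_unorm8(LLVMBuilderRef builder, LLVMValueRef src)
{
   struct lp_type f32_4;

   memset(&f32_4, 0, sizeof f32_4);
   f32_4.floating = TRUE;
   f32_4.sign = TRUE;
   f32_4.width = 32;
   f32_4.length = 4;

   /* dst_width = 8: each 32-bit result lane holds a value in [0, 255] */
   return lp_build_clamped_float_to_unsigned_norm(builder, f32_4, 8, src);
}
#endif
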
202
203 /**
204 * Inverse of lp_build_clamped_float_to_unsigned_norm above.
205 * Ex: src = { i32, i32, i32, i32 } with values in range [0, 2^src_width-1]
206 * return {float, float, float, float} with values in range [0, 1].
207 */
208 LLVMValueRef
209 lp_build_unsigned_norm_to_float(LLVMBuilderRef builder,
210 unsigned src_width,
211 struct lp_type dst_type,
212 LLVMValueRef src)
213 {
214 LLVMTypeRef vec_type = lp_build_vec_type(dst_type);
215 LLVMTypeRef int_vec_type = lp_build_int_vec_type(dst_type);
216 LLVMValueRef bias_;
217 LLVMValueRef res;
218 unsigned mantissa;
219 unsigned n;
220 unsigned long long ubound;
221 unsigned long long mask;
222 double scale;
223 double bias;
224
225 assert(dst_type.floating);
226
227 /* Special-case int8->float, though most cases could be handled
228 * this way:
229 */
230 if (src_width == 8) {
231 scale = 1.0/255.0;
232 res = LLVMBuildSIToFP(builder, src, vec_type, "");
233 res = LLVMBuildFMul(builder, res, lp_build_const_vec(dst_type, scale), "");
234 return res;
235 }
236
237 mantissa = lp_mantissa(dst_type);
238
239 n = MIN2(mantissa, src_width);
240
241 ubound = ((unsigned long long)1 << n);
242 mask = ubound - 1;
243 scale = (double)ubound/mask;
244 bias = (double)((unsigned long long)1 << (mantissa - n));
245
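/*
 * Worked example (illustrative): for 32-bit floats (mantissa = 23) and
 * src_width = 16, n = 16, scale = 65536/65535 and bias = 2^(23 - 16) = 128.0.
 * OR'ing the 16-bit integer into the low mantissa bits of 128.0 yields
 * 128.0 + src/65536; subtracting the bias and multiplying by the scale then
 * gives src/65535, i.e. a float in [0, 1].
 */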
246 res = src;
247
248 if(src_width > mantissa) {
249 int shift = src_width - mantissa;
250 res = LLVMBuildLShr(builder, res, lp_build_const_int_vec(dst_type, shift), "");
251 }
252
253 bias_ = lp_build_const_vec(dst_type, bias);
254
255 res = LLVMBuildOr(builder,
256 res,
257 LLVMBuildBitCast(builder, bias_, int_vec_type, ""), "");
258
259 res = LLVMBuildBitCast(builder, res, vec_type, "");
260
261 res = LLVMBuildFSub(builder, res, bias_, "");
262 res = LLVMBuildFMul(builder, res, lp_build_const_vec(dst_type, scale), "");
263
264 return res;
265 }
266
267
268 /**
269 * Generic type conversion.
270 *
271 * TODO: Take a precision argument, or even better, add a new precision member
272 * to the lp_type union.
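*
* For example (an illustrative walk-through of the path below): converting two
* vectors of 4 x float32 into one vector of 8 x unorm16 first clamps the
* inputs to [0, 1], then converts them to 16-bit unorm values still held in
* 32-bit lanes, and finally repacks them with lp_build_resize into 16-bit
* lanes (2 x 4 == 1 x 8 elements); no further scaling is needed.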
273 */
274 void
275 lp_build_conv(LLVMBuilderRef builder,
276 struct lp_type src_type,
277 struct lp_type dst_type,
278 const LLVMValueRef *src, unsigned num_srcs,
279 LLVMValueRef *dst, unsigned num_dsts)
280 {
281 struct lp_type tmp_type;
282 LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
283 unsigned num_tmps;
284 unsigned i;
285
286 /* We must not lose or gain channels, only precision. */
287 assert(src_type.length * num_srcs == dst_type.length * num_dsts);
288
289 assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
290 assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
291 assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
292 assert(num_dsts <= LP_MAX_VECTOR_LENGTH);
293
294 tmp_type = src_type;
295 for(i = 0; i < num_srcs; ++i) {
296 assert(lp_check_value(src_type, src[i]));
297 tmp[i] = src[i];
298 }
299 num_tmps = num_srcs;
300
301
302 /* Special case 4x4f --> 1x16ub
303 */
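
/*
 * Strategy sketch: each group of four float vectors is scaled by 255,
 * rounded to int32, then narrowed in two saturating packing steps
 * (int32 -> int16 -> uint8), so the packing itself provides the final
 * clamp to [0, 255].
 */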
304 if (src_type.floating == 1 &&
305 src_type.fixed == 0 &&
306 src_type.sign == 1 &&
307 src_type.norm == 0 &&
308 src_type.width == 32 &&
309 src_type.length == 4 &&
310
311 dst_type.floating == 0 &&
312 dst_type.fixed == 0 &&
313 dst_type.sign == 0 &&
314 dst_type.norm == 1 &&
315 dst_type.width == 8 &&
316 dst_type.length == 16 &&
317
318 util_cpu_caps.has_sse2)
319 {
320 int i;
321
322 for (i = 0; i < num_dsts; i++, src += 4) {
323 struct lp_type int16_type = dst_type;
324 struct lp_type int32_type = dst_type;
325 LLVMValueRef lo, hi;
326 LLVMValueRef src_int0;
327 LLVMValueRef src_int1;
328 LLVMValueRef src_int2;
329 LLVMValueRef src_int3;
330 LLVMTypeRef int16_vec_type;
331 LLVMTypeRef int32_vec_type;
332 LLVMTypeRef src_vec_type;
333 LLVMTypeRef dst_vec_type;
334 LLVMValueRef const_255f;
335 LLVMValueRef a, b, c, d;
336
337 int16_type.width *= 2;
338 int16_type.length /= 2;
339 int16_type.sign = 1;
340
341 int32_type.width *= 4;
342 int32_type.length /= 4;
343 int32_type.sign = 1;
344
345 src_vec_type = lp_build_vec_type(src_type);
346 dst_vec_type = lp_build_vec_type(dst_type);
347 int16_vec_type = lp_build_vec_type(int16_type);
348 int32_vec_type = lp_build_vec_type(int32_type);
349
350 const_255f = lp_build_const_vec(src_type, 255.0f);
351
352 a = LLVMBuildFMul(builder, src[0], const_255f, "");
353 b = LLVMBuildFMul(builder, src[1], const_255f, "");
354 c = LLVMBuildFMul(builder, src[2], const_255f, "");
355 d = LLVMBuildFMul(builder, src[3], const_255f, "");
356
357 {
358 struct lp_build_context bld;
359
360 bld.builder = builder;
361 bld.type = src_type;
362 bld.vec_type = src_vec_type;
363 bld.int_elem_type = lp_build_elem_type(int32_type);
364 bld.int_vec_type = int32_vec_type;
365 bld.undef = lp_build_undef(src_type);
366 bld.zero = lp_build_zero(src_type);
367 bld.one = lp_build_one(src_type);
368
369 src_int0 = lp_build_iround(&bld, a);
370 src_int1 = lp_build_iround(&bld, b);
371 src_int2 = lp_build_iround(&bld, c);
372 src_int3 = lp_build_iround(&bld, d);
373 }
374 /* relying on clamping behavior of sse2 intrinsics here */
375 lo = lp_build_pack2(builder, int32_type, int16_type, src_int0, src_int1);
376 hi = lp_build_pack2(builder, int32_type, int16_type, src_int2, src_int3);
377 dst[i] = lp_build_pack2(builder, int16_type, dst_type, lo, hi);
378 }
379 return;
380 }
381
382 /*
383 * Clamp if necessary
384 */
385
386 if(memcmp(&src_type, &dst_type, sizeof src_type) != 0) {
387 struct lp_build_context bld;
388 double src_min = lp_const_min(src_type);
389 double dst_min = lp_const_min(dst_type);
390 double src_max = lp_const_max(src_type);
391 double dst_max = lp_const_max(dst_type);
392 LLVMValueRef thres;
393
394 lp_build_context_init(&bld, builder, tmp_type);
395
396 if(src_min < dst_min) {
397 if(dst_min == 0.0)
398 thres = bld.zero;
399 else
400 thres = lp_build_const_vec(src_type, dst_min);
401 for(i = 0; i < num_tmps; ++i)
402 tmp[i] = lp_build_max(&bld, tmp[i], thres);
403 }
404
405 if(src_max > dst_max) {
406 if(dst_max == 1.0)
407 thres = bld.one;
408 else
409 thres = lp_build_const_vec(src_type, dst_max);
410 for(i = 0; i < num_tmps; ++i)
411 tmp[i] = lp_build_min(&bld, tmp[i], thres);
412 }
413 }
414
415 /*
416 * Scale to the narrowest range
417 */
418
419 if(dst_type.floating) {
420 /* Nothing to do */
421 }
422 else if(tmp_type.floating) {
423 if(!dst_type.fixed && !dst_type.sign && dst_type.norm) {
424 for(i = 0; i < num_tmps; ++i) {
425 tmp[i] = lp_build_clamped_float_to_unsigned_norm(builder,
426 tmp_type,
427 dst_type.width,
428 tmp[i]);
429 }
430 tmp_type.floating = FALSE;
431 }
432 else {
433 double dst_scale = lp_const_scale(dst_type);
434 LLVMTypeRef tmp_vec_type;
435
436 if (dst_scale != 1.0) {
437 LLVMValueRef scale = lp_build_const_vec(tmp_type, dst_scale);
438 for(i = 0; i < num_tmps; ++i)
439 tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
440 }
441
442 /* Use an equally sized integer for intermediate computations */
443 tmp_type.floating = FALSE;
444 tmp_vec_type = lp_build_vec_type(tmp_type);
445 for(i = 0; i < num_tmps; ++i) {
446 #if 0
447 if(dst_type.sign)
448 tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
449 else
450 tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
451 #else
452 /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
453 tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
454 #endif
455 }
456 }
457 }
458 else {
459 unsigned src_shift = lp_const_shift(src_type);
460 unsigned dst_shift = lp_const_shift(dst_type);
461
462 /* FIXME: compensate different offsets too */
463 if(src_shift > dst_shift) {
464 LLVMValueRef shift = lp_build_const_int_vec(tmp_type, src_shift - dst_shift);
465 for(i = 0; i < num_tmps; ++i)
466 if(src_type.sign)
467 tmp[i] = LLVMBuildAShr(builder, tmp[i], shift, "");
468 else
469 tmp[i] = LLVMBuildLShr(builder, tmp[i], shift, "");
470 }
471 }
472
473 /*
474 * Truncate or expand bit width
475 *
476 * No data conversion should happen here, although the sign bits are
477 * crucial to avoid bad clamping.
478 */
479
480 {
481 struct lp_type new_type;
482
483 new_type = tmp_type;
484 new_type.sign = dst_type.sign;
485 new_type.width = dst_type.width;
486 new_type.length = dst_type.length;
487
488 lp_build_resize(builder, tmp_type, new_type, tmp, num_srcs, tmp, num_dsts);
489
490 tmp_type = new_type;
491 num_tmps = num_dsts;
492 }
493
494 /*
495 * Scale to the widest range
496 */
497
498 if(src_type.floating) {
499 /* Nothing to do */
500 }
501 else if(!src_type.floating && dst_type.floating) {
502 if(!src_type.fixed && !src_type.sign && src_type.norm) {
503 for(i = 0; i < num_tmps; ++i) {
504 tmp[i] = lp_build_unsigned_norm_to_float(builder,
505 src_type.width,
506 dst_type,
507 tmp[i]);
508 }
509 tmp_type.floating = TRUE;
510 }
511 else {
512 double src_scale = lp_const_scale(src_type);
513 LLVMTypeRef tmp_vec_type;
514
515 /* Use an equally sized float for intermediate computations */
516 tmp_type.floating = TRUE;
517 tmp_type.sign = TRUE;
518 tmp_vec_type = lp_build_vec_type(tmp_type);
519 for(i = 0; i < num_tmps; ++i) {
520 #if 0
521 if(dst_type.sign)
522 tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
523 else
524 tmp[i] = LLVMBuildUIToFP(builder, tmp[i], tmp_vec_type, "");
525 #else
526 /* FIXME: there is no SSE counterpart for LLVMBuildUIToFP */
527 tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
528 #endif
529 }
530
531 if (src_scale != 1.0) {
532 LLVMValueRef scale = lp_build_const_vec(tmp_type, 1.0/src_scale);
533 for(i = 0; i < num_tmps; ++i)
534 tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
535 }
536 }
537 }
538 else {
539 unsigned src_shift = lp_const_shift(src_type);
540 unsigned dst_shift = lp_const_shift(dst_type);
541
542 /* FIXME: compensate different offsets too */
543 if(src_shift < dst_shift) {
544 LLVMValueRef shift = lp_build_const_int_vec(tmp_type, dst_shift - src_shift);
545 for(i = 0; i < num_tmps; ++i)
546 tmp[i] = LLVMBuildShl(builder, tmp[i], shift, "");
547 }
548 }
549
550 for(i = 0; i < num_dsts; ++i) {
551 dst[i] = tmp[i];
552 assert(lp_check_value(dst_type, dst[i]));
553 }
554 }
555
556
557 /**
558 * Bit mask conversion.
559 *
560 * This converts integer masks laid out according to the given types.
561 *
562 * The mask values should be 0 or -1, i.e., all bits either cleared or set.
563 * Any other value will likely cause unpredictable results.
564 *
565 * This is basically a very trimmed down version of lp_build_conv.
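*
* For example (illustrative): four masks of 4 x int32 can be converted into a
* single mask of 16 x int8 purely by packing, since only the bit width and
* vector length change.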
566 */
567 void
568 lp_build_conv_mask(LLVMBuilderRef builder,
569 struct lp_type src_type,
570 struct lp_type dst_type,
571 const LLVMValueRef *src, unsigned num_srcs,
572 LLVMValueRef *dst, unsigned num_dsts)
573 {
574 /* Register width must remain constant */
575 assert(src_type.width * src_type.length == dst_type.width * dst_type.length);
576
577 /* We must not lose or gain channels, only precision. */
578 assert(src_type.length * num_srcs == dst_type.length * num_dsts);
579
580 /*
581 * Drop the floating/fixed/norm interpretation and treat the masks as plain
582 * signed integers of the same width, since we assume all values are either
583 * 0 or -1.
584 */
585
586 src_type.floating = FALSE;
587 src_type.fixed = FALSE;
588 src_type.sign = TRUE;
589 src_type.norm = FALSE;
590
591 dst_type.floating = FALSE;
592 dst_type.fixed = FALSE;
593 dst_type.sign = TRUE;
594 dst_type.norm = FALSE;
595
596 /*
597 * Truncate or expand bit width
598 */
599
600 if(src_type.width > dst_type.width) {
601 assert(num_dsts == 1);
602 dst[0] = lp_build_pack(builder, src_type, dst_type, TRUE, src, num_srcs);
603 }
604 else if(src_type.width < dst_type.width) {
605 assert(num_srcs == 1);
606 lp_build_unpack(builder, src_type, dst_type, src[0], dst, num_dsts);
607 }
608 else {
609 assert(num_srcs == num_dsts);
610 memcpy(dst, src, num_dsts * sizeof *dst);
611 }
612 }