/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Helper functions for type conversions.
 *
 * We want to use the fastest type for a given computation whenever feasible.
 * The other side of this is that we need to be able to convert between
 * several types accurately and efficiently.
 *
 * Conversion between types of different bit width is quite complex, since a
 * multi-step process, going through one or more intermediate types, is
 * usually required.
 *
 * There are a few invariants to remember in type conversions:
 *
 * - register width must remain constant:
 *
 *     src_type.width * src_type.length == dst_type.width * dst_type.length
 *
 * - total number of elements must remain constant:
 *
 *     src_type.length * num_srcs == dst_type.length * num_dsts
 *
 * It is not always possible to do the conversion both accurately and
 * efficiently, usually due to lack of adequate machine instructions. In these
 * cases it is important not to take shortcuts here and sacrifice accuracy, as
 * these functions can be used anywhere. In the future we might have a
 * precision parameter which gauges the accuracy vs. efficiency compromise,
 * but for now, if the data conversion between two stages happens to be the
 * bottleneck, then most likely one should just avoid converting at all and
 * run both stages with the same type.
 *
 * Make sure to run the lp_test_conv unit test after any change to this file.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */
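
/*
 * Illustration of the two invariants above (not code from this file):
 * converting four <4 x float> vectors into a single <16 x i8> vector keeps
 * the register width constant (32 * 4 == 8 * 16) as well as the total
 * element count (4 * 4 == 16 * 1). This is exactly the shape handled by the
 * SSE2 fast path in lp_build_conv() below.
 */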

#include "util/u_debug.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_arit.h"
#include "lp_bld_pack.h"
#include "lp_bld_conv.h"


/**
 * Special case for converting clamped IEEE-754 floats to unsigned norms.
 *
 * The mathematical voodoo below may seem excessive but it is actually
 * paramount we do it this way for several reasons. First, there is no
 * single-precision FP to unsigned integer conversion Intel SSE instruction.
 * Second, even if there were, since the FP's mantissa takes only a fraction
 * of register bits, the typical scale-and-cast approach would require double
 * precision for accurate results, and therefore halve the throughput.
 *
 * Although the result values can be scaled to an arbitrary bit width
 * specified by dst_width, the actual result type will have the same width
 * as the source type.
 *
 * Ex: src = { float, float, float, float }
 * return { i32, i32, i32, i32 } where each value is in [0, 2^dst_width-1].
 */
LLVMValueRef
lp_build_clamped_float_to_unsigned_norm(LLVMBuilderRef builder,
                                        struct lp_type src_type,
                                        unsigned dst_width,
                                        LLVMValueRef src)
{
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(src_type);
   LLVMValueRef res;
   unsigned mantissa;

   assert(src_type.floating);
   assert(dst_width <= src_type.width);
   src_type.sign = FALSE;

   mantissa = lp_mantissa(src_type);

   if (dst_width <= mantissa) {
      /*
       * Apply magic coefficients that will make the desired result appear
       * in the least significant bits of the mantissa, with correct rounding.
       *
       * This only works if the destination width fits in the mantissa.
       */

      unsigned long long ubound;
      unsigned long long mask;
      double scale;
      double bias;

      ubound = (1ULL << dst_width);
      mask = ubound - 1;
      scale = (double)mask/ubound;
      bias = (double)(1ULL << (mantissa - dst_width));

      res = LLVMBuildFMul(builder, src, lp_build_const_vec(src_type, scale), "");
      res = LLVMBuildFAdd(builder, res, lp_build_const_vec(src_type, bias), "");
      res = LLVMBuildBitCast(builder, res, int_vec_type, "");
      res = LLVMBuildAnd(builder, res, lp_build_const_int_vec(src_type, mask), "");
   }
   else if (dst_width == (mantissa + 1)) {
      /*
       * The destination width matches exactly what can be represented in
       * floating point (i.e., mantissa + 1 bits). So do a straight
       * multiplication followed by casting. No further rounding is necessary.
       */

      double scale;

      scale = (double)((1ULL << dst_width) - 1);

      res = LLVMBuildFMul(builder, src, lp_build_const_vec(src_type, scale), "");
      res = LLVMBuildFPToSI(builder, res, int_vec_type, "");
   }
   else {
      /*
       * The destination width exceeds what can be represented in floating
       * point. So multiply by the largest power of two we can get away with,
       * and then subtract the most significant bit to rescale to normalized
       * values.
       *
       * The largest power of two factor we can get away with is
       * (1 << (src_type.width - 1)), because we need to use signed integers.
       * In theory it should be (1 << (src_type.width - 2)), but IEEE 754
       * rules state INT_MIN should be returned in FPToSI, which is the
       * correct result for values near 1.0.
       *
       * This means we get (src_type.width - 1) correct bits for values near
       * 0.0, and (mantissa + 1) correct bits for values near 1.0. Equally or
       * more important, we also get exact results for 0.0 and 1.0.
       */

      unsigned n = MIN2(src_type.width - 1, dst_width);

      double scale = (double)(1ULL << n);
      unsigned lshift = dst_width - n;
      unsigned rshift = n;
      LLVMValueRef lshifted;
      LLVMValueRef rshifted;

      res = LLVMBuildFMul(builder, src, lp_build_const_vec(src_type, scale), "");
      res = LLVMBuildFPToSI(builder, res, int_vec_type, "");

      /*
       * Align the most significant bit to its final place.
       *
       * This will cause 1.0 to overflow to 0, but the later adjustment will
       * get it right.
       */
      if (lshift) {
         lshifted = LLVMBuildShl(builder, res,
                                 lp_build_const_int_vec(src_type, lshift), "");
      }
      else {
         lshifted = res;
      }

      /*
       * Align the most significant bit to the right.
       */
      rshifted = LLVMBuildAShr(builder, res,
                               lp_build_const_int_vec(src_type, rshift), "");

      /*
       * Subtract the MSB, moved down to the LSB, thereby rescaling from
       * (1 << dst_width) to ((1 << dst_width) - 1).
       */
      res = LLVMBuildSub(builder, lshifted, rshifted, "");
   }

   return res;
}

/**
 * Inverse of lp_build_clamped_float_to_unsigned_norm above.
 *
 * Ex: src = { i32, i32, i32, i32 } with values in range [0, 2^src_width-1]
 * return {float, float, float, float} with values in range [0, 1].
 */
LLVMValueRef
lp_build_unsigned_norm_to_float(LLVMBuilderRef builder,
                                unsigned src_width,
                                struct lp_type dst_type,
                                LLVMValueRef src)
{
   LLVMTypeRef vec_type = lp_build_vec_type(dst_type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(dst_type);
   LLVMValueRef bias_;
   LLVMValueRef res;
   unsigned mantissa;
   unsigned n;
   unsigned long long ubound;
   unsigned long long mask;
   double scale;
   double bias;

   assert(dst_type.floating);

   /* Special-case int8->float, though most cases could be handled
    * this way.
    */
   if (src_width == 8) {
      scale = 1.0/255.0;
      res = LLVMBuildSIToFP(builder, src, vec_type, "");
      res = LLVMBuildFMul(builder, res, lp_build_const_vec(dst_type, scale), "");
      return res;
   }

   mantissa = lp_mantissa(dst_type);

   n = MIN2(mantissa, src_width);

   ubound = ((unsigned long long)1 << n);
   mask = ubound - 1;
   scale = (double)ubound/mask;
   bias = (double)((unsigned long long)1 << (mantissa - n));

   res = src;

   if (src_width > mantissa) {
      int shift = src_width - mantissa;
      res = LLVMBuildLShr(builder, res,
                          lp_build_const_int_vec(dst_type, shift), "");
   }

   bias_ = lp_build_const_vec(dst_type, bias);

   res = LLVMBuildOr(builder,
                     res,
                     LLVMBuildBitCast(builder, bias_, int_vec_type, ""), "");

   res = LLVMBuildBitCast(builder, res, vec_type, "");

   res = LLVMBuildFSub(builder, res, bias_, "");
   res = LLVMBuildFMul(builder, res, lp_build_const_vec(dst_type, scale), "");

   return res;
}

/**
 * Generic type conversion.
 *
 * TODO: Take a precision argument, or even better, add a new precision member
 * to the lp_type structure.
 */
void
lp_build_conv(LLVMBuilderRef builder,
              struct lp_type src_type,
              struct lp_type dst_type,
              const LLVMValueRef *src, unsigned num_srcs,
              LLVMValueRef *dst, unsigned num_dsts)
{
   struct lp_type tmp_type;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned num_tmps;
   unsigned i;

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
   assert(num_dsts <= LP_MAX_VECTOR_LENGTH);

   tmp_type = src_type;
   for(i = 0; i < num_srcs; ++i) {
      assert(lp_check_value(src_type, src[i]));
      tmp[i] = src[i];
   }
   num_tmps = num_srcs;

   /* Special case 4x4f --> 1x16ub
    */
   if (src_type.floating == 1 &&
       src_type.fixed    == 0 &&
       src_type.sign     == 1 &&
       src_type.norm     == 0 &&
       src_type.width    == 32 &&
       src_type.length   == 4 &&

       dst_type.floating == 0 &&
       dst_type.fixed    == 0 &&
       dst_type.sign     == 0 &&
       dst_type.norm     == 1 &&
       dst_type.width    == 8 &&
       dst_type.length   == 16 &&

       util_cpu_caps.has_sse2)
   {
      for (i = 0; i < num_dsts; i++, src += 4) {
         struct lp_type int16_type = dst_type;
         struct lp_type int32_type = dst_type;
         LLVMValueRef lo, hi;
         LLVMValueRef src_int0;
         LLVMValueRef src_int1;
         LLVMValueRef src_int2;
         LLVMValueRef src_int3;
         LLVMTypeRef int16_vec_type;
         LLVMTypeRef int32_vec_type;
         LLVMTypeRef src_vec_type;
         LLVMTypeRef dst_vec_type;
         LLVMValueRef const_255f;
         LLVMValueRef a, b, c, d;

         int16_type.width *= 2;
         int16_type.length /= 2;
         int16_type.sign = 1;

         int32_type.width *= 4;
         int32_type.length /= 4;
         int32_type.sign = 1;

         src_vec_type   = lp_build_vec_type(src_type);
         dst_vec_type   = lp_build_vec_type(dst_type);
         int16_vec_type = lp_build_vec_type(int16_type);
         int32_vec_type = lp_build_vec_type(int32_type);

         const_255f = lp_build_const_vec(src_type, 255.0f);

         a = LLVMBuildFMul(builder, src[0], const_255f, "");
         b = LLVMBuildFMul(builder, src[1], const_255f, "");
         c = LLVMBuildFMul(builder, src[2], const_255f, "");
         d = LLVMBuildFMul(builder, src[3], const_255f, "");

         {
            struct lp_build_context bld;

            bld.builder = builder;
            bld.type = src_type;
            bld.vec_type = src_vec_type;
            bld.int_elem_type = lp_build_elem_type(int32_type);
            bld.int_vec_type = int32_vec_type;
            bld.undef = lp_build_undef(src_type);
            bld.zero = lp_build_zero(src_type);
            bld.one = lp_build_one(src_type);

            src_int0 = lp_build_iround(&bld, a);
            src_int1 = lp_build_iround(&bld, b);
            src_int2 = lp_build_iround(&bld, c);
            src_int3 = lp_build_iround(&bld, d);
         }

         /* relying on clamping behavior of sse2 intrinsics here */
         lo = lp_build_pack2(builder, int32_type, int16_type, src_int0, src_int1);
         hi = lp_build_pack2(builder, int32_type, int16_type, src_int2, src_int3);
         dst[i] = lp_build_pack2(builder, int16_type, dst_type, lo, hi);
      }
      return;
   }
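
   /*
    * On SSE2 the special case above is expected to lower to roughly
    * 4x cvtps2dq + 2x packssdw + 1x packuswb per destination vector (a
    * sketch of the intended instruction selection, not something asserted
    * here); the saturating packs are what provide the free clamping to
    * [0, 255] mentioned in the comment above.
    */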

   /*
    * Clamp if necessary
    */

   if(memcmp(&src_type, &dst_type, sizeof src_type) != 0) {
      struct lp_build_context bld;
      double src_min = lp_const_min(src_type);
      double dst_min = lp_const_min(dst_type);
      double src_max = lp_const_max(src_type);
      double dst_max = lp_const_max(dst_type);
      LLVMValueRef thres;

      lp_build_context_init(&bld, builder, tmp_type);

      if(src_min < dst_min) {
         if(dst_min == 0.0)
            thres = bld.zero;
         else
            thres = lp_build_const_vec(src_type, dst_min);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_max(&bld, tmp[i], thres);
      }

      if(src_max > dst_max) {
         if(dst_max == 1.0)
            thres = bld.one;
         else
            thres = lp_build_const_vec(src_type, dst_max);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_min(&bld, tmp[i], thres);
      }
   }

   /*
    * Scale to the narrowest range
    */

   if(dst_type.floating) {
      /* Nothing to do */
   }
   else if(tmp_type.floating) {
      if(!dst_type.fixed && !dst_type.sign && dst_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_clamped_float_to_unsigned_norm(builder,
                                                             tmp_type,
                                                             dst_type.width,
                                                             tmp[i]);
         }
         tmp_type.floating = FALSE;
      }
      else {
         double dst_scale = lp_const_scale(dst_type);
         LLVMTypeRef tmp_vec_type;

         if (dst_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(tmp_type, dst_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
         }

         /* Use an equally sized integer for intermediate computations */
         tmp_type.floating = FALSE;
         tmp_vec_type = lp_build_vec_type(tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(dst_type.sign)
               tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
            tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
#endif
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift > dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(tmp_type, src_shift - dst_shift);
         for(i = 0; i < num_tmps; ++i)
            if(src_type.sign)
               tmp[i] = LLVMBuildAShr(builder, tmp[i], shift, "");
            else
               tmp[i] = LLVMBuildLShr(builder, tmp[i], shift, "");
      }
   }

   /*
    * Truncate or expand bit width
    *
    * No data conversion should happen here, although the sign bits are
    * crucial to avoid bad clamping.
    */

   {
      struct lp_type new_type;

      new_type = tmp_type;
      new_type.sign   = dst_type.sign;
      new_type.width  = dst_type.width;
      new_type.length = dst_type.length;

      lp_build_resize(builder, tmp_type, new_type, tmp, num_srcs, tmp, num_dsts);

      tmp_type = new_type;
      num_tmps = num_dsts;
   }

   /*
    * Scale to the widest range
    */

   if(src_type.floating) {
      /* Nothing to do */
   }
   else if(!src_type.floating && dst_type.floating) {
      if(!src_type.fixed && !src_type.sign && src_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_unsigned_norm_to_float(builder,
                                                     src_type.width,
                                                     dst_type,
                                                     tmp[i]);
         }
         tmp_type.floating = TRUE;
      }
      else {
         double src_scale = lp_const_scale(src_type);
         LLVMTypeRef tmp_vec_type;

         /* Use an equally sized integer for intermediate computations */
         tmp_type.floating = TRUE;
         tmp_type.sign = TRUE;
         tmp_vec_type = lp_build_vec_type(tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(src_type.sign)
               tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildUIToFP(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildUIToFP */
            tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
#endif
         }

         if (src_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(tmp_type, 1.0/src_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift < dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(tmp_type, dst_shift - src_shift);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = LLVMBuildShl(builder, tmp[i], shift, "");
      }
   }

   for(i = 0; i < num_dsts; ++i) {
      dst[i] = tmp[i];
      assert(lp_check_value(dst_type, dst[i]));
   }
}
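
/*
 * Example (a hypothetical caller, for illustration only): converting four
 * <4 x float> vectors with values in [0, 1] into one <16 x i8> unorm vector,
 * the shape handled by the SSE2 fast path above:
 *
 *    struct lp_type f32 = {0}, u8n = {0};
 *    f32.floating = TRUE;  f32.sign = TRUE;  f32.width = 32;  f32.length = 4;
 *    u8n.norm = TRUE;  u8n.width = 8;  u8n.length = 16;
 *    lp_build_conv(builder, f32, u8n, src, 4, &dst, 1);
 */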

/**
 * Bit mask conversion.
 *
 * This will convert the integer masks that match the given types.
 *
 * The mask values should be 0 or -1, i.e., all bits either set to zero or
 * one. Any other value will likely cause unpredictable results.
 *
 * This is basically a very trimmed down version of lp_build_conv.
 */
void
lp_build_conv_mask(LLVMBuilderRef builder,
                   struct lp_type src_type,
                   struct lp_type dst_type,
                   const LLVMValueRef *src, unsigned num_srcs,
                   LLVMValueRef *dst, unsigned num_dsts)
{
   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   /*
    * We assume all values are 0 or -1, so only the bit width matters;
    * drop the floating, fixed point, and normalized flags.
    */

   src_type.floating = FALSE;
   src_type.fixed = FALSE;
   src_type.sign = TRUE;
   src_type.norm = FALSE;

   dst_type.floating = FALSE;
   dst_type.fixed = FALSE;
   dst_type.sign = TRUE;
   dst_type.norm = FALSE;

   /*
    * Truncate or expand bit width
    */

   if(src_type.width > dst_type.width) {
      assert(num_dsts == 1);
      dst[0] = lp_build_pack(builder, src_type, dst_type, TRUE, src, num_srcs);
   }
   else if(src_type.width < dst_type.width) {
      assert(num_srcs == 1);
      lp_build_unpack(builder, src_type, dst_type, src[0], dst, num_dsts);
   }
   else {
      assert(num_srcs == num_dsts);
      memcpy(dst, src, num_dsts * sizeof *dst);
   }
}
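
/*
 * Example (hypothetical, for illustration only): packing four <4 x i32>
 * masks into a single <16 x i8> mask takes the lp_build_pack path above,
 * since the source width 32 exceeds the destination width 8 and both
 * invariants hold (32 * 4 == 8 * 16, and 4 * 4 == 16 * 1). With
 * mask32_type and mask8_type being the corresponding lp_type values:
 *
 *    lp_build_conv_mask(builder, mask32_type, mask8_type, src, 4, &dst, 1);
 */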