/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Helper functions for type conversions.
 *
 * We want to use the fastest type for a given computation whenever feasible.
 * The other side of this is that we need to be able to convert between
 * several types accurately and efficiently.
 *
 * Conversion between types of different bit width is quite complex since a
 * multi-step procedure is often required.
 *
 * There are a few invariants to remember in type conversions:
 *
 * - register width must remain constant:
 *
 *     src_type.width * src_type.length == dst_type.width * dst_type.length
 *
 * - total number of elements must remain constant:
 *
 *     src_type.length * num_srcs == dst_type.length * num_dsts
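 *
 * For example (an illustrative scenario, not taken from this file):
 * converting four vectors of 4 x f32 into one vector of 16 x i8 keeps both
 * invariants, since 32 * 4 == 8 * 16 == 128 register bits, and
 * 4 * 4 == 16 * 1 total elements.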
 *
 * It is not always possible to do the conversion both accurately and
 * efficiently, usually due to lack of adequate machine instructions. In these
 * cases it is important not to take shortcuts here and sacrifice accuracy, as
 * these functions can be used anywhere. In the future we might have a
 * precision parameter which can gauge the accuracy vs efficiency compromise,
 * but for now if the data conversion between two stages happens to be the
 * bottleneck, then most likely one should just avoid converting at all and
 * run both stages with the same type.
 *
 * Make sure to run lp_test_conv unit test after any change to this file.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */

#include "util/u_debug.h"
#include "util/u_math.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_arit.h"
#include "lp_bld_pack.h"
#include "lp_bld_conv.h"

/**
 * Special case for converting clamped IEEE-754 floats to unsigned norms.
 *
 * The mathematical voodoo below may seem excessive but it is actually
 * paramount we do it this way for several reasons. First, there is no single
 * precision FP to unsigned integer conversion Intel SSE instruction. Second,
 * even if there were, since the FP's mantissa takes only a fraction of the
 * register bits, the typical scale-and-cast approach would require double
 * precision for accurate results, and therefore half the throughput.
 *
 * Although the result values can be scaled to an arbitrary bit width
 * specified by dst_width, the actual result type will have the same width.
 *
 * Ex: src = { float, float, float, float }
 * return { i32, i32, i32, i32 } where each value is in [0, 2^dst_width-1].
 */
LLVMValueRef
lp_build_clamped_float_to_unsigned_norm(LLVMBuilderRef builder,
                                        struct lp_type src_type,
                                        unsigned dst_width,
                                        LLVMValueRef src)
{
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(src_type);
   LLVMValueRef res;
   unsigned mantissa;
   unsigned n;
   unsigned long long ubound;
   unsigned long long mask;
   double scale;
   double bias;

   assert(src_type.floating);

   mantissa = lp_mantissa(src_type);

   /* We cannot carry more bits than the mantissa */
   n = MIN2(mantissa, dst_width);

   /* These magic coefficients will make the desired result appear in the
    * least significant bits of the mantissa.
    */
   ubound = ((unsigned long long)1 << n);
   mask = ubound - 1;
   scale = (double)mask/ubound;
   bias = (double)((unsigned long long)1 << (mantissa - n));

   res = LLVMBuildFMul(builder, src, lp_build_const_vec(src_type, scale), "");
   res = LLVMBuildFAdd(builder, res, lp_build_const_vec(src_type, bias), "");
   res = LLVMBuildBitCast(builder, res, int_vec_type, "");

   if(dst_width > n) {
      int shift = dst_width - n;
      res = LLVMBuildShl(builder, res, lp_build_const_int_vec(src_type, shift), "");

      /* TODO: Fill in the empty lower bits for additional precision? */
      /* YES: this fixes progs/trivial/tri-z-eq.c.
       * Otherwise vertex Z=1.0 values get converted to something like
       * 0xfffffb00 and the test for equality with 0xffffffff fails.
       */
#if 0
      /* Broken when the MSB is zero: 0 - 1 sets every bit. Kept for
       * reference only.
       */
      {
         LLVMValueRef msb;
         msb = LLVMBuildLShr(builder, res, lp_build_const_int_vec(src_type, dst_width - 1), "");
         msb = LLVMBuildShl(builder, msb, lp_build_const_int_vec(src_type, shift), "");
         msb = LLVMBuildSub(builder, msb, lp_build_const_int_vec(src_type, 1), "");
         res = LLVMBuildOr(builder, res, msb, "");
      }
#else
      /* Replicate the top bits downwards until the low bits are filled */
      while(shift > 0) {
         res = LLVMBuildOr(builder, res, LLVMBuildLShr(builder, res, lp_build_const_int_vec(src_type, n), ""), "");
         shift -= n;
         n *= 2;
      }
#endif
   }
   else
      res = LLVMBuildAnd(builder, res, lp_build_const_int_vec(src_type, mask), "");

   return res;
}

/**
 * Inverse of lp_build_clamped_float_to_unsigned_norm above.
 *
 * Ex: src = { i32, i32, i32, i32 } with values in range [0, 2^src_width-1]
 * return {float, float, float, float} with values in range [0, 1].
 */
LLVMValueRef
lp_build_unsigned_norm_to_float(LLVMBuilderRef builder,
                                unsigned src_width,
                                struct lp_type dst_type,
                                LLVMValueRef src)
{
   LLVMTypeRef vec_type = lp_build_vec_type(dst_type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(dst_type);
   LLVMValueRef bias_;
   LLVMValueRef res;
   unsigned mantissa;
   unsigned n;
   unsigned long long ubound;
   unsigned long long mask;
   double scale;
   double bias;

   assert(dst_type.floating);

   mantissa = lp_mantissa(dst_type);

   n = MIN2(mantissa, src_width);

   ubound = ((unsigned long long)1 << n);
   mask = ubound - 1;
   scale = (double)ubound/mask;
   bias = (double)((unsigned long long)1 << (mantissa - n));

   res = src;

   if(src_width > mantissa) {
      int shift = src_width - mantissa;
      res = LLVMBuildLShr(builder, res, lp_build_const_int_vec(dst_type, shift), "");
   }

   bias_ = lp_build_const_vec(dst_type, bias);

   res = LLVMBuildOr(builder,
                     res,
                     LLVMBuildBitCast(builder, bias_, int_vec_type, ""), "");

   res = LLVMBuildBitCast(builder, res, vec_type, "");

   res = LLVMBuildFSub(builder, res, bias_, "");
   res = LLVMBuildFMul(builder, res, lp_build_const_vec(dst_type, scale), "");

   return res;
}

/**
 * Generic type conversion.
 *
 * TODO: Take a precision argument, or even better, add a new precision member
 * to the lp_type union.
 */
void
lp_build_conv(LLVMBuilderRef builder,
              struct lp_type src_type,
              struct lp_type dst_type,
              const LLVMValueRef *src, unsigned num_srcs,
              LLVMValueRef *dst, unsigned num_dsts)
{
   struct lp_type tmp_type;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned num_tmps;
   unsigned i;

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
   assert(num_dsts <= LP_MAX_VECTOR_LENGTH);

   tmp_type = src_type;
   for(i = 0; i < num_srcs; ++i) {
      assert(lp_check_value(src_type, src[i]));
      tmp[i] = src[i];
   }
   num_tmps = num_srcs;

   /*
    * Clamp if necessary
    */

   if(memcmp(&src_type, &dst_type, sizeof src_type) != 0) {
      struct lp_build_context bld;
      double src_min = lp_const_min(src_type);
      double dst_min = lp_const_min(dst_type);
      double src_max = lp_const_max(src_type);
      double dst_max = lp_const_max(dst_type);
      LLVMValueRef thres;

      lp_build_context_init(&bld, builder, tmp_type);

      if(src_min < dst_min) {
         if(dst_min == 0.0)
            thres = bld.zero;
         else
            thres = lp_build_const_vec(src_type, dst_min);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_max(&bld, tmp[i], thres);
      }

      if(src_max > dst_max) {
         if(dst_max == 1.0)
            thres = bld.one;
         else
            thres = lp_build_const_vec(src_type, dst_max);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_min(&bld, tmp[i], thres);
      }
   }

   /*
    * Scale to the narrowest range
    */

   if(dst_type.floating) {
      /* Nothing to do */
   }
   else if(tmp_type.floating) {
      if(!dst_type.fixed && !dst_type.sign && dst_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_clamped_float_to_unsigned_norm(builder,
                                                             tmp_type,
                                                             dst_type.width,
                                                             tmp[i]);
         }
         tmp_type.floating = FALSE;
      }
      else {
         double dst_scale = lp_const_scale(dst_type);
         LLVMTypeRef tmp_vec_type;

         if (dst_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(tmp_type, dst_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
         }

         /* Use an equally sized integer for intermediate computations */
         tmp_type.floating = FALSE;
         tmp_vec_type = lp_build_vec_type(tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(dst_type.sign)
               tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
            tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
#endif
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift > dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(tmp_type, src_shift - dst_shift);
         for(i = 0; i < num_tmps; ++i)
            if(src_type.sign)
               tmp[i] = LLVMBuildAShr(builder, tmp[i], shift, "");
            else
               tmp[i] = LLVMBuildLShr(builder, tmp[i], shift, "");
      }
   }

   /*
    * Truncate or expand bit width
    *
    * No data conversion should happen here, although the sign bits are
    * crucial to avoid bad clamping.
    */

   {
      struct lp_type new_type;

      new_type = tmp_type;
      new_type.sign   = dst_type.sign;
      new_type.width  = dst_type.width;
      new_type.length = dst_type.length;

      lp_build_resize(builder, tmp_type, new_type, tmp, num_srcs, tmp, num_dsts);

      tmp_type = new_type;
      num_tmps = num_dsts;
   }

   /*
    * Scale to the widest range
    */

   if(src_type.floating) {
      /* Nothing to do */
   }
   else if(!src_type.floating && dst_type.floating) {
      if(!src_type.fixed && !src_type.sign && src_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_unsigned_norm_to_float(builder,
                                                     src_type.width,
                                                     dst_type,
                                                     tmp[i]);
         }
         tmp_type.floating = TRUE;
      }
      else {
         double src_scale = lp_const_scale(src_type);
         LLVMTypeRef tmp_vec_type;

         /* Use an equally sized float for intermediate computations */
         tmp_type.floating = TRUE;
         tmp_type.sign = TRUE;
         tmp_vec_type = lp_build_vec_type(tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(dst_type.sign)
               tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildUIToFP(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildUIToFP */
            tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
#endif
         }

         if (src_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(tmp_type, 1.0/src_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift < dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(tmp_type, dst_shift - src_shift);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = LLVMBuildShl(builder, tmp[i], shift, "");
      }
   }

   for(i = 0; i < num_dsts; ++i) {
      dst[i] = tmp[i];
      assert(lp_check_value(dst_type, dst[i]));
   }
}
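
/*
 * Usage sketch (hypothetical, for illustration only; not from this file):
 * convert four 4 x f32 vectors into a single 16 x unorm8 vector, e.g. for
 * RGBA color writeout. Both invariants hold: 32 * 4 == 8 * 16 register
 * bits, and 4 * 4 == 16 * 1 total elements.
 */
#if 0
static void
example_f32_to_unorm8(LLVMBuilderRef builder,
                      LLVMValueRef src[4],   /* four 4 x f32 vectors */
                      LLVMValueRef dst[1])   /* one 16 x unorm8 vector */
{
   struct lp_type f32_type;
   struct lp_type u8n_type;

   memset(&f32_type, 0, sizeof f32_type);
   f32_type.floating = TRUE;
   f32_type.sign = TRUE;
   f32_type.width = 32;
   f32_type.length = 4;

   memset(&u8n_type, 0, sizeof u8n_type);
   u8n_type.norm = TRUE;
   u8n_type.width = 8;
   u8n_type.length = 16;

   lp_build_conv(builder, f32_type, u8n_type, src, 4, dst, 1);
}
#endif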

/**
 * Bit mask conversion.
 *
 * This will convert the integer masks that match the given types.
 *
 * The mask values should be 0 or -1, i.e., all bits either set to zero or
 * one. Any other value will likely cause unpredictable results.
 *
 * This is basically a very trimmed down version of lp_build_conv.
 */
void
lp_build_conv_mask(LLVMBuilderRef builder,
                   struct lp_type src_type,
                   struct lp_type dst_type,
                   const LLVMValueRef *src, unsigned num_srcs,
                   LLVMValueRef *dst, unsigned num_dsts)
{
   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   /*
    * Drop the normalized, fixed and floating point semantics.
    *
    * We assume all values are 0 or -1.
    */

   src_type.floating = FALSE;
   src_type.fixed = FALSE;
   src_type.sign = TRUE;
   src_type.norm = FALSE;

   dst_type.floating = FALSE;
   dst_type.fixed = FALSE;
   dst_type.sign = TRUE;
   dst_type.norm = FALSE;

   /*
    * Truncate or expand bit width
    */

   if(src_type.width > dst_type.width) {
      assert(num_dsts == 1);
      dst[0] = lp_build_pack(builder, src_type, dst_type, TRUE, src, num_srcs);
   }
   else if(src_type.width < dst_type.width) {
      assert(num_srcs == 1);
      lp_build_unpack(builder, src_type, dst_type, src[0], dst, num_dsts);
   }
   else {
      assert(num_srcs == num_dsts);
      memcpy(dst, src, num_dsts * sizeof *dst);
   }
}
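
/*
 * Usage sketch (hypothetical, for illustration only; not from this file):
 * narrow four 4 x i32 masks, each lane 0 or ~0, into one 16 x i8 mask.
 * Since src_type.width > dst_type.width, this takes the lp_build_pack path
 * above.
 */
#if 0
static void
example_mask32_to_mask8(LLVMBuilderRef builder,
                        LLVMValueRef src[4],   /* four 4 x i32 masks */
                        LLVMValueRef dst[1])   /* one 16 x i8 mask */
{
   struct lp_type mask32_type;
   struct lp_type mask8_type;

   memset(&mask32_type, 0, sizeof mask32_type);
   mask32_type.sign = TRUE;
   mask32_type.width = 32;
   mask32_type.length = 4;

   memset(&mask8_type, 0, sizeof mask8_type);
   mask8_type.sign = TRUE;
   mask8_type.width = 8;
   mask8_type.length = 16;

   lp_build_conv_mask(builder, mask32_type, mask8_type, src, 4, dst, 1);
}
#endif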