Merge branch '7.8'
[mesa.git] src/gallium/auxiliary/gallivm/lp_bld_conv.c
/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * @file
 * Helper functions for type conversions.
 *
 * We want to use the fastest type for a given computation whenever feasible.
 * The other side of this is that we need to be able to convert between
 * several types accurately and efficiently.
 *
 * Conversion between types of different bit width is quite complex, since
 * the values usually have to be repacked across several registers in
 * multiple steps.
 *
 * Keep in mind that there are a few invariants in type conversions:
 *
 * - register width must remain constant:
 *
 *     src_type.width * src_type.length == dst_type.width * dst_type.length
 *
 * - total number of elements must remain constant:
 *
 *     src_type.length * num_srcs == dst_type.length * num_dsts
 *
 * It is not always possible to do the conversion both accurately and
 * efficiently, usually due to lack of adequate machine instructions. In
 * these cases it is important not to take shortcuts here and sacrifice
 * accuracy, as these functions can be used anywhere. In the future we might
 * have a precision parameter which can gauge the accuracy vs efficiency
 * compromise, but for now, if the data conversion between two stages happens
 * to be the bottleneck, then most likely one should just avoid converting at
 * all and run both stages with the same type.
 *
 * Make sure to run the lp_test_conv unit test after any change to this file.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */
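
/*
 * For instance, converting two vectors of 16 x unorm8 into eight vectors of
 * 4 x float32 satisfies both invariants above: each register stays 128 bits
 * wide (16*8 == 4*32), and the total element count stays at 32 (2*16 == 8*4).
 */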


#include "util/u_debug.h"
#include "util/u_math.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_arit.h"
#include "lp_bld_pack.h"
#include "lp_bld_conv.h"


/**
 * Special case for converting clamped IEEE-754 floats to unsigned norms.
 *
 * The mathematical voodoo below may seem excessive but it is actually
 * paramount we do it this way for several reasons. First, there is no
 * single precision FP to unsigned integer conversion Intel SSE instruction.
 * Second, even if there was, since the FP's mantissa takes only a fraction
 * of the register bits, the typical scale-and-cast approach would require
 * double precision for accurate results, and therefore half the throughput.
 *
 * Although the result values can be scaled to an arbitrary bit width
 * specified by dst_width, the actual result type will have the same width
 * as the source type.
 */
LLVMValueRef
lp_build_clamped_float_to_unsigned_norm(LLVMBuilderRef builder,
                                        struct lp_type src_type,
                                        unsigned dst_width,
                                        LLVMValueRef src)
{
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(src_type);
   LLVMValueRef res;
   unsigned mantissa;
   unsigned n;
   unsigned long long ubound;
   unsigned long long mask;
   double scale;
   double bias;

   assert(src_type.floating);

   mantissa = lp_mantissa(src_type);

   /* We cannot carry more bits than the mantissa */
   n = MIN2(mantissa, dst_width);

   /* These magic coefficients make the desired result appear in the
    * least significant bits of the mantissa.
    */
   ubound = ((unsigned long long)1 << n);
   mask = ubound - 1;
   scale = (double)mask/ubound;
   bias = (double)((unsigned long long)1 << (mantissa - n));

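   /*
    * Illustrative numbers (a sketch, not from the original comments): for
    * 32-bit floats (23-bit mantissa) and dst_width = 8 this gives n = 8,
    * scale = 255/256 and bias = 2^15.  A clamped value x in [0,1] becomes
    * x*255/256 + 32768.0, whose IEEE-754 bit pattern carries approximately
    * round(x*255) in its lowest 8 bits, i.e. the unorm8 result with
    * round-to-nearest for free.  The shifting and masking below then
    * repositions or isolates those bits.
    */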
   res = LLVMBuildMul(builder, src, lp_build_const_vec(src_type, scale), "");
   res = LLVMBuildAdd(builder, res, lp_build_const_vec(src_type, bias), "");
   res = LLVMBuildBitCast(builder, res, int_vec_type, "");

   if(dst_width > n) {
      int shift = dst_width - n;
      res = LLVMBuildShl(builder, res, lp_build_const_int_vec(src_type, shift), "");

      /* TODO: Fill in the empty lower bits for additional precision? */
      /* YES: this fixes progs/trivial/tri-z-eq.c.
       * Otherwise vertex Z=1.0 values get converted to something like
       * 0xfffffb00 and the test for equality with 0xffffffff fails.
       */
#if 0
      {
         LLVMValueRef msb;
         msb = LLVMBuildLShr(builder, res, lp_build_const_int_vec(src_type, dst_width - 1), "");
         msb = LLVMBuildShl(builder, msb, lp_build_const_int_vec(src_type, shift), "");
         msb = LLVMBuildSub(builder, msb, lp_build_const_int_vec(src_type, 1), "");
         res = LLVMBuildOr(builder, res, msb, "");
      }
#elif 0
      while(shift > 0) {
         res = LLVMBuildOr(builder, res, LLVMBuildLShr(builder, res, lp_build_const_int_vec(src_type, n), ""), "");
         shift -= n;
         n *= 2;
      }
#endif
   }
   else
      res = LLVMBuildAnd(builder, res, lp_build_const_int_vec(src_type, mask), "");

   return res;
}


/**
 * Inverse of lp_build_clamped_float_to_unsigned_norm above.
 */
LLVMValueRef
lp_build_unsigned_norm_to_float(LLVMBuilderRef builder,
                                unsigned src_width,
                                struct lp_type dst_type,
                                LLVMValueRef src)
{
   LLVMTypeRef vec_type = lp_build_vec_type(dst_type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(dst_type);
   LLVMValueRef bias_;
   LLVMValueRef res;
   unsigned mantissa;
   unsigned n;
   unsigned long long ubound;
   unsigned long long mask;
   double scale;
   double bias;

   mantissa = lp_mantissa(dst_type);

   n = MIN2(mantissa, src_width);

   ubound = ((unsigned long long)1 << n);
   mask = ubound - 1;
   scale = (double)ubound/mask;
   bias = (double)((unsigned long long)1 << (mantissa - n));
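   /*
    * Illustrative numbers (a sketch, not from the original comments): for
    * src_width = 8 and 32-bit floats this gives n = 8, scale = 256/255 and
    * bias = 2^15 = 32768.0.  OR-ing a unorm8 value u into the mantissa of
    * 32768.0 yields the float 32768.0 + u/256; subtracting the bias and
    * multiplying by 256/255 recovers the expected u/255.
    */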

   res = src;

   if(src_width > mantissa) {
      int shift = src_width - mantissa;
      res = LLVMBuildLShr(builder, res, lp_build_const_int_vec(dst_type, shift), "");
   }

   bias_ = lp_build_const_vec(dst_type, bias);

   res = LLVMBuildOr(builder,
                     res,
                     LLVMBuildBitCast(builder, bias_, int_vec_type, ""), "");

   res = LLVMBuildBitCast(builder, res, vec_type, "");

   res = LLVMBuildSub(builder, res, bias_, "");
   res = LLVMBuildMul(builder, res, lp_build_const_vec(dst_type, scale), "");

   return res;
}


/**
 * Generic type conversion.
 *
 * TODO: Take a precision argument, or even better, add a new precision member
 * to the lp_type structure.
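 *
 * As a rough usage sketch (the variable names soa, packed and builder are
 * made up for illustration; the field setup only uses lp_type members that
 * appear elsewhere in this file), converting four vectors of 4 x float32 in
 * soa[4] into a single vector of 16 x unorm8 in packed could look like:
 *
 *    struct lp_type f32_type;
 *    struct lp_type u8n_type;
 *
 *    memset(&f32_type, 0, sizeof f32_type);
 *    f32_type.floating = TRUE;
 *    f32_type.sign = TRUE;
 *    f32_type.width = 32;
 *    f32_type.length = 4;
 *
 *    memset(&u8n_type, 0, sizeof u8n_type);
 *    u8n_type.norm = TRUE;
 *    u8n_type.width = 8;
 *    u8n_type.length = 16;
 *
 *    lp_build_conv(builder, f32_type, u8n_type, soa, 4, &packed, 1);
 *
 * This satisfies both invariants: 32*4 == 8*16 bits per register and
 * 4*4 == 1*16 elements in total.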
 */
void
lp_build_conv(LLVMBuilderRef builder,
              struct lp_type src_type,
              struct lp_type dst_type,
              const LLVMValueRef *src, unsigned num_srcs,
              LLVMValueRef *dst, unsigned num_dsts)
{
   struct lp_type tmp_type;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned num_tmps;
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels, only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);

   tmp_type = src_type;
   for(i = 0; i < num_srcs; ++i)
      tmp[i] = src[i];
   num_tmps = num_srcs;

   /*
    * Clamp if necessary
    */

   if(memcmp(&src_type, &dst_type, sizeof src_type) != 0) {
      struct lp_build_context bld;
      double src_min = lp_const_min(src_type);
      double dst_min = lp_const_min(dst_type);
      double src_max = lp_const_max(src_type);
      double dst_max = lp_const_max(dst_type);
      LLVMValueRef thres;

      lp_build_context_init(&bld, builder, tmp_type);

      if(src_min < dst_min) {
         if(dst_min == 0.0)
            thres = bld.zero;
         else
            thres = lp_build_const_vec(src_type, dst_min);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_max(&bld, tmp[i], thres);
      }

      if(src_max > dst_max) {
         if(dst_max == 1.0)
            thres = bld.one;
         else
            thres = lp_build_const_vec(src_type, dst_max);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_min(&bld, tmp[i], thres);
      }
   }

   /*
    * Scale to the narrowest range
    */

   if(dst_type.floating) {
      /* Nothing to do */
   }
   else if(tmp_type.floating) {
      if(!dst_type.fixed && !dst_type.sign && dst_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_clamped_float_to_unsigned_norm(builder,
                                                             tmp_type,
                                                             dst_type.width,
                                                             tmp[i]);
         }
         tmp_type.floating = FALSE;
      }
      else {
         double dst_scale = lp_const_scale(dst_type);
         LLVMTypeRef tmp_vec_type;

         if (dst_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(tmp_type, dst_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildMul(builder, tmp[i], scale, "");
         }

         /* Use an equally sized integer for intermediate computations */
         tmp_type.floating = FALSE;
         tmp_vec_type = lp_build_vec_type(tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(dst_type.sign)
               tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
            tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
#endif
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift > dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(tmp_type, src_shift - dst_shift);
         for(i = 0; i < num_tmps; ++i)
            if(src_type.sign)
               tmp[i] = LLVMBuildAShr(builder, tmp[i], shift, "");
            else
               tmp[i] = LLVMBuildLShr(builder, tmp[i], shift, "");
      }
   }

   /*
    * Truncate or expand bit width
    */

   assert(!tmp_type.floating || tmp_type.width == dst_type.width);

   if(tmp_type.width > dst_type.width) {
      assert(num_dsts == 1);
      tmp[0] = lp_build_pack(builder, tmp_type, dst_type, TRUE, tmp, num_tmps);
      tmp_type.width = dst_type.width;
      tmp_type.length = dst_type.length;
      num_tmps = 1;
   }

   if(tmp_type.width < dst_type.width) {
      assert(num_tmps == 1);
      lp_build_unpack(builder, tmp_type, dst_type, tmp[0], tmp, num_dsts);
      tmp_type.width = dst_type.width;
      tmp_type.length = dst_type.length;
      num_tmps = num_dsts;
   }

   assert(tmp_type.width == dst_type.width);
   assert(tmp_type.length == dst_type.length);
   assert(num_tmps == num_dsts);

   /*
    * Scale to the widest range
    */

   if(src_type.floating) {
      /* Nothing to do */
   }
   else if(!src_type.floating && dst_type.floating) {
      if(!src_type.fixed && !src_type.sign && src_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_unsigned_norm_to_float(builder,
                                                     src_type.width,
                                                     dst_type,
                                                     tmp[i]);
         }
         tmp_type.floating = TRUE;
      }
      else {
         double src_scale = lp_const_scale(src_type);
         LLVMTypeRef tmp_vec_type;

         /* Use an equally sized floating point type for intermediate computations */
         tmp_type.floating = TRUE;
         tmp_type.sign = TRUE;
         tmp_vec_type = lp_build_vec_type(tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(dst_type.sign)
               tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildUIToFP(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildUIToFP */
            tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
#endif
         }

         if (src_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(tmp_type, 1.0/src_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildMul(builder, tmp[i], scale, "");
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift < dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(tmp_type, dst_shift - src_shift);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = LLVMBuildShl(builder, tmp[i], shift, "");
      }
   }

   for(i = 0; i < num_dsts; ++i)
      dst[i] = tmp[i];
}


/**
 * Bit mask conversion.
 *
 * This will convert the integer masks that match the given types.
 *
 * The mask values should be 0 or -1, i.e., with all bits either set to zero
 * or one.  Any other value will likely cause unpredictable results.
 *
 * This is basically a very trimmed down version of lp_build_conv.
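 *
 * For instance, four vectors of 4 x int32 masks can be converted into a
 * single vector of 16 x int8 masks: both sides span 128 bits per register
 * and 16 elements in total, so only the bit width gets packed down.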
 */
void
lp_build_conv_mask(LLVMBuilderRef builder,
                   struct lp_type src_type,
                   struct lp_type dst_type,
                   const LLVMValueRef *src, unsigned num_srcs,
                   LLVMValueRef *dst, unsigned num_dsts)
{
   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels, only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   /*
    * Drop the type semantics and treat everything as plain signed integers.
    *
    * We assume all values are 0 or -1.
    */

   src_type.floating = FALSE;
   src_type.fixed = FALSE;
   src_type.sign = TRUE;
   src_type.norm = FALSE;

   dst_type.floating = FALSE;
   dst_type.fixed = FALSE;
   dst_type.sign = TRUE;
   dst_type.norm = FALSE;

   /*
    * Truncate or expand bit width
    */

   if(src_type.width > dst_type.width) {
      assert(num_dsts == 1);
      dst[0] = lp_build_pack(builder, src_type, dst_type, TRUE, src, num_srcs);
   }
   else if(src_type.width < dst_type.width) {
      assert(num_srcs == 1);
      lp_build_unpack(builder, src_type, dst_type, src[0], dst, num_dsts);
   }
   else {
      assert(num_srcs == num_dsts);
      memcpy(dst, src, num_dsts * sizeof *dst);
   }
}