gallivm: fix indirect addressing of constant buffer
[mesa.git] src/gallium/auxiliary/gallivm/lp_bld_conv.c
/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * @file
 * Helper functions for type conversions.
 *
 * We want to use the fastest type for a given computation whenever feasible.
 * The other side of this is that we need to be able to convert between
 * several types accurately and efficiently.
 *
 * Conversion between types of different bit width is quite complex, since a
 * register then holds a different number of elements and the values have to
 * be repacked.
 *
 * To remember there are a few invariants in type conversions:
 *
 * - register width must remain constant:
 *
 *     src_type.width * src_type.length == dst_type.width * dst_type.length
 *
 * - total number of elements must remain constant:
 *
 *     src_type.length * num_srcs == dst_type.length * num_dsts
 *
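 *   As an illustrative example (not drawn from any particular caller):
 *   converting four vectors of 4 x float32 into one vector of 16 x uint8
 *   satisfies both invariants, since 32 * 4 == 8 * 16 and 4 * 4 == 16 * 1.
 *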
 * It is not always possible to do the conversion both accurately and
 * efficiently, usually due to lack of adequate machine instructions. In these
 * cases it is important not to take shortcuts here and sacrifice accuracy, as
 * these functions can be used anywhere. In the future we might have a
 * precision parameter which can gauge the accuracy vs efficiency compromise,
 * but for now, if the data conversion between two stages happens to be the
 * bottleneck, then most likely one should just avoid converting at all and
 * run both stages with the same type.
 *
 * Make sure to run the lp_test_conv unit test after any change to this file.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */


#include "util/u_debug.h"
#include "util/u_math.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_arit.h"
#include "lp_bld_pack.h"
#include "lp_bld_conv.h"


/**
 * Special case for converting clamped IEEE-754 floats to unsigned norms.
 *
 * The mathematical voodoo below may seem excessive but it is actually
 * paramount we do it this way for several reasons. First, there is no single
 * precision FP to unsigned integer conversion Intel SSE instruction. Second,
 * even if there was, since the FP's mantissa takes only a fraction of the
 * register bits, the typical scale-and-cast approach would require double
 * precision for accurate results, and therefore half the throughput.
 *
 * Although the result values can be scaled to an arbitrary bit width specified
 * by dst_width, the actual result type will have the same width as the source.
 *
 * Ex: src = { float, float, float, float }
 * return { i32, i32, i32, i32 } where each value is in [0, 2^dst_width-1].
 */
LLVMValueRef
lp_build_clamped_float_to_unsigned_norm(LLVMBuilderRef builder,
                                        struct lp_type src_type,
                                        unsigned dst_width,
                                        LLVMValueRef src)
{
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(src_type);
   LLVMValueRef res;
   unsigned mantissa;
   unsigned n;
   unsigned long long ubound;
   unsigned long long mask;
   double scale;
   double bias;

   assert(src_type.floating);

   mantissa = lp_mantissa(src_type);

   /* We cannot carry more bits than the mantissa */
   n = MIN2(mantissa, dst_width);

   /* These magic coefficients will make the desired result appear in the
    * least significant bits of the mantissa.
    */
   ubound = ((unsigned long long)1 << n);
   mask = ubound - 1;
   scale = (double)mask/ubound;
   bias = (double)((unsigned long long)1 << (mantissa - n));
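
   /*
    * Worked example, purely as an illustration of the constants above
    * (assuming float32 sources and dst_width == 8): mantissa = 23, n = 8,
    * ubound = 256, mask = 255, scale = 255/256 and bias = 2^(23-8) = 32768.0.
    * For src in [0, 1], src*scale + bias lies in [32768.0, 32769.0), a range
    * where one unit in the last place of the mantissa is exactly 1/256, so
    * the rounded value of src*255 ends up in the 8 least significant mantissa
    * bits and can be read out directly after the bitcast below.
    */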

   res = LLVMBuildMul(builder, src, lp_build_const_vec(src_type, scale), "");
   res = LLVMBuildAdd(builder, res, lp_build_const_vec(src_type, bias), "");
   res = LLVMBuildBitCast(builder, res, int_vec_type, "");

   if(dst_width > n) {
      int shift = dst_width - n;
      res = LLVMBuildShl(builder, res, lp_build_const_int_vec(src_type, shift), "");

      /* TODO: Fill in the empty lower bits for additional precision? */
      /* YES: this fixes progs/trivial/tri-z-eq.c.
       * Otherwise vertex Z=1.0 values get converted to something like
       * 0xfffffb00 and the test for equality with 0xffffffff fails.
       */
#if 0
      {
         LLVMValueRef msb;
         msb = LLVMBuildLShr(builder, res, lp_build_const_int_vec(src_type, dst_width - 1), "");
         msb = LLVMBuildShl(builder, msb, lp_build_const_int_vec(src_type, shift), "");
         msb = LLVMBuildSub(builder, msb, lp_build_const_int_vec(src_type, 1), "");
         res = LLVMBuildOr(builder, res, msb, "");
      }
#elif 0
      while(shift > 0) {
         res = LLVMBuildOr(builder, res, LLVMBuildLShr(builder, res, lp_build_const_int_vec(src_type, n), ""), "");
         shift -= n;
         n *= 2;
      }
#endif
   }
   else
      res = LLVMBuildAnd(builder, res, lp_build_const_int_vec(src_type, mask), "");

   return res;
}


/**
 * Inverse of lp_build_clamped_float_to_unsigned_norm above.
 * Ex: src = { i32, i32, i32, i32 } with values in range [0, 2^src_width-1]
 * return {float, float, float, float} with values in range [0, 1].
 */
LLVMValueRef
lp_build_unsigned_norm_to_float(LLVMBuilderRef builder,
                                unsigned src_width,
                                struct lp_type dst_type,
                                LLVMValueRef src)
{
   LLVMTypeRef vec_type = lp_build_vec_type(dst_type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(dst_type);
   LLVMValueRef bias_;
   LLVMValueRef res;
   unsigned mantissa;
   unsigned n;
   unsigned long long ubound;
   unsigned long long mask;
   double scale;
   double bias;

   mantissa = lp_mantissa(dst_type);

   n = MIN2(mantissa, src_width);

   ubound = ((unsigned long long)1 << n);
   mask = ubound - 1;
   scale = (double)ubound/mask;
   bias = (double)((unsigned long long)1 << (mantissa - n));
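
   /*
    * Worked example of the inverse trick (illustrative only, assuming
    * float32 destinations and src_width == 8): n = 8, ubound = 256,
    * mask = 255, scale = 256/255 and bias = 2^15 = 32768.0.  OR-ing the
    * integer bit pattern of the bias into the low mantissa bits and
    * bitcasting yields the float 32768.0 + k/256 for an 8-bit input k;
    * subtracting the bias leaves k/256, and multiplying by 256/255 gives
    * k/255 in [0, 1].
    */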

   res = src;

   if(src_width > mantissa) {
      int shift = src_width - mantissa;
      res = LLVMBuildLShr(builder, res, lp_build_const_int_vec(dst_type, shift), "");
   }

   bias_ = lp_build_const_vec(dst_type, bias);

   res = LLVMBuildOr(builder,
                     res,
                     LLVMBuildBitCast(builder, bias_, int_vec_type, ""), "");

   res = LLVMBuildBitCast(builder, res, vec_type, "");

   res = LLVMBuildSub(builder, res, bias_, "");
   res = LLVMBuildMul(builder, res, lp_build_const_vec(dst_type, scale), "");

   return res;
}


/**
 * Generic type conversion.
 *
 * TODO: Take a precision argument, or even better, add a new precision member
 * to the lp_type structure.
 */
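/*
 * A minimal usage sketch (illustrative only; the concrete types and the
 * caller are assumptions, not part of this file): converting four SoA
 * vectors of 4 x float32 into a single vector of 16 x unorm8, which honors
 * both invariants (32 * 4 == 8 * 16 and 4 * 4 == 16 * 1), might look like
 * this, given an LLVMBuilderRef builder and LLVMValueRef src[4] in hand:
 *
 *    struct lp_type f32_type;
 *    struct lp_type u8n_type;
 *    LLVMValueRef dst;
 *
 *    memset(&f32_type, 0, sizeof f32_type);
 *    f32_type.floating = TRUE;     // 4 x float32, 128 bits total
 *    f32_type.sign = TRUE;
 *    f32_type.width = 32;
 *    f32_type.length = 4;
 *
 *    memset(&u8n_type, 0, sizeof u8n_type);
 *    u8n_type.norm = TRUE;         // 16 x unorm8, 128 bits total
 *    u8n_type.width = 8;
 *    u8n_type.length = 16;
 *
 *    lp_build_conv(builder, f32_type, u8n_type, src, 4, &dst, 1);
 */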
void
lp_build_conv(LLVMBuilderRef builder,
              struct lp_type src_type,
              struct lp_type dst_type,
              const LLVMValueRef *src, unsigned num_srcs,
              LLVMValueRef *dst, unsigned num_dsts)
{
   struct lp_type tmp_type;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned num_tmps;
   unsigned i;

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
   assert(num_dsts <= LP_MAX_VECTOR_LENGTH);

   tmp_type = src_type;
   for(i = 0; i < num_srcs; ++i) {
      assert(lp_check_value(src_type, src[i]));
      tmp[i] = src[i];
   }
   num_tmps = num_srcs;

   /*
    * Clamp if necessary
    */
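
   /*
    * Illustrative note (an assumption about a typical caller, not a new
    * requirement): when converting floats of arbitrary range to an unsigned
    * normalized destination such as unorm8, this is where the inputs get
    * clamped to the destination's [0, 1] range before any scaling happens.
    */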

   if(memcmp(&src_type, &dst_type, sizeof src_type) != 0) {
      struct lp_build_context bld;
      double src_min = lp_const_min(src_type);
      double dst_min = lp_const_min(dst_type);
      double src_max = lp_const_max(src_type);
      double dst_max = lp_const_max(dst_type);
      LLVMValueRef thres;

      lp_build_context_init(&bld, builder, tmp_type);

      if(src_min < dst_min) {
         if(dst_min == 0.0)
            thres = bld.zero;
         else
            thres = lp_build_const_vec(src_type, dst_min);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_max(&bld, tmp[i], thres);
      }

      if(src_max > dst_max) {
         if(dst_max == 1.0)
            thres = bld.one;
         else
            thres = lp_build_const_vec(src_type, dst_max);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_min(&bld, tmp[i], thres);
      }
   }

   /*
    * Scale to the narrowest range
    */

   if(dst_type.floating) {
      /* Nothing to do */
   }
   else if(tmp_type.floating) {
      if(!dst_type.fixed && !dst_type.sign && dst_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_clamped_float_to_unsigned_norm(builder,
                                                             tmp_type,
                                                             dst_type.width,
                                                             tmp[i]);
         }
         tmp_type.floating = FALSE;
      }
      else {
         double dst_scale = lp_const_scale(dst_type);
         LLVMTypeRef tmp_vec_type;

         if (dst_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(tmp_type, dst_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildMul(builder, tmp[i], scale, "");
         }

         /* Use an equally sized integer for intermediate computations */
         tmp_type.floating = FALSE;
         tmp_vec_type = lp_build_vec_type(tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(dst_type.sign)
               tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
            tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
#endif
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift > dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(tmp_type, src_shift - dst_shift);
         for(i = 0; i < num_tmps; ++i)
            if(src_type.sign)
               tmp[i] = LLVMBuildAShr(builder, tmp[i], shift, "");
            else
               tmp[i] = LLVMBuildLShr(builder, tmp[i], shift, "");
      }
   }

   /*
    * Truncate or expand bit width
    *
    * No data conversion should happen here, although the sign bits are
    * crucial to avoid bad clamping.
    */
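
   /*
    * In the illustrative float32 -> unorm8 case sketched in the comments
    * above (an assumption about the caller, not the only use), this is the
    * step where four vectors of 4 x i32 get repacked into a single vector
    * of 16 x i8; only the layout changes, not the values.
    */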

   {
      struct lp_type new_type;

      new_type = tmp_type;
      new_type.sign = dst_type.sign;
      new_type.width = dst_type.width;
      new_type.length = dst_type.length;

      lp_build_resize(builder, tmp_type, new_type, tmp, num_srcs, tmp, num_dsts);

      tmp_type = new_type;
      num_tmps = num_dsts;
   }

   /*
    * Scale to the widest range
    */

   if(src_type.floating) {
      /* Nothing to do */
   }
   else if(!src_type.floating && dst_type.floating) {
      if(!src_type.fixed && !src_type.sign && src_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_unsigned_norm_to_float(builder,
                                                     src_type.width,
                                                     dst_type,
                                                     tmp[i]);
         }
         tmp_type.floating = TRUE;
      }
      else {
         double src_scale = lp_const_scale(src_type);
         LLVMTypeRef tmp_vec_type;

         /* Use an equally sized floating point type for intermediate computations */
         tmp_type.floating = TRUE;
         tmp_type.sign = TRUE;
         tmp_vec_type = lp_build_vec_type(tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(dst_type.sign)
               tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildUIToFP(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildUIToFP */
            tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
#endif
         }

         if (src_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(tmp_type, 1.0/src_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildMul(builder, tmp[i], scale, "");
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift < dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(tmp_type, dst_shift - src_shift);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = LLVMBuildShl(builder, tmp[i], shift, "");
      }
   }

   for(i = 0; i < num_dsts; ++i) {
      dst[i] = tmp[i];
      assert(lp_check_value(dst_type, dst[i]));
   }
}


/**
 * Bit mask conversion.
 *
 * This will convert the integer masks that match the given types.
 *
 * The mask values should be 0 or -1, i.e., all bits either set to zero or one.
 * Any other value will likely cause unpredictable results.
 *
 * This is basically a very trimmed down version of lp_build_conv.
 */
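/*
 * For instance (purely illustrative, mirroring the lp_build_conv sketch
 * above): four vectors of 4 x int32 masks can be narrowed into a single
 * vector of 16 x int8 masks with
 *
 *    lp_build_conv_mask(builder, mask32_type, mask8_type, src, 4, &dst, 1);
 *
 * where mask32_type describes 4 x int32 and mask8_type describes 16 x int8;
 * since only 0/-1 values are assumed, the packing can truncate without loss.
 */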
void
lp_build_conv_mask(LLVMBuilderRef builder,
                   struct lp_type src_type,
                   struct lp_type dst_type,
                   const LLVMValueRef *src, unsigned num_srcs,
                   LLVMValueRef *dst, unsigned num_dsts)
{
   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   /*
    * Drop the float/fixed/norm flags and treat the masks as plain integers.
    *
    * We assume all values are 0 or -1.
    */

   src_type.floating = FALSE;
   src_type.fixed = FALSE;
   src_type.sign = TRUE;
   src_type.norm = FALSE;

   dst_type.floating = FALSE;
   dst_type.fixed = FALSE;
   dst_type.sign = TRUE;
   dst_type.norm = FALSE;

   /*
    * Truncate or expand bit width
    */

   if(src_type.width > dst_type.width) {
      assert(num_dsts == 1);
      dst[0] = lp_build_pack(builder, src_type, dst_type, TRUE, src, num_srcs);
   }
   else if(src_type.width < dst_type.width) {
      assert(num_srcs == 1);
      lp_build_unpack(builder, src_type, dst_type, src[0], dst, num_dsts);
   }
   else {
      assert(num_srcs == num_dsts);
      memcpy(dst, src, num_dsts * sizeof *dst);
   }
}