gallivm: Fix for dynamically linked LLVM 2.8 library.
[mesa.git] src/gallium/auxiliary/gallivm/lp_bld_pack.c
/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * @file
 * Helper functions for packing/unpacking.
 *
 * Packing/unpacking is necessary for conversion between types of different
 * bit width.
 *
 * They are also commonly used when a computation needs higher
 * precision for the intermediate values. For example, if one needs the
 * function:
 *
 *   c = compute(a, b);
 *
 * to use more precision for intermediate results, then one should implement
 * it as:
 *
 *   LLVMValueRef
 *   compute(LLVMBuilderRef builder, struct lp_type type, LLVMValueRef a, LLVMValueRef b)
 *   {
 *      struct lp_type wide_type = lp_wider_type(type);
 *      LLVMValueRef al, ah, bl, bh, cl, ch, c;
 *
 *      lp_build_unpack2(builder, type, wide_type, a, &al, &ah);
 *      lp_build_unpack2(builder, type, wide_type, b, &bl, &bh);
 *
 *      cl = compute_half(al, bl);
 *      ch = compute_half(ah, bh);
 *
 *      c = lp_build_pack2(builder, wide_type, type, cl, ch);
 *
 *      return c;
 *   }
 *
 * where compute_half() would do the computation for half the elements with
 * twice the precision.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */


#include "util/u_debug.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_init.h"
#include "lp_bld_intr.h"
#include "lp_bld_arit.h"
#include "lp_bld_pack.h"


/**
 * Build shuffle vectors that match PUNPCKLxx and PUNPCKHxx instructions.
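 *
 * For example (illustrative, derived from the loop below): with n = 4,
 * lo_hi = 0 yields the shuffle {0, 4, 1, 5} and lo_hi = 1 yields
 * {2, 6, 3, 7}, i.e. the indices that interleave the low resp. high
 * halves of two n-element vectors.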
 */
static LLVMValueRef
lp_build_const_unpack_shuffle(struct gallivm_state *gallivm,
                              unsigned n, unsigned lo_hi)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i, j;

   assert(n <= LP_MAX_VECTOR_LENGTH);
   assert(lo_hi < 2);

   /* TODO: cache results in a static table */

   for(i = 0, j = lo_hi*n/2; i < n; i += 2, ++j) {
      elems[i + 0] = lp_build_const_int32(gallivm, 0 + j);
      elems[i + 1] = lp_build_const_int32(gallivm, n + j);
   }

   return LLVMConstVector(elems, n);
}


/**
 * Build shuffle vectors that match PACKxx instructions.
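 *
 * For example, n = 4 yields the shuffle {0, 2, 4, 6}, which selects
 * every other element from the concatenation of two vectors.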
 */
static LLVMValueRef
lp_build_const_pack_shuffle(struct gallivm_state *gallivm, unsigned n)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   assert(n <= LP_MAX_VECTOR_LENGTH);

   for(i = 0; i < n; ++i)
      elems[i] = lp_build_const_int32(gallivm, 2*i);

   return LLVMConstVector(elems, n);
}


/**
 * Interleave vector elements.
 *
 * Matches the PUNPCKLxx and PUNPCKHxx SSE instructions.
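 *
 * For example, given a = {a0 a1 a2 a3} and b = {b0 b1 b2 b3}:
 *
 *   lo_hi = 0 -> {a0 b0 a1 b1}
 *   lo_hi = 1 -> {a2 b2 a3 b3}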
 */
LLVMValueRef
lp_build_interleave2(struct gallivm_state *gallivm,
                     struct lp_type type,
                     LLVMValueRef a,
                     LLVMValueRef b,
                     unsigned lo_hi)
{
   LLVMValueRef shuffle;

   shuffle = lp_build_const_unpack_shuffle(gallivm, type.length, lo_hi);

   return LLVMBuildShuffleVector(gallivm->builder, a, b, shuffle, "");
}


/**
 * Double the bit width.
 *
 * This will only change the number of bits with which the values are
 * represented, not the values themselves.
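 *
 * For example, an 8 x i16 source vector becomes two 4 x i32 destination
 * vectors, with each element sign- or zero-extended according to the
 * types' signs.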
 */
void
lp_build_unpack2(struct gallivm_state *gallivm,
                 struct lp_type src_type,
                 struct lp_type dst_type,
                 LLVMValueRef src,
                 LLVMValueRef *dst_lo,
                 LLVMValueRef *dst_hi)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef msb;
   LLVMTypeRef dst_vec_type;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(dst_type.width == src_type.width * 2);
   assert(dst_type.length * 2 == src_type.length);

   if(dst_type.sign && src_type.sign) {
      /* Replicate the sign bit in the most significant bits */
      msb = LLVMBuildAShr(builder, src, lp_build_const_int_vec(gallivm, src_type, src_type.width - 1), "");
   }
   else
      /* Most significant bits always zero */
      msb = lp_build_zero(gallivm, src_type);

   /* Interleave bits */
#ifdef PIPE_ARCH_LITTLE_ENDIAN
   *dst_lo = lp_build_interleave2(gallivm, src_type, src, msb, 0);
   *dst_hi = lp_build_interleave2(gallivm, src_type, src, msb, 1);
#else
   *dst_lo = lp_build_interleave2(gallivm, src_type, msb, src, 0);
   *dst_hi = lp_build_interleave2(gallivm, src_type, msb, src, 1);
#endif

   /* Cast the result into the new type (twice as wide) */

   dst_vec_type = lp_build_vec_type(gallivm, dst_type);

   *dst_lo = LLVMBuildBitCast(builder, *dst_lo, dst_vec_type, "");
   *dst_hi = LLVMBuildBitCast(builder, *dst_hi, dst_vec_type, "");
}


/**
 * Expand the bit width.
 *
 * This will only change the number of bits with which the values are
 * represented, not the values themselves.
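 *
 * For example, expanding one 16 x i8 source into four 4 x i32
 * destinations (num_dsts = 4) takes two rounds of lp_build_unpack2.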
 */
void
lp_build_unpack(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                LLVMValueRef src,
                LLVMValueRef *dst, unsigned num_dsts)
{
   unsigned num_tmps;
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length == dst_type.length * num_dsts);

   num_tmps = 1;
   dst[0] = src;

   while(src_type.width < dst_type.width) {
      struct lp_type tmp_type = src_type;

      tmp_type.width *= 2;
      tmp_type.length /= 2;

      for(i = num_tmps; i--; ) {
         lp_build_unpack2(gallivm, src_type, tmp_type, dst[i], &dst[2*i + 0], &dst[2*i + 1]);
      }

      src_type = tmp_type;

      num_tmps *= 2;
   }

   assert(num_tmps == num_dsts);
}


/**
 * Non-interleaved pack.
 *
 * This will move values as:
 *
 *   lo  = __ l0 __ l1 __ l2 __ .. __ ln
 *   hi  = __ h0 __ h1 __ h2 __ .. __ hn
 *   res = l0 l1 l2 .. ln h0 h1 h2 .. hn
 *
 * This will only change the number of bits with which the values are
 * represented, not the values themselves.
 *
 * It is assumed the values are already clamped into the destination type
 * range. Values outside that range will produce undefined results; use
 * lp_build_packs2 instead if clamping cannot be guaranteed.
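 *
 * For example, packing two 4 x i32 vectors into one 8 x i16 vector can map
 * to a single PACKSSDW (signed) or PACKUSDW (unsigned, SSE4.1) instruction.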
 */
LLVMValueRef
lp_build_pack2(struct gallivm_state *gallivm,
               struct lp_type src_type,
               struct lp_type dst_type,
               LLVMValueRef lo,
               LLVMValueRef hi)
{
   LLVMBuilderRef builder = gallivm->builder;
#if HAVE_LLVM < 0x0207
   LLVMTypeRef src_vec_type = lp_build_vec_type(gallivm, src_type);
#endif
   LLVMTypeRef dst_vec_type = lp_build_vec_type(gallivm, dst_type);
   LLVMValueRef shuffle;
   LLVMValueRef res = NULL;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   /* Check for special cases first */
   if(util_cpu_caps.has_sse2 && src_type.width * src_type.length == 128) {
      switch(src_type.width) {
      case 32:
         if(dst_type.sign) {
#if HAVE_LLVM >= 0x0207
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packssdw.128", dst_vec_type, lo, hi);
#else
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packssdw.128", src_vec_type, lo, hi);
#endif
         }
         else {
            if (util_cpu_caps.has_sse4_1) {
               return lp_build_intrinsic_binary(builder, "llvm.x86.sse41.packusdw", dst_vec_type, lo, hi);
            }
            else {
               /* use generic shuffle below */
               res = NULL;
            }
         }
         break;

      case 16:
         if(dst_type.sign)
#if HAVE_LLVM >= 0x0207
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packsswb.128", dst_vec_type, lo, hi);
#else
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packsswb.128", src_vec_type, lo, hi);
#endif
         else
#if HAVE_LLVM >= 0x0207
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packuswb.128", dst_vec_type, lo, hi);
#else
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packuswb.128", src_vec_type, lo, hi);
#endif
         break;

      default:
         assert(0);
         return LLVMGetUndef(dst_vec_type);
         break;
      }

      if (res) {
         res = LLVMBuildBitCast(builder, res, dst_vec_type, "");
         return res;
      }
   }

   /* generic shuffle */
   lo = LLVMBuildBitCast(builder, lo, dst_vec_type, "");
   hi = LLVMBuildBitCast(builder, hi, dst_vec_type, "");

   shuffle = lp_build_const_pack_shuffle(gallivm, dst_type.length);

   res = LLVMBuildShuffleVector(builder, lo, hi, shuffle, "");

   return res;
}



/**
 * Non-interleaved pack and saturate.
 *
 * Same as lp_build_pack2 but will saturate values so that they fit into the
 * destination type.
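 *
 * For example, when packing unsigned 8 x i16 values into 16 x i8, each
 * element is first clamped to the destination maximum (255 here); signed
 * 128-bit SSE2 inputs skip the clamp since the pack instructions already
 * saturate.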
 */
LLVMValueRef
lp_build_packs2(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                LLVMValueRef lo,
                LLVMValueRef hi)
{
   boolean clamp;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.sign == dst_type.sign);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   clamp = TRUE;

   /* All X86 SSE non-interleaved pack instructions take signed inputs and
    * saturate them, so no need to clamp for those cases. */
   if(util_cpu_caps.has_sse2 &&
      src_type.width * src_type.length == 128 &&
      src_type.sign)
      clamp = FALSE;

   if(clamp) {
      struct lp_build_context bld;
      unsigned dst_bits = dst_type.sign ? dst_type.width - 1 : dst_type.width;
      LLVMValueRef dst_max = lp_build_const_int_vec(gallivm, src_type, ((unsigned long long)1 << dst_bits) - 1);
      lp_build_context_init(&bld, gallivm, src_type);
      lo = lp_build_min(&bld, lo, dst_max);
      hi = lp_build_min(&bld, hi, dst_max);
      /* FIXME: What about lower bound? */
   }

   return lp_build_pack2(gallivm, src_type, dst_type, lo, hi);
}


/**
 * Truncate the bit width.
 *
 * TODO: Handle saturation consistently.
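 *
 * For example, truncating four 4 x i32 sources into one 16 x i8
 * destination (num_srcs = 4) takes two rounds of pairwise packing.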
 */
LLVMValueRef
lp_build_pack(struct gallivm_state *gallivm,
              struct lp_type src_type,
              struct lp_type dst_type,
              boolean clamped,
              const LLVMValueRef *src, unsigned num_srcs)
{
   LLVMValueRef (*pack2)(struct gallivm_state *gallivm,
                         struct lp_type src_type,
                         struct lp_type dst_type,
                         LLVMValueRef lo,
                         LLVMValueRef hi);
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned i;


   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length);

   if(clamped)
      pack2 = &lp_build_pack2;
   else
      pack2 = &lp_build_packs2;

   for(i = 0; i < num_srcs; ++i)
      tmp[i] = src[i];

   while(src_type.width > dst_type.width) {
      struct lp_type tmp_type = src_type;

      tmp_type.width /= 2;
      tmp_type.length *= 2;

      /* Take into consideration the sign changes only in the last step */
      if(tmp_type.width == dst_type.width)
         tmp_type.sign = dst_type.sign;

      num_srcs /= 2;

      for(i = 0; i < num_srcs; ++i)
         tmp[i] = pack2(gallivm, src_type, tmp_type,
                        tmp[2*i + 0], tmp[2*i + 1]);

      src_type = tmp_type;
   }

   assert(num_srcs == 1);

   return tmp[0];
}


/**
 * Truncate or expand the bit width.
 *
 * NOTE: Getting the right sign flags is crucial here, as we employ some
 * intrinsics that do saturation.
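 *
 * Dispatches to lp_build_pack / lp_build_unpack when the total register
 * width is preserved, and falls back to element-wise Trunc/SExt/ZExt
 * otherwise.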
 */
void
lp_build_resize(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                const LLVMValueRef *src, unsigned num_srcs,
                LLVMValueRef *dst, unsigned num_dsts)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   /*
    * We don't support float <-> int conversion here. That must be done
    * before/after calling this function.
    */
   assert(src_type.floating == dst_type.floating);

   /*
    * We don't support double <-> float conversion yet, although it could be
    * added with little effort.
    */
   assert((!src_type.floating && !dst_type.floating) ||
          src_type.width == dst_type.width);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   /* We don't support M:N conversion, only 1:N, M:1, or 1:1 */
   assert(num_srcs == 1 || num_dsts == 1);

   assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
   assert(num_dsts <= LP_MAX_VECTOR_LENGTH);

   if (src_type.width > dst_type.width) {
      /*
       * Truncate bit width.
       */

      assert(num_dsts == 1);

      if (src_type.width * src_type.length == dst_type.width * dst_type.length) {
         /*
          * Register width remains constant -- use vector packing intrinsics
          */

         tmp[0] = lp_build_pack(gallivm, src_type, dst_type, TRUE, src, num_srcs);
      }
      else {
         /*
          * Do it element-wise.
          */

         assert(src_type.length == dst_type.length);
         tmp[0] = lp_build_undef(gallivm, dst_type);
         for (i = 0; i < dst_type.length; ++i) {
            LLVMValueRef index = lp_build_const_int32(gallivm, i);
            LLVMValueRef val = LLVMBuildExtractElement(builder, src[0], index, "");
            val = LLVMBuildTrunc(builder, val, lp_build_elem_type(gallivm, dst_type), "");
            tmp[0] = LLVMBuildInsertElement(builder, tmp[0], val, index, "");
         }
      }
   }
   else if (src_type.width < dst_type.width) {
      /*
       * Expand bit width.
       */

      assert(num_srcs == 1);

      if (src_type.width * src_type.length == dst_type.width * dst_type.length) {
         /*
          * Register width remains constant -- use vector unpack intrinsics
          */
         lp_build_unpack(gallivm, src_type, dst_type, src[0], tmp, num_dsts);
      }
      else {
         /*
          * Do it element-wise.
          */

         assert(src_type.length == dst_type.length);
         tmp[0] = lp_build_undef(gallivm, dst_type);
         for (i = 0; i < dst_type.length; ++i) {
            LLVMValueRef index = lp_build_const_int32(gallivm, i);
            LLVMValueRef val = LLVMBuildExtractElement(builder, src[0], index, "");

            if (src_type.sign && dst_type.sign) {
               val = LLVMBuildSExt(builder, val, lp_build_elem_type(gallivm, dst_type), "");
            } else {
               val = LLVMBuildZExt(builder, val, lp_build_elem_type(gallivm, dst_type), "");
            }
            tmp[0] = LLVMBuildInsertElement(builder, tmp[0], val, index, "");
         }
      }
   }
   else {
      /*
       * No-op
       */

      assert(num_srcs == 1);
      assert(num_dsts == 1);

      tmp[0] = src[0];
   }

   for(i = 0; i < num_dsts; ++i)
      dst[i] = tmp[i];
}