/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * @file
 * Helper functions for packing/unpacking.
 *
 * Packing/unpacking is necessary for conversion between types of different
 * bit width.
 *
 * They are also commonly used when a computation needs higher
 * precision for the intermediate values. For example, if one needs the
 * function:
 *
 *   c = compute(a, b);
 *
 * to use more precision for intermediate results, then one should implement it
 * as:
 *
 *   LLVMValueRef
 *   compute(struct gallivm_state *gallivm, struct lp_type type, LLVMValueRef a, LLVMValueRef b)
 *   {
 *      struct lp_type wide_type = lp_wider_type(type);
 *      LLVMValueRef al, ah, bl, bh, cl, ch, c;
 *
 *      lp_build_unpack2(gallivm, type, wide_type, a, &al, &ah);
 *      lp_build_unpack2(gallivm, type, wide_type, b, &bl, &bh);
 *
 *      cl = compute_half(al, bl);
 *      ch = compute_half(ah, bh);
 *
 *      c = lp_build_pack2(gallivm, wide_type, type, cl, ch);
 *
 *      return c;
 *   }
 *
 * where compute_half() would do the computation for half the elements with
 * twice the precision.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */


#include "util/u_debug.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"
#include "util/u_memory.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_init.h"
#include "lp_bld_intr.h"
#include "lp_bld_arit.h"
#include "lp_bld_pack.h"
#include "lp_bld_swizzle.h"


/**
 * Build shuffle vectors that match PUNPCKLxx and PUNPCKHxx instructions.
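 *
 * For example (derived from the loop below), n = 4 with lo_hi = 0 yields the
 * shuffle mask {0, 4, 1, 5}, and lo_hi = 1 yields {2, 6, 3, 7}, i.e. the
 * low/high interleave patterns of PUNPCKLDQ/PUNPCKHDQ.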
 */
static LLVMValueRef
lp_build_const_unpack_shuffle(struct gallivm_state *gallivm,
                              unsigned n, unsigned lo_hi)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i, j;

   assert(n <= LP_MAX_VECTOR_LENGTH);
   assert(lo_hi < 2);

   /* TODO: cache results in a static table */

   for(i = 0, j = lo_hi*n/2; i < n; i += 2, ++j) {
      elems[i + 0] = lp_build_const_int32(gallivm, 0 + j);
      elems[i + 1] = lp_build_const_int32(gallivm, n + j);
   }

   return LLVMConstVector(elems, n);
}

/**
 * Similar to lp_build_const_unpack_shuffle but for special AVX 256bit unpack.
 * See comment above lp_build_interleave2_half for more details.
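 *
 * For example, n = 8 with lo_hi = 0 yields the mask {0, 8, 1, 9, 4, 12, 5, 13}
 * and lo_hi = 1 yields {2, 10, 3, 11, 6, 14, 7, 15}, i.e. the two 128bit
 * lanes are interleaved independently.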
 */
static LLVMValueRef
lp_build_const_unpack_shuffle_half(struct gallivm_state *gallivm,
                                   unsigned n, unsigned lo_hi)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i, j;

   assert(n <= LP_MAX_VECTOR_LENGTH);
   assert(lo_hi < 2);

   for (i = 0, j = lo_hi*(n/4); i < n; i += 2, ++j) {
      if (i == (n / 2))
         j += n / 4;

      elems[i + 0] = lp_build_const_int32(gallivm, 0 + j);
      elems[i + 1] = lp_build_const_int32(gallivm, n + j);
   }

   return LLVMConstVector(elems, n);
}

/**
 * Build shuffle vectors that match PACKxx instructions.
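 *
 * For example, n = 8 yields the mask {0, 2, 4, 6, 8, 10, 12, 14}, i.e. the
 * even-indexed elements of the two (bitcast) source vectors.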
 */
static LLVMValueRef
lp_build_const_pack_shuffle(struct gallivm_state *gallivm, unsigned n)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   assert(n <= LP_MAX_VECTOR_LENGTH);

   for(i = 0; i < n; ++i)
      elems[i] = lp_build_const_int32(gallivm, 2*i);

   return LLVMConstVector(elems, n);
}

/**
 * Return a vector with the elements src[start:start+size].
 * Most useful for getting half the values out of a 256bit sized vector;
 * otherwise it may cause data rearrangement to happen.
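 *
 * E.g. for a 256bit vector of 8 floats, start = 4 and size = 4 return the
 * upper 128bit half as a 4x float vector.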
 */
LLVMValueRef
lp_build_extract_range(struct gallivm_state *gallivm,
                       LLVMValueRef src,
                       unsigned start,
                       unsigned size)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   assert(size <= Elements(elems));

   for (i = 0; i < size; ++i)
      elems[i] = lp_build_const_int32(gallivm, i + start);

   if (size == 1) {
      return LLVMBuildExtractElement(gallivm->builder, src, elems[0], "");
   }
   else {
      return LLVMBuildShuffleVector(gallivm->builder, src, src,
                                    LLVMConstVector(elems, size), "");
   }
}

/**
 * Concatenates several (must be a power of 2) vectors (of the same type)
 * into a larger one.
 * Most useful for building up a 256bit sized vector out of two 128bit ones.
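 *
 * An illustrative usage sketch, building one 8x float32 vector out of two
 * 4x float32 vectors (type4f, lo128 and hi128 are placeholders for the
 * caller's type and values):
 *
 *    LLVMValueRef src[2] = { lo128, hi128 };
 *    LLVMValueRef v256 = lp_build_concat(gallivm, src, type4f, 2);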
 */
LLVMValueRef
lp_build_concat(struct gallivm_state *gallivm,
                LLVMValueRef src[],
                struct lp_type src_type,
                unsigned num_vectors)
{
   unsigned new_length, i;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH/2];
   LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH];

   assert(src_type.length * num_vectors <= Elements(shuffles));
   assert(util_is_power_of_two(num_vectors));

   new_length = src_type.length;

   for (i = 0; i < num_vectors; i++)
      tmp[i] = src[i];

   while (num_vectors > 1) {
      num_vectors >>= 1;
      new_length <<= 1;
      for (i = 0; i < new_length; i++) {
         shuffles[i] = lp_build_const_int32(gallivm, i);
      }
      for (i = 0; i < num_vectors; i++) {
         tmp[i] = LLVMBuildShuffleVector(gallivm->builder, tmp[i*2], tmp[i*2 + 1],
                                         LLVMConstVector(shuffles, new_length), "");
      }
   }

   return tmp[0];
}


/**
 * Combines vectors to reduce from num_srcs to num_dsts.
 * Returns the number of src vectors concatenated in a single dst.
 *
 * num_srcs must be exactly divisible by num_dsts.
 *
 * e.g. For num_srcs = 4 and src = [x, y, z, w]
 *      num_dsts = 1  dst = [xyzw]    return = 4
 *      num_dsts = 2  dst = [xy, zw]  return = 2
 */
int
lp_build_concat_n(struct gallivm_state *gallivm,
                  struct lp_type src_type,
                  LLVMValueRef *src,
                  unsigned num_srcs,
                  LLVMValueRef *dst,
                  unsigned num_dsts)
{
   int size = num_srcs / num_dsts;
   int i;

   assert(num_srcs >= num_dsts);
   assert((num_srcs % size) == 0);

   if (num_srcs == num_dsts)
      return 1;

   for (i = 0; i < num_dsts; ++i) {
      dst[i] = lp_build_concat(gallivm, &src[i * size], src_type, size);
   }

   return size;
}


/**
 * Interleave vector elements.
 *
 * Matches the PUNPCKLxx and PUNPCKHxx SSE instructions.
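 *
 * E.g. for lo_hi = 0, a = [a0 a1 a2 a3] and b = [b0 b1 b2 b3] give
 * [a0 b0 a1 b1]; for lo_hi = 1 they give [a2 b2 a3 b3].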
 */
LLVMValueRef
lp_build_interleave2(struct gallivm_state *gallivm,
                     struct lp_type type,
                     LLVMValueRef a,
                     LLVMValueRef b,
                     unsigned lo_hi)
{
   LLVMValueRef shuffle;

   shuffle = lp_build_const_unpack_shuffle(gallivm, type.length, lo_hi);

   return LLVMBuildShuffleVector(gallivm->builder, a, b, shuffle, "");
}

/**
 * Interleave vector elements, but for 256 bit vectors treat them as two
 * concatenated 128 bit vectors that are interleaved independently.
 *
 * This differs from lp_build_interleave2, which would produce the following
 * (for lo): a0 b0 a1 b1 a2 b2 a3 b3 -- a pattern that does not compile into
 * an AVX unpack instruction.
 *
 * An example: interleaving 8x float with 8x float on an AVX 256bit unpack:
 *   a0 a1 a2 a3 a4 a5 a6 a7 <-> b0 b1 b2 b3 b4 b5 b6 b7
 *
 * Equivalent to interleaving 2x 128 bit vectors
 *   a0 a1 a2 a3 <-> b0 b1 b2 b3 concatenated with a4 a5 a6 a7 <-> b4 b5 b6 b7
 *
 * So interleave-lo would result in:
 *   a0 b0 a1 b1 a4 b4 a5 b5
 *
 * And interleave-hi would result in:
 *   a2 b2 a3 b3 a6 b6 a7 b7
 */
LLVMValueRef
lp_build_interleave2_half(struct gallivm_state *gallivm,
                          struct lp_type type,
                          LLVMValueRef a,
                          LLVMValueRef b,
                          unsigned lo_hi)
{
   if (type.length * type.width == 256) {
      LLVMValueRef shuffle = lp_build_const_unpack_shuffle_half(gallivm, type.length, lo_hi);
      return LLVMBuildShuffleVector(gallivm->builder, a, b, shuffle, "");
   } else {
      return lp_build_interleave2(gallivm, type, a, b, lo_hi);
   }
}

/**
 * Double the bit width.
 *
 * This will only change the number of bits used to represent the values, not
 * the values themselves.
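 *
 * E.g. a vector of 8 x uint16 is unpacked into two vectors of 4 x uint32,
 * dst_lo holding the first four values and dst_hi the last four, each value
 * zero- or sign-extended according to the types' sign flags.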
 */
void
lp_build_unpack2(struct gallivm_state *gallivm,
                 struct lp_type src_type,
                 struct lp_type dst_type,
                 LLVMValueRef src,
                 LLVMValueRef *dst_lo,
                 LLVMValueRef *dst_hi)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef msb;
   LLVMTypeRef dst_vec_type;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(dst_type.width == src_type.width * 2);
   assert(dst_type.length * 2 == src_type.length);

   if(dst_type.sign && src_type.sign) {
      /* Replicate the sign bit in the most significant bits */
      msb = LLVMBuildAShr(builder, src, lp_build_const_int_vec(gallivm, src_type, src_type.width - 1), "");
   }
   else
      /* Most significant bits always zero */
      msb = lp_build_zero(gallivm, src_type);

   /* Interleave bits */
#ifdef PIPE_ARCH_LITTLE_ENDIAN
   *dst_lo = lp_build_interleave2(gallivm, src_type, src, msb, 0);
   *dst_hi = lp_build_interleave2(gallivm, src_type, src, msb, 1);
#else
   *dst_lo = lp_build_interleave2(gallivm, src_type, msb, src, 0);
   *dst_hi = lp_build_interleave2(gallivm, src_type, msb, src, 1);
#endif

   /* Cast the result into the new type (twice as wide) */

   dst_vec_type = lp_build_vec_type(gallivm, dst_type);

   *dst_lo = LLVMBuildBitCast(builder, *dst_lo, dst_vec_type, "");
   *dst_hi = LLVMBuildBitCast(builder, *dst_hi, dst_vec_type, "");
}


/**
 * Expand the bit width.
 *
 * This will only change the number of bits used to represent the values, not
 * the values themselves.
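 *
 * E.g. one vector of 16 x uint8 can be expanded into four vectors of
 * 4 x uint32 by passing num_dsts = 4.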
 */
void
lp_build_unpack(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                LLVMValueRef src,
                LLVMValueRef *dst, unsigned num_dsts)
{
   unsigned num_tmps;
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length == dst_type.length * num_dsts);

   num_tmps = 1;
   dst[0] = src;

   while(src_type.width < dst_type.width) {
      struct lp_type tmp_type = src_type;

      tmp_type.width *= 2;
      tmp_type.length /= 2;

      for(i = num_tmps; i--; ) {
         lp_build_unpack2(gallivm, src_type, tmp_type, dst[i], &dst[2*i + 0], &dst[2*i + 1]);
      }

      src_type = tmp_type;

      num_tmps *= 2;
   }

   assert(num_tmps == num_dsts);
}


/**
 * Non-interleaved pack.
 *
 * This will move values as
 *         (LSB)                     (MSB)
 *   lo =   l0 __ l1 __ l2 __..  __ ln __
 *   hi =   h0 __ h1 __ h2 __..  __ hn __
 *   res =  l0 l1 l2 .. ln h0 h1 h2 .. hn
 *
 * This will only change the number of bits used to represent the values, not
 * the values themselves.
 *
 * It is assumed the values are already clamped into the destination type
 * range. Values outside that range will produce undefined results; use
 * lp_build_packs2 instead in that case.
 */
LLVMValueRef
lp_build_pack2(struct gallivm_state *gallivm,
               struct lp_type src_type,
               struct lp_type dst_type,
               LLVMValueRef lo,
               LLVMValueRef hi)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMTypeRef dst_vec_type = lp_build_vec_type(gallivm, dst_type);
   LLVMValueRef shuffle;
   LLVMValueRef res = NULL;
   struct lp_type intr_type = dst_type;

#if HAVE_LLVM < 0x0207
   intr_type = src_type;
#endif

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   /* Check for special cases first */
   if(util_cpu_caps.has_sse2 && src_type.width * src_type.length >= 128) {
      const char *intrinsic = NULL;

      switch(src_type.width) {
      case 32:
         if(dst_type.sign) {
            intrinsic = "llvm.x86.sse2.packssdw.128";
         }
         else {
            if (util_cpu_caps.has_sse4_1) {
               intrinsic = "llvm.x86.sse41.packusdw";
#if HAVE_LLVM < 0x0207
               /* llvm < 2.7 has inconsistent signatures except for packusdw */
               intr_type = dst_type;
#endif
            }
         }
         break;
      case 16:
         if (dst_type.sign) {
            intrinsic = "llvm.x86.sse2.packsswb.128";
         }
         else {
            intrinsic = "llvm.x86.sse2.packuswb.128";
         }
         break;
      /* default uses generic shuffle below */
      }
      if (intrinsic) {
         if (src_type.width * src_type.length == 128) {
            LLVMTypeRef intr_vec_type = lp_build_vec_type(gallivm, intr_type);
            res = lp_build_intrinsic_binary(builder, intrinsic, intr_vec_type, lo, hi);
            if (dst_vec_type != intr_vec_type) {
               res = LLVMBuildBitCast(builder, res, dst_vec_type, "");
            }
         }
         else {
            int num_split = src_type.width * src_type.length / 128;
            int i;
            int nlen = 128 / src_type.width;
            struct lp_type ndst_type = lp_type_unorm(dst_type.width, 128);
            struct lp_type nintr_type = lp_type_unorm(intr_type.width, 128);
            LLVMValueRef tmpres[LP_MAX_VECTOR_WIDTH / 128];
            LLVMValueRef tmplo, tmphi;
            LLVMTypeRef ndst_vec_type = lp_build_vec_type(gallivm, ndst_type);
            LLVMTypeRef nintr_vec_type = lp_build_vec_type(gallivm, nintr_type);

            assert(num_split <= LP_MAX_VECTOR_WIDTH / 128);

            for (i = 0; i < num_split / 2; i++) {
               tmplo = lp_build_extract_range(gallivm,
                                              lo, i*nlen*2, nlen);
               tmphi = lp_build_extract_range(gallivm,
                                              lo, i*nlen*2 + nlen, nlen);
               tmpres[i] = lp_build_intrinsic_binary(builder, intrinsic,
                                                     nintr_vec_type, tmplo, tmphi);
               if (ndst_vec_type != nintr_vec_type) {
                  tmpres[i] = LLVMBuildBitCast(builder, tmpres[i], ndst_vec_type, "");
               }
            }
            for (i = 0; i < num_split / 2; i++) {
               tmplo = lp_build_extract_range(gallivm,
                                              hi, i*nlen*2, nlen);
               tmphi = lp_build_extract_range(gallivm,
                                              hi, i*nlen*2 + nlen, nlen);
               tmpres[i+num_split/2] = lp_build_intrinsic_binary(builder, intrinsic,
                                                                 nintr_vec_type,
                                                                 tmplo, tmphi);
               if (ndst_vec_type != nintr_vec_type) {
                  tmpres[i+num_split/2] = LLVMBuildBitCast(builder, tmpres[i+num_split/2],
                                                           ndst_vec_type, "");
               }
            }
            res = lp_build_concat(gallivm, tmpres, ndst_type, num_split);
         }
         return res;
      }
   }

   /* generic shuffle */
   lo = LLVMBuildBitCast(builder, lo, dst_vec_type, "");
   hi = LLVMBuildBitCast(builder, hi, dst_vec_type, "");

   shuffle = lp_build_const_pack_shuffle(gallivm, dst_type.length);

   res = LLVMBuildShuffleVector(builder, lo, hi, shuffle, "");

   return res;
}



/**
 * Non-interleaved pack and saturate.
 *
 * Same as lp_build_pack2 but will saturate values so that they fit into the
 * destination type.
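 *
 * E.g. when packing unsigned 32 bit values into unsigned 16 bit values the
 * inputs are first clamped to [0, 65535] (for signed inputs on SSE2 the pack
 * instructions already saturate, so no extra clamping is emitted).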
 */
LLVMValueRef
lp_build_packs2(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                LLVMValueRef lo,
                LLVMValueRef hi)
{
   boolean clamp;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.sign == dst_type.sign);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   clamp = TRUE;

   /* All X86 SSE non-interleaved pack instructions take signed inputs and
    * saturate them, so no need to clamp for those cases. */
   if(util_cpu_caps.has_sse2 &&
      src_type.width * src_type.length >= 128 &&
      src_type.sign &&
      (src_type.width == 32 || src_type.width == 16))
      clamp = FALSE;

   if(clamp) {
      struct lp_build_context bld;
      unsigned dst_bits = dst_type.sign ? dst_type.width - 1 : dst_type.width;
      LLVMValueRef dst_max = lp_build_const_int_vec(gallivm, src_type, ((unsigned long long)1 << dst_bits) - 1);
      lp_build_context_init(&bld, gallivm, src_type);
      lo = lp_build_min(&bld, lo, dst_max);
      hi = lp_build_min(&bld, hi, dst_max);
      /* FIXME: What about the lower bound? */
   }

   return lp_build_pack2(gallivm, src_type, dst_type, lo, hi);
}


/**
 * Truncate the bit width.
 *
 * TODO: Handle saturation consistently.
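 *
 * E.g. four vectors of 4 x i32 can be packed into a single vector of 16 x i8
 * by passing num_srcs = 4; the intermediate step goes through 8 x i16.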
 */
LLVMValueRef
lp_build_pack(struct gallivm_state *gallivm,
              struct lp_type src_type,
              struct lp_type dst_type,
              boolean clamped,
              const LLVMValueRef *src, unsigned num_srcs)
{
   LLVMValueRef (*pack2)(struct gallivm_state *gallivm,
                         struct lp_type src_type,
                         struct lp_type dst_type,
                         LLVMValueRef lo,
                         LLVMValueRef hi);
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length);

   if(clamped)
      pack2 = &lp_build_pack2;
   else
      pack2 = &lp_build_packs2;

   for(i = 0; i < num_srcs; ++i)
      tmp[i] = src[i];

   while(src_type.width > dst_type.width) {
      struct lp_type tmp_type = src_type;

      tmp_type.width /= 2;
      tmp_type.length *= 2;

      /* Take the sign change into consideration only in the last step */
      if(tmp_type.width == dst_type.width)
         tmp_type.sign = dst_type.sign;

      num_srcs /= 2;

      for(i = 0; i < num_srcs; ++i)
         tmp[i] = pack2(gallivm, src_type, tmp_type,
                        tmp[2*i + 0], tmp[2*i + 1]);

      src_type = tmp_type;
   }

   assert(num_srcs == 1);

   return tmp[0];
}


/**
 * Truncate or expand the bit width.
 *
 * NOTE: Getting the right sign flags is crucial here, as we employ some
 * intrinsics that do saturation.
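 *
 * E.g. four vectors of 4 x i32 can be resized into one vector of 16 x i8
 * (num_srcs = 4, num_dsts = 1), or one vector of 16 x i8 into four vectors
 * of 4 x i32 (num_srcs = 1, num_dsts = 4).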
 */
void
lp_build_resize(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                const LLVMValueRef *src, unsigned num_srcs,
                LLVMValueRef *dst, unsigned num_dsts)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   /*
    * We don't support float <-> int conversion here. That must be done
    * before/after calling this function.
    */
   assert(src_type.floating == dst_type.floating);

   /*
    * We don't support double <-> float conversion yet, although it could be
    * added with little effort.
    */
   assert((!src_type.floating && !dst_type.floating) ||
          src_type.width == dst_type.width);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   /* We don't support M:N conversion, only 1:N, M:1, or 1:1 */
   assert(num_srcs == 1 || num_dsts == 1);

   assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
   assert(num_dsts <= LP_MAX_VECTOR_LENGTH);

   if (src_type.width > dst_type.width) {
      /*
       * Truncate bit width.
       */

      assert(num_dsts == 1);

      if (src_type.width * src_type.length == dst_type.width * dst_type.length) {
         /*
          * Register width remains constant -- use vector packing intrinsics
          */
         tmp[0] = lp_build_pack(gallivm, src_type, dst_type, TRUE, src, num_srcs);
      }
      else {
         if (src_type.width / dst_type.width > num_srcs) {
            /*
             * First change src vectors size (with shuffle) so they have the
             * same size as the destination vector, then pack normally.
             * Note: cannot use cast/extract because llvm generates atrocious code.
             */
            unsigned size_ratio = (src_type.width * src_type.length) /
                                  (dst_type.length * dst_type.width);
            unsigned new_length = src_type.length / size_ratio;

            for (i = 0; i < size_ratio * num_srcs; i++) {
               unsigned start_index = (i % size_ratio) * new_length;
               tmp[i] = lp_build_extract_range(gallivm, src[i / size_ratio],
                                               start_index, new_length);
            }
            num_srcs *= size_ratio;
            src_type.length = new_length;
            tmp[0] = lp_build_pack(gallivm, src_type, dst_type, TRUE, tmp, num_srcs);
         }
         else {
            /*
             * Truncate bit width but expand vector size - first pack
             * then expand simply because this should be more AVX-friendly
             * for the cases we probably hit.
             */
            unsigned size_ratio = (dst_type.width * dst_type.length) /
                                  (src_type.length * src_type.width);
            unsigned num_pack_srcs = num_srcs / size_ratio;
            dst_type.length = dst_type.length / size_ratio;

            for (i = 0; i < size_ratio; i++) {
               tmp[i] = lp_build_pack(gallivm, src_type, dst_type, TRUE,
                                      &src[i*num_pack_srcs], num_pack_srcs);
            }
            tmp[0] = lp_build_concat(gallivm, tmp, dst_type, size_ratio);
         }
      }
   }
   else if (src_type.width < dst_type.width) {
      /*
       * Expand bit width.
       */

      assert(num_srcs == 1);

      if (src_type.width * src_type.length == dst_type.width * dst_type.length) {
         /*
          * Register width remains constant -- use vector unpack intrinsics
          */
         lp_build_unpack(gallivm, src_type, dst_type, src[0], tmp, num_dsts);
      }
      else {
         /*
          * Do it element-wise.
          */
         assert(src_type.length * num_srcs == dst_type.length * num_dsts);

         for (i = 0; i < num_dsts; i++) {
            tmp[i] = lp_build_undef(gallivm, dst_type);
         }

         for (i = 0; i < src_type.length; ++i) {
            unsigned j = i / dst_type.length;
            LLVMValueRef srcindex = lp_build_const_int32(gallivm, i);
            LLVMValueRef dstindex = lp_build_const_int32(gallivm, i % dst_type.length);
            LLVMValueRef val = LLVMBuildExtractElement(builder, src[0], srcindex, "");

            if (src_type.sign && dst_type.sign) {
               val = LLVMBuildSExt(builder, val, lp_build_elem_type(gallivm, dst_type), "");
            } else {
               val = LLVMBuildZExt(builder, val, lp_build_elem_type(gallivm, dst_type), "");
            }
            tmp[j] = LLVMBuildInsertElement(builder, tmp[j], val, dstindex, "");
         }
      }
   }
   else {
      /*
       * No-op
       */

      assert(num_srcs == 1);
      assert(num_dsts == 1);

      tmp[0] = src[0];
   }

   for(i = 0; i < num_dsts; ++i)
      dst[i] = tmp[i];
}


/**
 * Expands a src vector from src.length to dst_length.
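 *
 * E.g. a 4 element vector padded to dst_length = 8 keeps its four values in
 * the low elements; the remaining elements are taken from an undef vector,
 * so their contents are undefined.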
 */
LLVMValueRef
lp_build_pad_vector(struct gallivm_state *gallivm,
                    LLVMValueRef src,
                    unsigned dst_length)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   LLVMValueRef undef;
   LLVMTypeRef type;
   unsigned i, src_length;

   type = LLVMTypeOf(src);

   if (LLVMGetTypeKind(type) != LLVMVectorTypeKind) {
      /* Can't use ShuffleVector on non-vector type */
      undef = LLVMGetUndef(LLVMVectorType(type, dst_length));
      return LLVMBuildInsertElement(gallivm->builder, undef, src, lp_build_const_int32(gallivm, 0), "");
   }

   undef = LLVMGetUndef(type);
   src_length = LLVMGetVectorSize(type);

   assert(dst_length <= Elements(elems));
   assert(dst_length >= src_length);

   if (src_length == dst_length)
      return src;

   /* All elements from src vector */
   for (i = 0; i < src_length; ++i)
      elems[i] = lp_build_const_int32(gallivm, i);

   /* Undef fill remaining space */
   for (i = src_length; i < dst_length; ++i)
      elems[i] = lp_build_const_int32(gallivm, src_length);

   /* Combine the two vectors */
   return LLVMBuildShuffleVector(gallivm->builder, src, undef, LLVMConstVector(elems, dst_length), "");
}