gallivm: (trivial) fix lp_build_concat_n
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_pack.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 /**
30 * @file
31 * Helper functions for packing/unpacking.
32 *
33 * Packing/unpacking is necessary for conversion between types of different
34 * bit width.
35 *
36 * They are also commonly used when a computation needs higher
37 * precision for the intermediate values. For example, if one needs the
38 * function:
39 *
40 * c = compute(a, b);
41 *
42 * to use more precision for intermediate results, then one should implement it
43 * as:
44 *
45 * LLVMValueRef
46 * compute(struct gallivm_state *gallivm, struct lp_type type, LLVMValueRef a, LLVMValueRef b)
47 * {
48 * struct lp_type wide_type = lp_wider_type(type);
49 * LLVMValueRef al, ah, bl, bh, cl, ch, c;
50 *
51 * lp_build_unpack2(gallivm, type, wide_type, a, &al, &ah);
52 * lp_build_unpack2(gallivm, type, wide_type, b, &bl, &bh);
53 *
54 * cl = compute_half(al, bl);
55 * ch = compute_half(ah, bh);
56 *
57 * c = lp_build_pack2(gallivm, wide_type, type, cl, ch);
58 *
59 * return c;
60 * }
61 *
62 * where compute_half() would do the computation for half the elements with
63 * twice the precision.
64 *
65 * @author Jose Fonseca <jfonseca@vmware.com>
66 */
67
68
69 #include "util/u_debug.h"
70 #include "util/u_math.h"
71 #include "util/u_cpu_detect.h"
72 #include "util/u_memory.h"
73
74 #include "lp_bld_type.h"
75 #include "lp_bld_const.h"
76 #include "lp_bld_init.h"
77 #include "lp_bld_intr.h"
78 #include "lp_bld_arit.h"
79 #include "lp_bld_pack.h"
80 #include "lp_bld_swizzle.h"
81
82
83 /**
84 * Build shuffle vectors that match PUNPCKLxx and PUNPCKHxx instructions.
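*
* E.g. for n = 4 this returns the shuffle mask <0, 4, 1, 5> for lo_hi = 0
* (low interleave) and <2, 6, 3, 7> for lo_hi = 1 (high interleave).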
85 */
86 static LLVMValueRef
87 lp_build_const_unpack_shuffle(struct gallivm_state *gallivm,
88 unsigned n, unsigned lo_hi)
89 {
90 LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
91 unsigned i, j;
92
93 assert(n <= LP_MAX_VECTOR_LENGTH);
94 assert(lo_hi < 2);
95
96 /* TODO: cache results in a static table */
97
98 for(i = 0, j = lo_hi*n/2; i < n; i += 2, ++j) {
99 elems[i + 0] = lp_build_const_int32(gallivm, 0 + j);
100 elems[i + 1] = lp_build_const_int32(gallivm, n + j);
101 }
102
103 return LLVMConstVector(elems, n);
104 }
105
106 /**
107 * Similar to lp_build_const_unpack_shuffle but for special AVX 256bit unpack.
108 * See comment above lp_build_interleave2_half for more details.
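*
* E.g. for n = 8 this returns <0, 8, 1, 9, 4, 12, 5, 13> for lo_hi = 0 and
* <2, 10, 3, 11, 6, 14, 7, 15> for lo_hi = 1, i.e. the interleave is done
* independently within each 128 bit half.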
109 */
110 static LLVMValueRef
111 lp_build_const_unpack_shuffle_half(struct gallivm_state *gallivm,
112 unsigned n, unsigned lo_hi)
113 {
114 LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
115 unsigned i, j;
116
117 assert(n <= LP_MAX_VECTOR_LENGTH);
118 assert(lo_hi < 2);
119
120 for (i = 0, j = lo_hi*(n/4); i < n; i += 2, ++j) {
121 if (i == (n / 2))
122 j += n / 4;
123
124 elems[i + 0] = lp_build_const_int32(gallivm, 0 + j);
125 elems[i + 1] = lp_build_const_int32(gallivm, n + j);
126 }
127
128 return LLVMConstVector(elems, n);
129 }
130
131 /**
132 * Build shuffle vectors that match PACKxx (SSE) instructions or
133 * VPERM (Altivec).
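*
* E.g. for n = 8 this returns <0, 2, 4, 6, 8, 10, 12, 14> on little endian
* and <1, 3, 5, 7, 9, 11, 13, 15> on big endian; either way the mask selects
* the least significant narrow half of each wide source element once the
* operands have been bitcast to the destination element width.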
134 */
135 static LLVMValueRef
136 lp_build_const_pack_shuffle(struct gallivm_state *gallivm, unsigned n)
137 {
138 LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
139 unsigned i;
140
141 assert(n <= LP_MAX_VECTOR_LENGTH);
142
143 for(i = 0; i < n; ++i)
144 #ifdef PIPE_ARCH_LITTLE_ENDIAN
145 elems[i] = lp_build_const_int32(gallivm, 2*i);
146 #else
147 elems[i] = lp_build_const_int32(gallivm, 2*i+1);
148 #endif
149
150 return LLVMConstVector(elems, n);
151 }
152
153 /**
154 * Return a vector with elements src[start:start+size]
155 * Most useful for getting half the values out of a 256bit sized vector;
156 * other ranges may cause additional data rearrangement.
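*
* E.g. lp_build_extract_range(gallivm, v, 4, 4) returns the upper half of
* an 8 element vector v.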
157 */
158 LLVMValueRef
159 lp_build_extract_range(struct gallivm_state *gallivm,
160 LLVMValueRef src,
161 unsigned start,
162 unsigned size)
163 {
164 LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
165 unsigned i;
166
167 assert(size <= Elements(elems));
168
169 for (i = 0; i < size; ++i)
170 elems[i] = lp_build_const_int32(gallivm, i + start);
171
172 if (size == 1) {
173 return LLVMBuildExtractElement(gallivm->builder, src, elems[0], "");
174 }
175 else {
176 return LLVMBuildShuffleVector(gallivm->builder, src, src,
177 LLVMConstVector(elems, size), "");
178 }
179 }
180
181 /**
182 * Concatenates several vectors (the count must be a power of two, all of
183 * the same type) into a larger one.
184 * Most useful for building up a 256bit sized vector out of two 128bit ones.
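*
* E.g. with num_vectors = 2 and two 4 x float sources a and b this returns
* the 8 x float vector a0 a1 a2 a3 b0 b1 b2 b3.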
185 */
186 LLVMValueRef
187 lp_build_concat(struct gallivm_state *gallivm,
188 LLVMValueRef src[],
189 struct lp_type src_type,
190 unsigned num_vectors)
191 {
192 unsigned new_length, i;
193 LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH/2];
194 LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH];
195
196 assert(src_type.length * num_vectors <= Elements(shuffles));
197 assert(util_is_power_of_two(num_vectors));
198
199 new_length = src_type.length;
200
201 for (i = 0; i < num_vectors; i++)
202 tmp[i] = src[i];
203
204 while (num_vectors > 1) {
205 num_vectors >>= 1;
206 new_length <<= 1;
207 for (i = 0; i < new_length; i++) {
208 shuffles[i] = lp_build_const_int32(gallivm, i);
209 }
210 for (i = 0; i < num_vectors; i++) {
211 tmp[i] = LLVMBuildShuffleVector(gallivm->builder, tmp[i*2], tmp[i*2 + 1],
212 LLVMConstVector(shuffles, new_length), "");
213 }
214 }
215
216 return tmp[0];
217 }
218
219
220 /**
221 * Combines vectors to reduce from num_srcs to num_dsts.
222 * Returns the number of src vectors concatenated in a single dst.
223 *
224 * num_srcs must be exactly divisible by num_dsts.
225 *
226 * e.g. For num_srcs = 4 and src = [x, y, z, w]
227 * num_dsts = 1 dst = [xyzw] return = 4
228 * num_dsts = 2 dst = [xy, zw] return = 2
229 */
230 int
231 lp_build_concat_n(struct gallivm_state *gallivm,
232 struct lp_type src_type,
233 LLVMValueRef *src,
234 unsigned num_srcs,
235 LLVMValueRef *dst,
236 unsigned num_dsts)
237 {
238 int size = num_srcs / num_dsts;
239 int i;
240
241 assert(num_srcs >= num_dsts);
242 assert((num_srcs % num_dsts) == 0);
243
244 if (num_srcs == num_dsts) {
245 for (i = 0; i < num_dsts; ++i) {
246 dst[i] = src[i];
247 }
248 return 1;
249 }
250
251 for (i = 0; i < num_dsts; ++i) {
252 dst[i] = lp_build_concat(gallivm, &src[i * size], src_type, size);
253 }
254
255 return size;
256 }
257
258
259 /**
260 * Interleave vector elements.
261 *
262 * Matches the PUNPCKLxx and PUNPCKHxx SSE instructions
263 * (but not for 256bit AVX vectors).
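*
* E.g. for two 4 element vectors a and b, lo_hi = 0 yields a0 b0 a1 b1 and
* lo_hi = 1 yields a2 b2 a3 b3.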
264 */
265 LLVMValueRef
266 lp_build_interleave2(struct gallivm_state *gallivm,
267 struct lp_type type,
268 LLVMValueRef a,
269 LLVMValueRef b,
270 unsigned lo_hi)
271 {
272 LLVMValueRef shuffle;
273
274 shuffle = lp_build_const_unpack_shuffle(gallivm, type.length, lo_hi);
275
276 return LLVMBuildShuffleVector(gallivm->builder, a, b, shuffle, "");
277 }
278
279 /**
280 * Interleave vector elements, but for 256 bit vectors treat the operation
281 * as an interleave of two concatenated 128 bit vectors.
282 *
283 * This differs from lp_build_interleave2, which (for lo) would produce
284 * a0 b0 a1 b1 a2 b2 a3 b3, a pattern that does not map to a single AVX unpack instruction.
285 *
286 *
287 * An example: interleaving 8x float with 8x float using the AVX 256bit unpack:
288 * a0 a1 a2 a3 a4 a5 a6 a7 <-> b0 b1 b2 b3 b4 b5 b6 b7
289 *
290 * Equivalent to interleaving 2x 128 bit vectors
291 * a0 a1 a2 a3 <-> b0 b1 b2 b3 concatenated with a4 a5 a6 a7 <-> b4 b5 b6 b7
292 *
293 * So interleave-lo would result in:
294 * a0 b0 a1 b1 a4 b4 a5 b5
295 *
296 * And interleave-hi would result in:
297 * a2 b2 a3 b3 a6 b6 a7 b7
298 */
299 LLVMValueRef
300 lp_build_interleave2_half(struct gallivm_state *gallivm,
301 struct lp_type type,
302 LLVMValueRef a,
303 LLVMValueRef b,
304 unsigned lo_hi)
305 {
306 if (type.length * type.width == 256) {
307 LLVMValueRef shuffle = lp_build_const_unpack_shuffle_half(gallivm, type.length, lo_hi);
308 return LLVMBuildShuffleVector(gallivm->builder, a, b, shuffle, "");
309 } else {
310 return lp_build_interleave2(gallivm, type, a, b, lo_hi);
311 }
312 }
313
314 /**
315 * Double the bit width.
316 *
317 * This will only change the number of bits the values are represented with,
318 * not the values themselves.
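*
* E.g. unpacking an unsigned 8 x i16 vector <1 2 3 4 5 6 7 8> yields the two
* 4 x i32 vectors dst_lo = <1 2 3 4> and dst_hi = <5 6 7 8>.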
319 */
320 void
321 lp_build_unpack2(struct gallivm_state *gallivm,
322 struct lp_type src_type,
323 struct lp_type dst_type,
324 LLVMValueRef src,
325 LLVMValueRef *dst_lo,
326 LLVMValueRef *dst_hi)
327 {
328 LLVMBuilderRef builder = gallivm->builder;
329 LLVMValueRef msb;
330 LLVMTypeRef dst_vec_type;
331
332 assert(!src_type.floating);
333 assert(!dst_type.floating);
334 assert(dst_type.width == src_type.width * 2);
335 assert(dst_type.length * 2 == src_type.length);
336
337 if(dst_type.sign && src_type.sign) {
338 /* Replicate the sign bit in the most significant bits */
339 msb = LLVMBuildAShr(builder, src, lp_build_const_int_vec(gallivm, src_type, src_type.width - 1), "");
340 }
341 else
342 /* Most significant bits always zero */
343 msb = lp_build_zero(gallivm, src_type);
344
345 /* Interleave bits */
346 #ifdef PIPE_ARCH_LITTLE_ENDIAN
347 *dst_lo = lp_build_interleave2(gallivm, src_type, src, msb, 0);
348 *dst_hi = lp_build_interleave2(gallivm, src_type, src, msb, 1);
349 #else
350 *dst_lo = lp_build_interleave2(gallivm, src_type, msb, src, 0);
351 *dst_hi = lp_build_interleave2(gallivm, src_type, msb, src, 1);
352 #endif
353
354 /* Cast the result into the new type (twice as wide) */
355
356 dst_vec_type = lp_build_vec_type(gallivm, dst_type);
357
358 *dst_lo = LLVMBuildBitCast(builder, *dst_lo, dst_vec_type, "");
359 *dst_hi = LLVMBuildBitCast(builder, *dst_hi, dst_vec_type, "");
360 }
361
362
363 /**
364 * Expand the bit width.
365 *
366 * This will only change the number of bits the values are represented with,
367 * not the values themselves.
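*
* E.g. expanding one 16 x i8 vector to four 4 x i32 vectors takes two
* doubling steps (8 -> 16 -> 32 bits), each implemented with lp_build_unpack2.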
368 */
369 void
370 lp_build_unpack(struct gallivm_state *gallivm,
371 struct lp_type src_type,
372 struct lp_type dst_type,
373 LLVMValueRef src,
374 LLVMValueRef *dst, unsigned num_dsts)
375 {
376 unsigned num_tmps;
377 unsigned i;
378
379 /* Register width must remain constant */
380 assert(src_type.width * src_type.length == dst_type.width * dst_type.length);
381
382 /* We must not lose or gain channels. Only precision */
383 assert(src_type.length == dst_type.length * num_dsts);
384
385 num_tmps = 1;
386 dst[0] = src;
387
388 while(src_type.width < dst_type.width) {
389 struct lp_type tmp_type = src_type;
390
391 tmp_type.width *= 2;
392 tmp_type.length /= 2;
393
394 for(i = num_tmps; i--; ) {
395 lp_build_unpack2(gallivm, src_type, tmp_type, dst[i], &dst[2*i + 0], &dst[2*i + 1]);
396 }
397
398 src_type = tmp_type;
399
400 num_tmps *= 2;
401 }
402
403 assert(num_tmps == num_dsts);
404 }
405
406
407 /**
408 * Non-interleaved pack.
409 *
410 * This will move values as
411 * (LSB) (MSB)
412 * lo = l0 __ l1 __ l2 __.. __ ln __
413 * hi = h0 __ h1 __ h2 __.. __ hn __
414 * res = l0 l1 l2 .. ln h0 h1 h2 .. hn
415 *
416 * This will only change the number of bits the values are represented with,
417 * not the values themselves.
418 *
419 * It is assumed the values are already clamped into the destination type range.
420 * Values outside that range will produce undefined results. Use
421 * lp_build_packs2 instead.
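*
* E.g. when packing 32 bit lanes into 16 bit lanes, a lane holding 70000
* does not fit and its packed value is undefined (it may be truncated or
* saturated depending on the code path taken); lp_build_packs2 clamps such
* values to the destination range first.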
422 */
423 LLVMValueRef
424 lp_build_pack2(struct gallivm_state *gallivm,
425 struct lp_type src_type,
426 struct lp_type dst_type,
427 LLVMValueRef lo,
428 LLVMValueRef hi)
429 {
430 LLVMBuilderRef builder = gallivm->builder;
431 LLVMTypeRef dst_vec_type = lp_build_vec_type(gallivm, dst_type);
432 LLVMValueRef shuffle;
433 LLVMValueRef res = NULL;
434 struct lp_type intr_type = dst_type;
435
436 #if HAVE_LLVM < 0x0207
437 intr_type = src_type;
438 #endif
439
440 assert(!src_type.floating);
441 assert(!dst_type.floating);
442 assert(src_type.width == dst_type.width * 2);
443 assert(src_type.length * 2 == dst_type.length);
444
445 /* Check for special cases first */
446 if((util_cpu_caps.has_sse2 || util_cpu_caps.has_altivec) &&
447 src_type.width * src_type.length >= 128) {
448 const char *intrinsic = NULL;
449
450 switch(src_type.width) {
451 case 32:
452 if (util_cpu_caps.has_sse2) {
453 if(dst_type.sign) {
454 intrinsic = "llvm.x86.sse2.packssdw.128";
455 }
456 else {
457 if (util_cpu_caps.has_sse4_1) {
458 intrinsic = "llvm.x86.sse41.packusdw";
459 #if HAVE_LLVM < 0x0207
460 /* llvm < 2.7 has inconsistent signatures except for packusdw */
461 intr_type = dst_type;
462 #endif
463 }
464 }
465 } else if (util_cpu_caps.has_altivec) {
466 if (dst_type.sign) {
467 intrinsic = "llvm.ppc.altivec.vpkswus";
468 } else {
469 intrinsic = "llvm.ppc.altivec.vpkuwus";
470 }
471 }
472 break;
473 case 16:
474 if (dst_type.sign) {
475 if (util_cpu_caps.has_sse2) {
476 intrinsic = "llvm.x86.sse2.packsswb.128";
477 } else if (util_cpu_caps.has_altivec) {
478 intrinsic = "llvm.ppc.altivec.vpkshss";
479 }
480 } else {
481 if (util_cpu_caps.has_sse2) {
482 intrinsic = "llvm.x86.sse2.packuswb.128";
483 } else if (util_cpu_caps.has_altivec) {
484 intrinsic = "llvm.ppc.altivec.vpkshus";
485 }
486 }
487 break;
488 /* default uses generic shuffle below */
489 }
490 if (intrinsic) {
491 if (src_type.width * src_type.length == 128) {
492 LLVMTypeRef intr_vec_type = lp_build_vec_type(gallivm, intr_type);
493 res = lp_build_intrinsic_binary(builder, intrinsic, intr_vec_type, lo, hi);
494 if (dst_vec_type != intr_vec_type) {
495 res = LLVMBuildBitCast(builder, res, dst_vec_type, "");
496 }
497 }
498 else {
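/*
* The source is wider than 128 bit: split lo and hi into 128 bit chunks,
* pack each pair of adjacent chunks with the intrinsic, then concatenate
* the partial results into the final vector.
*/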
499 int num_split = src_type.width * src_type.length / 128;
500 int i;
501 int nlen = 128 / src_type.width;
502 struct lp_type ndst_type = lp_type_unorm(dst_type.width, 128);
503 struct lp_type nintr_type = lp_type_unorm(intr_type.width, 128);
504 LLVMValueRef tmpres[LP_MAX_VECTOR_WIDTH / 128];
505 LLVMValueRef tmplo, tmphi;
506 LLVMTypeRef ndst_vec_type = lp_build_vec_type(gallivm, ndst_type);
507 LLVMTypeRef nintr_vec_type = lp_build_vec_type(gallivm, nintr_type);
508
509 assert(num_split <= LP_MAX_VECTOR_WIDTH / 128);
510
511 for (i = 0; i < num_split / 2; i++) {
512 tmplo = lp_build_extract_range(gallivm,
513 lo, i*nlen*2, nlen);
514 tmphi = lp_build_extract_range(gallivm,
515 lo, i*nlen*2 + nlen, nlen);
516 tmpres[i] = lp_build_intrinsic_binary(builder, intrinsic,
517 nintr_vec_type, tmplo, tmphi);
518 if (ndst_vec_type != nintr_vec_type) {
519 tmpres[i] = LLVMBuildBitCast(builder, tmpres[i], ndst_vec_type, "");
520 }
521 }
522 for (i = 0; i < num_split / 2; i++) {
523 tmplo = lp_build_extract_range(gallivm,
524 hi, i*nlen*2, nlen);
525 tmphi = lp_build_extract_range(gallivm,
526 hi, i*nlen*2 + nlen, nlen);
527 tmpres[i+num_split/2] = lp_build_intrinsic_binary(builder, intrinsic,
528 nintr_vec_type,
529 tmplo, tmphi);
530 if (ndst_vec_type != nintr_vec_type) {
531 tmpres[i+num_split/2] = LLVMBuildBitCast(builder, tmpres[i+num_split/2],
532 ndst_vec_type, "");
533 }
534 }
535 res = lp_build_concat(gallivm, tmpres, ndst_type, num_split);
536 }
537 return res;
538 }
539 }
540
541 /* generic shuffle */
542 lo = LLVMBuildBitCast(builder, lo, dst_vec_type, "");
543 hi = LLVMBuildBitCast(builder, hi, dst_vec_type, "");
544
545 shuffle = lp_build_const_pack_shuffle(gallivm, dst_type.length);
546
547 res = LLVMBuildShuffleVector(builder, lo, hi, shuffle, "");
548
549 return res;
550 }
551
552
553
554 /**
555 * Non-interleaved pack and saturate.
556 *
557 * Same as lp_build_pack2 but will saturate values so that they fit into the
558 * destination type.
559 */
560 LLVMValueRef
561 lp_build_packs2(struct gallivm_state *gallivm,
562 struct lp_type src_type,
563 struct lp_type dst_type,
564 LLVMValueRef lo,
565 LLVMValueRef hi)
566 {
567 boolean clamp;
568
569 assert(!src_type.floating);
570 assert(!dst_type.floating);
571 assert(src_type.sign == dst_type.sign);
572 assert(src_type.width == dst_type.width * 2);
573 assert(src_type.length * 2 == dst_type.length);
574
575 clamp = TRUE;
576
577 /* All X86 SSE non-interleaved pack instructions take signed inputs and
578 * saturate them, so no need to clamp for those cases. */
579 if(util_cpu_caps.has_sse2 &&
580 src_type.width * src_type.length >= 128 &&
581 src_type.sign &&
582 (src_type.width == 32 || src_type.width == 16))
583 clamp = FALSE;
584
585 if(clamp) {
586 struct lp_build_context bld;
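/* Clamp to the largest value representable in the destination type,
* e.g. 32767 for a signed 16 bit destination, 65535 for an unsigned one. */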
587 unsigned dst_bits = dst_type.sign ? dst_type.width - 1 : dst_type.width;
588 LLVMValueRef dst_max = lp_build_const_int_vec(gallivm, src_type, ((unsigned long long)1 << dst_bits) - 1);
589 lp_build_context_init(&bld, gallivm, src_type);
590 lo = lp_build_min(&bld, lo, dst_max);
591 hi = lp_build_min(&bld, hi, dst_max);
592 /* FIXME: What about lower bound? */
593 }
594
595 return lp_build_pack2(gallivm, src_type, dst_type, lo, hi);
596 }
597
598
599 /**
600 * Truncate the bit width.
601 *
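* E.g. four 4 x i32 source vectors are reduced to a single 16 x i8 vector
* in two halving steps (32 -> 16 -> 8 bits), each step using lp_build_pack2
* or lp_build_packs2 depending on whether the values are already clamped.
*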
602 * TODO: Handle saturation consistently.
603 */
604 LLVMValueRef
605 lp_build_pack(struct gallivm_state *gallivm,
606 struct lp_type src_type,
607 struct lp_type dst_type,
608 boolean clamped,
609 const LLVMValueRef *src, unsigned num_srcs)
610 {
611 LLVMValueRef (*pack2)(struct gallivm_state *gallivm,
612 struct lp_type src_type,
613 struct lp_type dst_type,
614 LLVMValueRef lo,
615 LLVMValueRef hi);
616 LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
617 unsigned i;
618
619 /* Register width must remain constant */
620 assert(src_type.width * src_type.length == dst_type.width * dst_type.length);
621
622 /* We must not lose or gain channels. Only precision */
623 assert(src_type.length * num_srcs == dst_type.length);
624
625 if(clamped)
626 pack2 = &lp_build_pack2;
627 else
628 pack2 = &lp_build_packs2;
629
630 for(i = 0; i < num_srcs; ++i)
631 tmp[i] = src[i];
632
633 while(src_type.width > dst_type.width) {
634 struct lp_type tmp_type = src_type;
635
636 tmp_type.width /= 2;
637 tmp_type.length *= 2;
638
639 /* Take the sign change into consideration only in the last step */
640 if(tmp_type.width == dst_type.width)
641 tmp_type.sign = dst_type.sign;
642
643 num_srcs /= 2;
644
645 for(i = 0; i < num_srcs; ++i)
646 tmp[i] = pack2(gallivm, src_type, tmp_type,
647 tmp[2*i + 0], tmp[2*i + 1]);
648
649 src_type = tmp_type;
650 }
651
652 assert(num_srcs == 1);
653
654 return tmp[0];
655 }
656
657
658 /**
659 * Truncate or expand the bitwidth.
660 *
661 * NOTE: Getting the right sign flags is crucial here, as we employ some
662 * intrinsics that do saturation.
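*
* E.g. truncating two 8 x i16 vectors into one 16 x i8 vector, or expanding
* one 16 x i8 vector into two 8 x i16 vectors; M:N conversions with both
* counts greater than one are not supported.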
663 */
664 void
665 lp_build_resize(struct gallivm_state *gallivm,
666 struct lp_type src_type,
667 struct lp_type dst_type,
668 const LLVMValueRef *src, unsigned num_srcs,
669 LLVMValueRef *dst, unsigned num_dsts)
670 {
671 LLVMBuilderRef builder = gallivm->builder;
672 LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
673 unsigned i;
674
675 /*
676 * We don't support float <-> int conversion here. That must be done
677 * before/after calling this function.
678 */
679 assert(src_type.floating == dst_type.floating);
680
681 /*
682 * We don't support double <-> float conversion yet, although it could be
683 * added with little effort.
684 */
685 assert((!src_type.floating && !dst_type.floating) ||
686 src_type.width == dst_type.width);
687
688 /* We must not lose or gain channels. Only precision */
689 assert(src_type.length * num_srcs == dst_type.length * num_dsts);
690
691 /* We don't support M:N conversion, only 1:N, M:1, or 1:1 */
692 assert(num_srcs == 1 || num_dsts == 1);
693
694 assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
695 assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
696 assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
697 assert(num_dsts <= LP_MAX_VECTOR_LENGTH);
698
699 if (src_type.width > dst_type.width) {
700 /*
701 * Truncate bit width.
702 */
703
704 assert(num_dsts == 1);
705
706 if (src_type.width * src_type.length == dst_type.width * dst_type.length) {
707 /*
708 * Register width remains constant -- use vector packing intrinsics
709 */
710 tmp[0] = lp_build_pack(gallivm, src_type, dst_type, TRUE, src, num_srcs);
711 }
712 else {
713 if (src_type.width / dst_type.width > num_srcs) {
714 /*
715 * First change the src vectors' size (with a shuffle) so they have the
716 * same size as the destination vector, then pack normally.
717 * Note: cannot use cast/extract because llvm generates atrocious code.
718 */
719 unsigned size_ratio = (src_type.width * src_type.length) /
720 (dst_type.length * dst_type.width);
721 unsigned new_length = src_type.length / size_ratio;
722
723 for (i = 0; i < size_ratio * num_srcs; i++) {
724 unsigned start_index = (i % size_ratio) * new_length;
725 tmp[i] = lp_build_extract_range(gallivm, src[i / size_ratio],
726 start_index, new_length);
727 }
728 num_srcs *= size_ratio;
729 src_type.length = new_length;
730 tmp[0] = lp_build_pack(gallivm, src_type, dst_type, TRUE, tmp, num_srcs);
731 }
732 else {
733 /*
734 * Truncate bit width but expand vector size - first pack
735 * then expand simply because this should be more AVX-friendly
736 * for the cases we probably hit.
737 */
738 unsigned size_ratio = (dst_type.width * dst_type.length) /
739 (src_type.length * src_type.width);
740 unsigned num_pack_srcs = num_srcs / size_ratio;
741 dst_type.length = dst_type.length / size_ratio;
742
743 for (i = 0; i < size_ratio; i++) {
744 tmp[i] = lp_build_pack(gallivm, src_type, dst_type, TRUE,
745 &src[i*num_pack_srcs], num_pack_srcs);
746 }
747 tmp[0] = lp_build_concat(gallivm, tmp, dst_type, size_ratio);
748 }
749 }
750 }
751 else if (src_type.width < dst_type.width) {
752 /*
753 * Expand bit width.
754 */
755
756 assert(num_srcs == 1);
757
758 if (src_type.width * src_type.length == dst_type.width * dst_type.length) {
759 /*
760 * Register width remains constant -- use vector unpack intrinsics
761 */
762 lp_build_unpack(gallivm, src_type, dst_type, src[0], tmp, num_dsts);
763 }
764 else {
765 /*
766 * Do it element-wise.
767 */
768 assert(src_type.length * num_srcs == dst_type.length * num_dsts);
769
770 for (i = 0; i < num_dsts; i++) {
771 tmp[i] = lp_build_undef(gallivm, dst_type);
772 }
773
774 for (i = 0; i < src_type.length; ++i) {
775 unsigned j = i / dst_type.length;
776 LLVMValueRef srcindex = lp_build_const_int32(gallivm, i);
777 LLVMValueRef dstindex = lp_build_const_int32(gallivm, i % dst_type.length);
778 LLVMValueRef val = LLVMBuildExtractElement(builder, src[0], srcindex, "");
779
780 if (src_type.sign && dst_type.sign) {
781 val = LLVMBuildSExt(builder, val, lp_build_elem_type(gallivm, dst_type), "");
782 } else {
783 val = LLVMBuildZExt(builder, val, lp_build_elem_type(gallivm, dst_type), "");
784 }
785 tmp[j] = LLVMBuildInsertElement(builder, tmp[j], val, dstindex, "");
786 }
787 }
788 }
789 else {
790 /*
791 * No-op
792 */
793
794 assert(num_srcs == 1);
795 assert(num_dsts == 1);
796
797 tmp[0] = src[0];
798 }
799
800 for(i = 0; i < num_dsts; ++i)
801 dst[i] = tmp[i];
802 }
803
804
805 /**
806 * Expands src vector from src.length to dst_length
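*
* The extra elements are filled with undefined values, e.g. padding a
* 4 x float vector to length 8 yields x y z w undef undef undef undef.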
807 */
808 LLVMValueRef
809 lp_build_pad_vector(struct gallivm_state *gallivm,
810 LLVMValueRef src,
811 unsigned dst_length)
812 {
813 LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
814 LLVMValueRef undef;
815 LLVMTypeRef type;
816 unsigned i, src_length;
817
818 type = LLVMTypeOf(src);
819
820 if (LLVMGetTypeKind(type) != LLVMVectorTypeKind) {
821 /* Can't use ShuffleVector on non-vector type */
822 undef = LLVMGetUndef(LLVMVectorType(type, dst_length));
823 return LLVMBuildInsertElement(gallivm->builder, undef, src, lp_build_const_int32(gallivm, 0), "");
824 }
825
826 undef = LLVMGetUndef(type);
827 src_length = LLVMGetVectorSize(type);
828
829 assert(dst_length <= Elements(elems));
830 assert(dst_length >= src_length);
831
832 if (src_length == dst_length)
833 return src;
834
835 /* All elements from src vector */
836 for (i = 0; i < src_length; ++i)
837 elems[i] = lp_build_const_int32(gallivm, i);
838
839 /* Fill the remaining space with undef elements */
840 for (i = src_length; i < dst_length; ++i)
841 elems[i] = lp_build_const_int32(gallivm, src_length);
842
843 /* Combine the two vectors */
844 return LLVMBuildShuffleVector(gallivm->builder, src, undef, LLVMConstVector(elems, dst_length), "");
845 }