/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Helper functions for packing/unpacking.
 *
 * Packing/unpacking is necessary for conversion between types of different
 * bit width.
 *
 * They are also commonly used when a computation needs higher
 * precision for the intermediate values. For example, if one needs
 * to use more precision for intermediate results then one should implement it
 * as:
 *
 *   LLVMValueRef
 *   compute(LLVMBuilderRef builder, struct lp_type type, LLVMValueRef a, LLVMValueRef b)
 *   {
 *      struct lp_type wide_type = lp_wider_type(type);
 *      LLVMValueRef al, ah, bl, bh, cl, ch, c;
 *
 *      lp_build_unpack2(builder, type, wide_type, a, &al, &ah);
 *      lp_build_unpack2(builder, type, wide_type, b, &bl, &bh);
 *
 *      cl = compute_half(al, bl);
 *      ch = compute_half(ah, bh);
 *
 *      c = lp_build_pack2(builder, wide_type, type, cl, ch);
 *
 *      return c;
 *   }
 *
 * where compute_half() would do the computation for half the elements with
 * twice the precision.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */

#include "util/u_debug.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"
#include "util/u_memory.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_init.h"
#include "lp_bld_intr.h"
#include "lp_bld_arit.h"
#include "lp_bld_pack.h"
#include "lp_bld_swizzle.h"

/**
 * Build shuffle vectors that match PUNPCKLxx and PUNPCKHxx instructions.
 */
static LLVMValueRef
lp_build_const_unpack_shuffle(struct gallivm_state *gallivm,
                              unsigned n, unsigned lo_hi)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i, j;

   assert(n <= LP_MAX_VECTOR_LENGTH);
   assert(lo_hi < 2);

   /* TODO: cache results in a static table */

   for(i = 0, j = lo_hi*n/2; i < n; i += 2, ++j) {
      elems[i + 0] = lp_build_const_int32(gallivm, 0 + j);
      elems[i + 1] = lp_build_const_int32(gallivm, n + j);
   }

   return LLVMConstVector(elems, n);
}
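
/*
 * For illustration: with n = 4 and lo_hi = 0 the shuffle above yields the
 * indices 0 4 1 5 (the low-half interleave of two 4-element vectors, as
 * PUNPCKLxx does); with lo_hi = 1 it yields 2 6 3 7 (PUNPCKHxx).
 */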

/**
 * Similar to lp_build_const_unpack_shuffle but for special AVX 256bit unpack.
 * See comment above lp_build_interleave2_half for more details.
 */
static LLVMValueRef
lp_build_const_unpack_shuffle_half(struct gallivm_state *gallivm,
                                   unsigned n, unsigned lo_hi)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i, j;

   assert(n <= LP_MAX_VECTOR_LENGTH);
   assert(lo_hi < 2);

   for (i = 0, j = lo_hi*(n/4); i < n; i += 2, ++j) {
      /* Jump to the second 128bit lane at the halfway point */
      if (i == n/2) {
         j += n/4;
      }
      elems[i + 0] = lp_build_const_int32(gallivm, 0 + j);
      elems[i + 1] = lp_build_const_int32(gallivm, n + j);
   }

   return LLVMConstVector(elems, n);
}
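
/*
 * For illustration: with n = 8 and lo_hi = 0 this yields the indices
 * 0 8 1 9 4 12 5 13, i.e. the interleave is performed independently within
 * each 128-bit lane, which is what the AVX unpack instructions do.
 */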

/**
 * Build shuffle vectors that match PACKxx (SSE) instructions or
 * VPERM (Altivec).
 */
static LLVMValueRef
lp_build_const_pack_shuffle(struct gallivm_state *gallivm, unsigned n)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   assert(n <= LP_MAX_VECTOR_LENGTH);

   for(i = 0; i < n; ++i)
#ifdef PIPE_ARCH_LITTLE_ENDIAN
      elems[i] = lp_build_const_int32(gallivm, 2*i);
#else
      elems[i] = lp_build_const_int32(gallivm, 2*i+1);
#endif

   return LLVMConstVector(elems, n);
}
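
/*
 * For illustration: with n = 8 on little endian this yields the indices
 * 0 2 4 6 8 10 12 14, i.e. every even (value-carrying) half of the wider
 * elements of lo followed by those of hi.
 */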

/**
 * Return a vector with elements src[start:start+size]
 * Most useful for getting half the values out of a 256bit sized vector,
 * otherwise may cause data rearrangement to happen.
 */
LLVMValueRef
lp_build_extract_range(struct gallivm_state *gallivm,
                       LLVMValueRef src,
                       unsigned start,
                       unsigned size)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   assert(size <= ARRAY_SIZE(elems));

   for (i = 0; i < size; ++i)
      elems[i] = lp_build_const_int32(gallivm, i + start);

   if (size == 1)
      return LLVMBuildExtractElement(gallivm->builder, src, elems[0], "");
   else
      return LLVMBuildShuffleVector(gallivm->builder, src, src,
                                    LLVMConstVector(elems, size), "");
}

/**
 * Concatenates several (must be a power of 2) vectors (of same type)
 * into a larger one.
 * Most useful for building up a 256bit sized vector out of two 128bit ones.
 */
LLVMValueRef
lp_build_concat(struct gallivm_state *gallivm,
                LLVMValueRef src[],
                struct lp_type src_type,
                unsigned num_vectors)
{
   unsigned new_length, i;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH/2];
   LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH];

   assert(src_type.length * num_vectors <= ARRAY_SIZE(shuffles));
   assert(util_is_power_of_two(num_vectors));

   new_length = src_type.length;

   for (i = 0; i < num_vectors; i++)
      tmp[i] = src[i];

   /* Pairwise combine vectors, doubling their length each round */
   while (num_vectors > 1) {
      num_vectors >>= 1;
      new_length <<= 1;
      for (i = 0; i < new_length; i++) {
         shuffles[i] = lp_build_const_int32(gallivm, i);
      }
      for (i = 0; i < num_vectors; i++) {
         tmp[i] = LLVMBuildShuffleVector(gallivm->builder, tmp[i*2], tmp[i*2 + 1],
                                         LLVMConstVector(shuffles, new_length), "");
      }
   }

   return tmp[0];
}

/**
 * Combines vectors to reduce from num_srcs to num_dsts.
 * Returns the number of src vectors concatenated in a single dst.
 *
 * num_srcs must be exactly divisible by num_dsts.
 *
 * e.g. For num_srcs = 4 and src = [x, y, z, w]
 *      num_dsts = 1   dst = [xyzw]    return = 4
 *      num_dsts = 2   dst = [xy, zw]  return = 2
 */
int
lp_build_concat_n(struct gallivm_state *gallivm,
                  struct lp_type src_type,
                  LLVMValueRef *src,
                  unsigned num_srcs,
                  LLVMValueRef *dst,
                  unsigned num_dsts)
{
   int size = num_srcs / num_dsts;
   unsigned i;

   assert(num_srcs >= num_dsts);
   assert((num_srcs % size) == 0);

   if (num_srcs == num_dsts) {
      for (i = 0; i < num_dsts; ++i) {
         dst[i] = src[i];
      }
      return 1;
   }

   for (i = 0; i < num_dsts; ++i) {
      dst[i] = lp_build_concat(gallivm, &src[i * size], src_type, size);
   }

   return size;
}

/**
 * Un-interleave vector.
 * This will return a vector consisting of every second element
 * (depending on lo_hi, beginning at 0 or 1).
 * The returned vector size (elems and width) will only be half
 * that of the source vector.
 */
LLVMValueRef
lp_build_uninterleave1(struct gallivm_state *gallivm,
                       unsigned num_elems,
                       LLVMValueRef a,
                       unsigned lo_hi)
{
   LLVMValueRef shuffle, elems[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   assert(num_elems <= LP_MAX_VECTOR_LENGTH);

   for (i = 0; i < num_elems / 2; ++i)
      elems[i] = lp_build_const_int32(gallivm, 2*i + lo_hi);

   shuffle = LLVMConstVector(elems, num_elems / 2);

   return LLVMBuildShuffleVector(gallivm->builder, a, a, shuffle, "");
}

/**
 * Interleave vector elements.
 *
 * Matches the PUNPCKLxx and PUNPCKHxx SSE instructions
 * (but not for 256bit AVX vectors).
 */
LLVMValueRef
lp_build_interleave2(struct gallivm_state *gallivm,
                     struct lp_type type,
                     LLVMValueRef a,
                     LLVMValueRef b,
                     unsigned lo_hi)
{
   LLVMValueRef shuffle;

   if (type.length == 2 && type.width == 128 && util_cpu_caps.has_avx) {
      /*
       * XXX: This is a workaround for llvm code generation deficiency. Strangely
       * enough, while this needs vinsertf128/vextractf128 instructions (hence
       * a natural match when using 2x128bit vectors) the "normal" unpack shuffle
       * generates code ranging from atrocious (llvm 3.1) to terrible (llvm 3.2, 3.3).
       * So use some different shuffles instead (the exact shuffles don't seem to
       * matter, as long as not using 128bit wide vectors, works with 8x32 or 4x64).
       */
      struct lp_type tmp_type = type;
      LLVMValueRef srchalf[2], tmpdst;
      tmp_type.length = 4;
      tmp_type.width = 64;
      a = LLVMBuildBitCast(gallivm->builder, a, lp_build_vec_type(gallivm, tmp_type), "");
      b = LLVMBuildBitCast(gallivm->builder, b, lp_build_vec_type(gallivm, tmp_type), "");
      srchalf[0] = lp_build_extract_range(gallivm, a, lo_hi * 2, 2);
      srchalf[1] = lp_build_extract_range(gallivm, b, lo_hi * 2, 2);
      tmp_type.length = 2;
      tmpdst = lp_build_concat(gallivm, srchalf, tmp_type, 2);
      return LLVMBuildBitCast(gallivm->builder, tmpdst, lp_build_vec_type(gallivm, type), "");
   }

   shuffle = lp_build_const_unpack_shuffle(gallivm, type.length, lo_hi);

   return LLVMBuildShuffleVector(gallivm->builder, a, b, shuffle, "");
}

/**
 * Interleave vector elements but with 256 bit,
 * treats it as interleave with 2 concatenated 128 bit vectors.
 *
 * This differs from lp_build_interleave2, as that function would do the
 * following (for lo): a0 b0 a1 b1 a2 b2 a3 b3, which does not compile into
 * an AVX unpack instruction.
 *
 * An example interleave 8x float with 8x float on AVX 256bit unpack:
 *   a0 a1 a2 a3 a4 a5 a6 a7 <-> b0 b1 b2 b3 b4 b5 b6 b7
 *
 * Equivalent to interleaving 2x 128 bit vectors
 *   a0 a1 a2 a3 <-> b0 b1 b2 b3 concatenated with a4 a5 a6 a7 <-> b4 b5 b6 b7
 *
 * So interleave-lo would result in:
 *   a0 b0 a1 b1 a4 b4 a5 b5
 *
 * And interleave-hi would result in:
 *   a2 b2 a3 b3 a6 b6 a7 b7
 */
LLVMValueRef
lp_build_interleave2_half(struct gallivm_state *gallivm,
                          struct lp_type type,
                          LLVMValueRef a,
                          LLVMValueRef b,
                          unsigned lo_hi)
{
   if (type.length * type.width == 256) {
      LLVMValueRef shuffle = lp_build_const_unpack_shuffle_half(gallivm, type.length, lo_hi);
      return LLVMBuildShuffleVector(gallivm->builder, a, b, shuffle, "");
   } else {
      return lp_build_interleave2(gallivm, type, a, b, lo_hi);
   }
}

/**
 * Double the bit width.
 *
 * This will only change the number of bits with which the values are
 * represented, not the values themselves.
 */
void
lp_build_unpack2(struct gallivm_state *gallivm,
                 struct lp_type src_type,
                 struct lp_type dst_type,
                 LLVMValueRef src,
                 LLVMValueRef *dst_lo,
                 LLVMValueRef *dst_hi)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef msb;
   LLVMTypeRef dst_vec_type;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(dst_type.width == src_type.width * 2);
   assert(dst_type.length * 2 == src_type.length);

   if(dst_type.sign && src_type.sign) {
      /* Replicate the sign bit in the most significant bits */
      msb = LLVMBuildAShr(builder, src,
                          lp_build_const_int_vec(gallivm, src_type, src_type.width - 1), "");
   }
   else
      /* Most significant bits always zero */
      msb = lp_build_zero(gallivm, src_type);

   /* Interleave bits */
#ifdef PIPE_ARCH_LITTLE_ENDIAN
   *dst_lo = lp_build_interleave2(gallivm, src_type, src, msb, 0);
   *dst_hi = lp_build_interleave2(gallivm, src_type, src, msb, 1);
#else
   *dst_lo = lp_build_interleave2(gallivm, src_type, msb, src, 0);
   *dst_hi = lp_build_interleave2(gallivm, src_type, msb, src, 1);
#endif

   /* Cast the result into the new type (twice as wide) */

   dst_vec_type = lp_build_vec_type(gallivm, dst_type);

   *dst_lo = LLVMBuildBitCast(builder, *dst_lo, dst_vec_type, "");
   *dst_hi = LLVMBuildBitCast(builder, *dst_hi, dst_vec_type, "");
}

/**
 * Expand the bit width.
 *
 * This will only change the number of bits with which the values are
 * represented, not the values themselves.
 */
void
lp_build_unpack(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                LLVMValueRef src,
                LLVMValueRef *dst, unsigned num_dsts)
{
   unsigned num_tmps;
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length == dst_type.length * num_dsts);

   num_tmps = 1;
   dst[0] = src;

   /* Repeatedly double the bit width, halving the vector length each time */
   while(src_type.width < dst_type.width) {
      struct lp_type tmp_type = src_type;

      tmp_type.width *= 2;
      tmp_type.length /= 2;

      for(i = num_tmps; i--; ) {
         lp_build_unpack2(gallivm, src_type, tmp_type, dst[i],
                          &dst[2*i + 0], &dst[2*i + 1]);
      }

      src_type = tmp_type;

      num_tmps *= 2;
   }

   assert(num_tmps == num_dsts);
}

/**
 * Non-interleaved pack.
 *
 * This will move values as
 *   lo =  l0 __ l1 __ l2 __ .. __ ln __
 *   hi =  h0 __ h1 __ h2 __ .. __ hn __
 *   res = l0 l1 l2 .. ln h0 h1 h2 .. hn
 *
 * This will only change the number of bits with which the values are
 * represented, not the values themselves.
 *
 * It is assumed the values are already clamped into the destination type
 * range. Values outside that range will produce undefined results; use
 * lp_build_packs2 instead if the values may be out of range.
 */
LLVMValueRef
lp_build_pack2(struct gallivm_state *gallivm,
               struct lp_type src_type,
               struct lp_type dst_type,
               LLVMValueRef lo,
               LLVMValueRef hi)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMTypeRef dst_vec_type = lp_build_vec_type(gallivm, dst_type);
   LLVMValueRef shuffle;
   LLVMValueRef res = NULL;
   struct lp_type intr_type = dst_type;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   /* Check for special cases first */
   if ((util_cpu_caps.has_sse2 || util_cpu_caps.has_altivec) &&
        src_type.width * src_type.length >= 128) {
      const char *intrinsic = NULL;
      boolean swap_intrinsic_operands = FALSE;

      switch(src_type.width) {
      case 32:
         if (util_cpu_caps.has_sse2) {
            if (dst_type.sign) {
               intrinsic = "llvm.x86.sse2.packssdw.128";
            } else {
               if (util_cpu_caps.has_sse4_1) {
                  intrinsic = "llvm.x86.sse41.packusdw";
               }
            }
         } else if (util_cpu_caps.has_altivec) {
            if (dst_type.sign) {
               intrinsic = "llvm.ppc.altivec.vpkswss";
            } else {
               intrinsic = "llvm.ppc.altivec.vpkuwus";
            }
#ifdef PIPE_ARCH_LITTLE_ENDIAN
            swap_intrinsic_operands = TRUE;
#endif
         }
         break;
      case 16:
         if (dst_type.sign) {
            if (util_cpu_caps.has_sse2) {
               intrinsic = "llvm.x86.sse2.packsswb.128";
            } else if (util_cpu_caps.has_altivec) {
               intrinsic = "llvm.ppc.altivec.vpkshss";
#ifdef PIPE_ARCH_LITTLE_ENDIAN
               swap_intrinsic_operands = TRUE;
#endif
            }
         } else {
            if (util_cpu_caps.has_sse2) {
               intrinsic = "llvm.x86.sse2.packuswb.128";
            } else if (util_cpu_caps.has_altivec) {
               intrinsic = "llvm.ppc.altivec.vpkshus";
#ifdef PIPE_ARCH_LITTLE_ENDIAN
               swap_intrinsic_operands = TRUE;
#endif
            }
         }
         break;
      /* default uses generic shuffle below */
      }

      if (intrinsic) {
         if (src_type.width * src_type.length == 128) {
            LLVMTypeRef intr_vec_type = lp_build_vec_type(gallivm, intr_type);
            if (swap_intrinsic_operands) {
               res = lp_build_intrinsic_binary(builder, intrinsic, intr_vec_type, hi, lo);
            } else {
               res = lp_build_intrinsic_binary(builder, intrinsic, intr_vec_type, lo, hi);
            }
            if (dst_vec_type != intr_vec_type) {
               res = LLVMBuildBitCast(builder, res, dst_vec_type, "");
            }
         }
         else {
            /* Split into 128bit chunks, pack each one, then concatenate */
            int num_split = src_type.width * src_type.length / 128;
            int i;
            int nlen = 128 / src_type.width;
            int lo_off = swap_intrinsic_operands ? nlen : 0;
            int hi_off = swap_intrinsic_operands ? 0 : nlen;
            struct lp_type ndst_type = lp_type_unorm(dst_type.width, 128);
            struct lp_type nintr_type = lp_type_unorm(intr_type.width, 128);
            LLVMValueRef tmpres[LP_MAX_VECTOR_WIDTH / 128];
            LLVMValueRef tmplo, tmphi;
            LLVMTypeRef ndst_vec_type = lp_build_vec_type(gallivm, ndst_type);
            LLVMTypeRef nintr_vec_type = lp_build_vec_type(gallivm, nintr_type);

            assert(num_split <= LP_MAX_VECTOR_WIDTH / 128);

            for (i = 0; i < num_split / 2; i++) {
               tmplo = lp_build_extract_range(gallivm,
                                              lo, i*nlen*2 + lo_off, nlen);
               tmphi = lp_build_extract_range(gallivm,
                                              lo, i*nlen*2 + hi_off, nlen);
               tmpres[i] = lp_build_intrinsic_binary(builder, intrinsic,
                                                     nintr_vec_type, tmplo, tmphi);
               if (ndst_vec_type != nintr_vec_type) {
                  tmpres[i] = LLVMBuildBitCast(builder, tmpres[i], ndst_vec_type, "");
               }
            }
            for (i = 0; i < num_split / 2; i++) {
               tmplo = lp_build_extract_range(gallivm,
                                              hi, i*nlen*2 + lo_off, nlen);
               tmphi = lp_build_extract_range(gallivm,
                                              hi, i*nlen*2 + hi_off, nlen);
               tmpres[i+num_split/2] = lp_build_intrinsic_binary(builder, intrinsic,
                                                                 nintr_vec_type,
                                                                 tmplo, tmphi);
               if (ndst_vec_type != nintr_vec_type) {
                  tmpres[i+num_split/2] = LLVMBuildBitCast(builder, tmpres[i+num_split/2],
                                                           ndst_vec_type, "");
               }
            }
            res = lp_build_concat(gallivm, tmpres, ndst_type, num_split);
         }
         if (res) {
            return res;
         }
      }
   }

   /* generic shuffle */
   lo = LLVMBuildBitCast(builder, lo, dst_vec_type, "");
   hi = LLVMBuildBitCast(builder, hi, dst_vec_type, "");

   shuffle = lp_build_const_pack_shuffle(gallivm, dst_type.length);

   res = LLVMBuildShuffleVector(builder, lo, hi, shuffle, "");

   return res;
}

/**
 * Non-interleaved pack and saturate.
 *
 * Same as lp_build_pack2 but will saturate values so that they fit into the
 * destination type.
 */
LLVMValueRef
lp_build_packs2(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                LLVMValueRef lo,
                LLVMValueRef hi)
{
   boolean clamp;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.sign == dst_type.sign);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   clamp = TRUE;

   /* All X86 SSE non-interleaved pack instructions take signed inputs and
    * saturate them, so no need to clamp for those cases. */
   if(util_cpu_caps.has_sse2 &&
      src_type.width * src_type.length >= 128 &&
      src_type.sign &&
      (src_type.width == 32 || src_type.width == 16))
      clamp = FALSE;

   if(clamp) {
      struct lp_build_context bld;
      unsigned dst_bits = dst_type.sign ? dst_type.width - 1 : dst_type.width;
      LLVMValueRef dst_max = lp_build_const_int_vec(gallivm, src_type,
                                ((unsigned long long)1 << dst_bits) - 1);
      lp_build_context_init(&bld, gallivm, src_type);
      lo = lp_build_min(&bld, lo, dst_max);
      hi = lp_build_min(&bld, hi, dst_max);
      /* FIXME: What about the lower bound? */
   }

   return lp_build_pack2(gallivm, src_type, dst_type, lo, hi);
}

/**
 * Truncate the bit width.
 *
 * TODO: Handle saturation consistently.
 */
LLVMValueRef
lp_build_pack(struct gallivm_state *gallivm,
              struct lp_type src_type,
              struct lp_type dst_type,
              boolean clamped,
              const LLVMValueRef *src, unsigned num_srcs)
{
   LLVMValueRef (*pack2)(struct gallivm_state *gallivm,
                         struct lp_type src_type,
                         struct lp_type dst_type,
                         LLVMValueRef lo,
                         LLVMValueRef hi);
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length);

   if(clamped)
      pack2 = &lp_build_pack2;
   else
      pack2 = &lp_build_packs2;

   for(i = 0; i < num_srcs; ++i)
      tmp[i] = src[i];

   /* Repeatedly halve the bit width, doubling the vector length each time */
   while(src_type.width > dst_type.width) {
      struct lp_type tmp_type = src_type;

      tmp_type.width /= 2;
      tmp_type.length *= 2;

      /* Take into consideration the sign changes only in the last step */
      if(tmp_type.width == dst_type.width)
         tmp_type.sign = dst_type.sign;

      num_srcs /= 2;

      for(i = 0; i < num_srcs; ++i)
         tmp[i] = pack2(gallivm, src_type, tmp_type,
                        tmp[2*i + 0], tmp[2*i + 1]);

      src_type = tmp_type;
   }

   assert(num_srcs == 1);

   return tmp[0];
}
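
/*
 * Usage sketch (illustrative only, names assumed): packing four 4x32
 * vectors down to a single 16x8 vector in two steps (32 -> 16 -> 8):
 *
 *    LLVMValueRef res = lp_build_pack(gallivm, type4x32, type16x8,
 *                                     FALSE, srcs, 4);
 *
 * Passing clamped = FALSE makes each step saturate via lp_build_packs2;
 * with TRUE the caller guarantees the values already fit.
 */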

/**
 * Truncate or expand the bitwidth.
 *
 * NOTE: Getting the right sign flags is crucial here, as we employ some
 * intrinsics that do saturation.
 */
void
lp_build_resize(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                const LLVMValueRef *src, unsigned num_srcs,
                LLVMValueRef *dst, unsigned num_dsts)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   /*
    * We don't support float <-> int conversion here. That must be done
    * before/after calling this function.
    */
   assert(src_type.floating == dst_type.floating);

   /*
    * We don't support double <-> float conversion yet, although it could be
    * added with little effort.
    */
   assert((!src_type.floating && !dst_type.floating) ||
          src_type.width == dst_type.width);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
   assert(num_dsts <= LP_MAX_VECTOR_LENGTH);

   if (src_type.width > dst_type.width) {
      /*
       * Truncate bit width.
       */

      /* Conversion must be M:1 */
      assert(num_dsts == 1);

      if (src_type.width * src_type.length == dst_type.width * dst_type.length) {
         /*
          * Register width remains constant -- use vector packing intrinsics
          */
         tmp[0] = lp_build_pack(gallivm, src_type, dst_type, TRUE, src, num_srcs);
      }
      else {
         if (src_type.width / dst_type.width > num_srcs) {
            /*
             * First change src vectors size (with shuffle) so they have the
             * same size as the destination vector, then pack normally.
             * Note: cannot use cast/extract because llvm generates atrocious code.
             */
            unsigned size_ratio = (src_type.width * src_type.length) /
                                  (dst_type.length * dst_type.width);
            unsigned new_length = src_type.length / size_ratio;

            for (i = 0; i < size_ratio * num_srcs; i++) {
               unsigned start_index = (i % size_ratio) * new_length;
               tmp[i] = lp_build_extract_range(gallivm, src[i / size_ratio],
                                               start_index, new_length);
            }
            num_srcs *= size_ratio;
            src_type.length = new_length;
            tmp[0] = lp_build_pack(gallivm, src_type, dst_type, TRUE, tmp, num_srcs);
         }
         else {
            /*
             * Truncate bit width but expand vector size - first pack
             * then expand simply because this should be more AVX-friendly
             * for the cases we probably hit.
             */
            unsigned size_ratio = (dst_type.width * dst_type.length) /
                                  (src_type.length * src_type.width);
            unsigned num_pack_srcs = num_srcs / size_ratio;
            dst_type.length = dst_type.length / size_ratio;

            for (i = 0; i < size_ratio; i++) {
               tmp[i] = lp_build_pack(gallivm, src_type, dst_type, TRUE,
                                      &src[i*num_pack_srcs], num_pack_srcs);
            }
            tmp[0] = lp_build_concat(gallivm, tmp, dst_type, size_ratio);
         }
      }
   }
   else if (src_type.width < dst_type.width) {
      /*
       * Expand bit width.
       */

      /* Conversion must be 1:N */
      assert(num_srcs == 1);

      if (src_type.width * src_type.length == dst_type.width * dst_type.length) {
         /*
          * Register width remains constant -- use vector unpack intrinsics
          */
         lp_build_unpack(gallivm, src_type, dst_type, src[0], tmp, num_dsts);
      }
      else {
         /*
          * Do it element-wise.
          */
         assert(src_type.length * num_srcs == dst_type.length * num_dsts);

         for (i = 0; i < num_dsts; i++) {
            tmp[i] = lp_build_undef(gallivm, dst_type);
         }

         for (i = 0; i < src_type.length; ++i) {
            unsigned j = i / dst_type.length;
            LLVMValueRef srcindex = lp_build_const_int32(gallivm, i);
            LLVMValueRef dstindex = lp_build_const_int32(gallivm, i % dst_type.length);
            LLVMValueRef val = LLVMBuildExtractElement(builder, src[0], srcindex, "");

            if (src_type.sign && dst_type.sign) {
               val = LLVMBuildSExt(builder, val, lp_build_elem_type(gallivm, dst_type), "");
            } else {
               val = LLVMBuildZExt(builder, val, lp_build_elem_type(gallivm, dst_type), "");
            }
            tmp[j] = LLVMBuildInsertElement(builder, tmp[j], val, dstindex, "");
         }
      }
   }
   else {
      /*
       * No-op
       */

      /* "Conversion" must be N:N */
      assert(num_srcs == num_dsts);

      for(i = 0; i < num_dsts; ++i)
         tmp[i] = src[i];
   }

   for(i = 0; i < num_dsts; ++i)
      dst[i] = tmp[i];
}
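
/*
 * Usage sketch (illustrative only, names assumed): expanding one 16x8
 * vector into four 4x32 vectors (a 1:4 conversion at constant register
 * width):
 *
 *    LLVMValueRef dst[4];
 *    lp_build_resize(gallivm, type16x8, type4x32, &src, 1, dst, 4);
 *
 * Truncation runs the other way (M:1), and equal widths degenerate into a
 * plain copy.
 */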

/**
 * Expands src vector from src.length to dst_length
 */
LLVMValueRef
lp_build_pad_vector(struct gallivm_state *gallivm,
                    LLVMValueRef src,
                    unsigned dst_length)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   LLVMValueRef undef;
   LLVMTypeRef type;
   unsigned i, src_length;

   type = LLVMTypeOf(src);

   if (LLVMGetTypeKind(type) != LLVMVectorTypeKind) {
      /* Can't use ShuffleVector on non-vector type */
      undef = LLVMGetUndef(LLVMVectorType(type, dst_length));
      return LLVMBuildInsertElement(gallivm->builder, undef, src,
                                    lp_build_const_int32(gallivm, 0), "");
   }

   undef      = LLVMGetUndef(type);
   src_length = LLVMGetVectorSize(type);

   assert(dst_length <= ARRAY_SIZE(elems));
   assert(dst_length >= src_length);

   if (src_length == dst_length)
      return src;

   /* All elements from src vector */
   for (i = 0; i < src_length; ++i)
      elems[i] = lp_build_const_int32(gallivm, i);

   /* Undef fill remaining space */
   for (i = src_length; i < dst_length; ++i)
      elems[i] = lp_build_const_int32(gallivm, src_length);

   /* Combine the two vectors */
   return LLVMBuildShuffleVector(gallivm->builder, src, undef,
                                 LLVMConstVector(elems, dst_length), "");
}