/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Helper functions for packing/unpacking.
 *
 * Pack/unpacking is necessary for conversion between types of different
 * bit width.
 *
 * They are also commonly used when a computation needs higher precision
 * for the intermediate values, for example, when multiplying two uint8
 * vectors with uint16 intermediate products. When a computation needs
 * to use more precision for intermediate results, it should be
 * implemented like this:
 *
 *   static LLVMValueRef
 *   compute(struct gallivm_state *gallivm, struct lp_type type,
 *           LLVMValueRef a, LLVMValueRef b)
 *   {
 *      struct lp_type wide_type = lp_wider_type(type);
 *      LLVMValueRef al, ah, bl, bh, cl, ch, c;
 *
 *      lp_build_unpack2(gallivm, type, wide_type, a, &al, &ah);
 *      lp_build_unpack2(gallivm, type, wide_type, b, &bl, &bh);
 *
 *      cl = compute_half(al, bl);
 *      ch = compute_half(ah, bh);
 *
 *      c = lp_build_pack2(gallivm, wide_type, type, cl, ch);
 *
 *      return c;
 *   }
 *
 * where compute_half() would do the computation for half the elements with
 * twice the precision.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */

#include "util/u_debug.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"
#include "util/u_memory.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_init.h"
#include "lp_bld_intr.h"
#include "lp_bld_arit.h"
#include "lp_bld_pack.h"
#include "lp_bld_swizzle.h"


/**
 * Build shuffle vectors that match PUNPCKLxx and PUNPCKHxx instructions.
 */
static LLVMValueRef
lp_build_const_unpack_shuffle(struct gallivm_state *gallivm,
                              unsigned n, unsigned lo_hi)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i, j;

   assert(n <= LP_MAX_VECTOR_LENGTH);
   assert(lo_hi < 2);

   /* TODO: cache results in a static table */

   for(i = 0, j = lo_hi*n/2; i < n; i += 2, ++j) {
      elems[i + 0] = lp_build_const_int32(gallivm, 0 + j);
      elems[i + 1] = lp_build_const_int32(gallivm, n + j);
   }

   return LLVMConstVector(elems, n);
}
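
/*
 * Worked example: for n = 4 the masks produced above are
 *
 *    lo_hi = 0:  0 4 1 5   (the PUNPCKLxx pattern)
 *    lo_hi = 1:  2 6 3 7   (the PUNPCKHxx pattern)
 *
 * i.e. the lo (hi) result alternates elements from the lower (upper)
 * halves of the two source vectors.
 */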

/**
 * Similar to lp_build_const_unpack_shuffle but for special AVX 256bit unpack.
 * See comment above lp_build_interleave2_half for more details.
 */
static LLVMValueRef
lp_build_const_unpack_shuffle_half(struct gallivm_state *gallivm,
                                   unsigned n, unsigned lo_hi)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i, j;

   assert(n <= LP_MAX_VECTOR_LENGTH);
   assert(lo_hi < 2);

   for (i = 0, j = lo_hi*(n/4); i < n; i += 2, ++j) {
      /* Skip ahead at the lane boundary so each 128 bit half is
       * interleaved independently */
      if (i == n/2) {
         j += n/4;
      }

      elems[i + 0] = lp_build_const_int32(gallivm, 0 + j);
      elems[i + 1] = lp_build_const_int32(gallivm, n + j);
   }

   return LLVMConstVector(elems, n);
}
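
/*
 * Worked example: for n = 8 (a 256 bit vector of 32 bit elements) the masks
 * are
 *
 *    lo_hi = 0:  0 8 1 9  4 12 5 13
 *    lo_hi = 1:  2 10 3 11  6 14 7 15
 *
 * which is the lo/hi interleave applied independently within each 128 bit
 * lane, matching what the AVX unpack instructions do.
 */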

/**
 * Build shuffle vectors that match PACKxx instructions.
 */
static LLVMValueRef
lp_build_const_pack_shuffle(struct gallivm_state *gallivm, unsigned n)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   assert(n <= LP_MAX_VECTOR_LENGTH);

   for(i = 0; i < n; ++i)
      elems[i] = lp_build_const_int32(gallivm, 2*i);

   return LLVMConstVector(elems, n);
}
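
/*
 * Worked example: for n = 8 the mask is 0 2 4 6 8 10 12 14. After bitcasting
 * lo and hi to the (twice as long) destination vector type, this keeps every
 * other element -- on little-endian, the low half of each wide element.
 */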

/**
 * Return a vector with elements src[start:start+size]
 * Most useful for getting half the values out of a 256bit sized vector,
 * otherwise may cause data rearrangement to happen.
 */
LLVMValueRef
lp_build_extract_range(struct gallivm_state *gallivm,
                       LLVMValueRef src,
                       unsigned start,
                       unsigned size)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   assert(size <= Elements(elems));

   for (i = 0; i < size; ++i)
      elems[i] = lp_build_const_int32(gallivm, i + start);

   if (size == 1) {
      return LLVMBuildExtractElement(gallivm->builder, src, elems[0], "");
   }
   else {
      return LLVMBuildShuffleVector(gallivm->builder, src, src,
                                    LLVMConstVector(elems, size), "");
   }
}

/**
 * Concatenates several (must be a power of 2) vectors (of same type)
 * into a larger one.
 * Most useful for building up a 256bit sized vector out of two 128bit ones.
 */
LLVMValueRef
lp_build_concat(struct gallivm_state *gallivm,
                LLVMValueRef src[],
                struct lp_type src_type,
                unsigned num_vectors)
{
   unsigned new_length, i;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH/2];
   LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH];

   assert(src_type.length * num_vectors <= Elements(shuffles));
   assert(util_is_power_of_two(num_vectors));

   new_length = src_type.length;

   for (i = 0; i < num_vectors; i++)
      tmp[i] = src[i];

   while (num_vectors > 1) {
      num_vectors >>= 1;
      new_length <<= 1;

      for (i = 0; i < new_length; i++) {
         shuffles[i] = lp_build_const_int32(gallivm, i);
      }

      for (i = 0; i < num_vectors; i++) {
         tmp[i] = LLVMBuildShuffleVector(gallivm->builder, tmp[i*2], tmp[i*2 + 1],
                                         LLVMConstVector(shuffles, new_length), "");
      }
   }

   return tmp[0];
}

/**
 * Interleave vector elements.
 *
 * Matches the PUNPCKLxx and PUNPCKHxx SSE instructions.
 */
LLVMValueRef
lp_build_interleave2(struct gallivm_state *gallivm,
                     struct lp_type type,
                     LLVMValueRef a,
                     LLVMValueRef b,
                     unsigned lo_hi)
{
   LLVMValueRef shuffle;

   shuffle = lp_build_const_unpack_shuffle(gallivm, type.length, lo_hi);

   return LLVMBuildShuffleVector(gallivm->builder, a, b, shuffle, "");
}

/**
 * Interleave vector elements, but with 256 bit vectors treated as the
 * interleave of 2 concatenated 128 bit vectors.
 *
 * This differs from lp_build_interleave2, which would produce the following
 * (for lo):
 * a0 b0 a1 b1 a2 b2 a3 b3, and that does not compile into an AVX unpack
 * instruction.
 *
 * An example interleave of 8x float with 8x float on AVX 256bit unpack:
 *   a0 a1 a2 a3 a4 a5 a6 a7 <-> b0 b1 b2 b3 b4 b5 b6 b7
 *
 * Equivalent to interleaving 2x 128 bit vectors
 *   a0 a1 a2 a3 <-> b0 b1 b2 b3 concatenated with a4 a5 a6 a7 <-> b4 b5 b6 b7
 *
 * So interleave-lo would result in:
 *   a0 b0 a1 b1 a4 b4 a5 b5
 *
 * And interleave-hi would result in:
 *   a2 b2 a3 b3 a6 b6 a7 b7
 */
LLVMValueRef
lp_build_interleave2_half(struct gallivm_state *gallivm,
                          struct lp_type type,
                          LLVMValueRef a,
                          LLVMValueRef b,
                          unsigned lo_hi)
{
   if (type.length * type.width == 256) {
      LLVMValueRef shuffle = lp_build_const_unpack_shuffle_half(gallivm, type.length, lo_hi);
      return LLVMBuildShuffleVector(gallivm->builder, a, b, shuffle, "");
   }
   else {
      return lp_build_interleave2(gallivm, type, a, b, lo_hi);
   }
}

/**
 * Double the bit width.
 *
 * This will only change the number of bits used to represent the values,
 * not the values themselves.
 */
void
lp_build_unpack2(struct gallivm_state *gallivm,
                 struct lp_type src_type,
                 struct lp_type dst_type,
                 LLVMValueRef src,
                 LLVMValueRef *dst_lo,
                 LLVMValueRef *dst_hi)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef msb;
   LLVMTypeRef dst_vec_type;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(dst_type.width == src_type.width * 2);
   assert(dst_type.length * 2 == src_type.length);

   if(dst_type.sign && src_type.sign) {
      /* Replicate the sign bit in the most significant bits */
      msb = LLVMBuildAShr(builder, src, lp_build_const_int_vec(gallivm, src_type, src_type.width - 1), "");
   }
   else
      /* Most significant bits always zero */
      msb = lp_build_zero(gallivm, src_type);

   /* Interleave bits */
#ifdef PIPE_ARCH_LITTLE_ENDIAN
   *dst_lo = lp_build_interleave2(gallivm, src_type, src, msb, 0);
   *dst_hi = lp_build_interleave2(gallivm, src_type, src, msb, 1);
#else
   *dst_lo = lp_build_interleave2(gallivm, src_type, msb, src, 0);
   *dst_hi = lp_build_interleave2(gallivm, src_type, msb, src, 1);
#endif

   /* Cast the result into the new type (twice as wide) */

   dst_vec_type = lp_build_vec_type(gallivm, dst_type);

   *dst_lo = LLVMBuildBitCast(builder, *dst_lo, dst_vec_type, "");
   *dst_hi = LLVMBuildBitCast(builder, *dst_hi, dst_vec_type, "");
}

/**
 * Expand the bit width.
 *
 * This will only change the number of bits used to represent the values,
 * not the values themselves.
 */
void
lp_build_unpack(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                LLVMValueRef src,
                LLVMValueRef *dst, unsigned num_dsts)
{
   unsigned num_tmps;
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length == dst_type.length * num_dsts);

   num_tmps = 1;
   dst[0] = src;

   while(src_type.width < dst_type.width) {
      struct lp_type tmp_type = src_type;

      tmp_type.width *= 2;
      tmp_type.length /= 2;

      for(i = num_tmps; i--; ) {
         lp_build_unpack2(gallivm, src_type, tmp_type, dst[i], &dst[2*i + 0], &dst[2*i + 1]);
      }

      src_type = tmp_type;

      num_tmps *= 2;
   }

   assert(num_tmps == num_dsts);
}
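
/*
 * A minimal usage sketch (illustrative): expanding 16 x uint8 into four
 * 4 x uint32 vectors in one call, assuming type8 describes the source:
 *
 *    struct lp_type type32 = type8;
 *    type32.width *= 4;
 *    type32.length /= 4;
 *
 *    LLVMValueRef dst[4];
 *    lp_build_unpack(gallivm, type8, type32, src, dst, 4);
 */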

/**
 * Non-interleaved pack.
 *
 * This will move values as
 *
 *   lo =   l0 __ l1 __ l2 __..  __ ln __
 *   hi =   h0 __ h1 __ h2 __..  __ hn __
 *   res =  l0 l1 l2 .. ln h0 h1 h2 .. hn
 *
 * This will only change the number of bits used to represent the values,
 * not the values themselves.
 *
 * It is assumed the values are already clamped into the destination type
 * range. Values outside that range will produce undefined results; use
 * lp_build_packs2 instead in that case.
 */
LLVMValueRef
lp_build_pack2(struct gallivm_state *gallivm,
               struct lp_type src_type,
               struct lp_type dst_type,
               LLVMValueRef lo,
               LLVMValueRef hi)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMTypeRef dst_vec_type = lp_build_vec_type(gallivm, dst_type);
   LLVMValueRef shuffle;
   LLVMValueRef res = NULL;
   struct lp_type intr_type = dst_type;

#if HAVE_LLVM < 0x0207
   intr_type = src_type;
#endif

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   /* Check for special cases first */
   if(util_cpu_caps.has_sse2 && src_type.width * src_type.length >= 128) {
      const char *intrinsic = NULL;

      switch(src_type.width) {
      case 32:
         if (dst_type.sign) {
            intrinsic = "llvm.x86.sse2.packssdw.128";
         }
         else {
            if (util_cpu_caps.has_sse4_1) {
               intrinsic = "llvm.x86.sse41.packusdw";
#if HAVE_LLVM < 0x0207
               /* llvm < 2.7 has inconsistent signatures except for packusdw */
               intr_type = dst_type;
#endif
            }
         }
         break;
      case 16:
         if (dst_type.sign) {
            intrinsic = "llvm.x86.sse2.packsswb.128";
         }
         else {
            intrinsic = "llvm.x86.sse2.packuswb.128";
         }
         break;
      /* default uses generic shuffle below */
      }
      if (intrinsic) {
         if (src_type.width * src_type.length == 128) {
            LLVMTypeRef intr_vec_type = lp_build_vec_type(gallivm, intr_type);
            res = lp_build_intrinsic_binary(builder, intrinsic, intr_vec_type, lo, hi);
            if (dst_vec_type != intr_vec_type) {
               res = LLVMBuildBitCast(builder, res, dst_vec_type, "");
            }
         }
         else {
            int num_split = src_type.width * src_type.length / 128;
            int i;
            int nlen = 128 / src_type.width;
            struct lp_type ndst_type = lp_type_unorm(dst_type.width, 128);
            struct lp_type nintr_type = lp_type_unorm(intr_type.width, 128);
            LLVMValueRef tmpres[LP_MAX_VECTOR_WIDTH / 128];
            LLVMValueRef tmplo, tmphi;
            LLVMTypeRef ndst_vec_type = lp_build_vec_type(gallivm, ndst_type);
            LLVMTypeRef nintr_vec_type = lp_build_vec_type(gallivm, nintr_type);

            assert(num_split <= LP_MAX_VECTOR_WIDTH / 128);

            for (i = 0; i < num_split / 2; i++) {
               tmplo = lp_build_extract_range(gallivm,
                                              lo, i*nlen*2, nlen);
               tmphi = lp_build_extract_range(gallivm,
                                              lo, i*nlen*2 + nlen, nlen);
               tmpres[i] = lp_build_intrinsic_binary(builder, intrinsic,
                                                     nintr_vec_type, tmplo, tmphi);
               if (ndst_vec_type != nintr_vec_type) {
                  tmpres[i] = LLVMBuildBitCast(builder, tmpres[i], ndst_vec_type, "");
               }
            }
            for (i = 0; i < num_split / 2; i++) {
               tmplo = lp_build_extract_range(gallivm,
                                              hi, i*nlen*2, nlen);
               tmphi = lp_build_extract_range(gallivm,
                                              hi, i*nlen*2 + nlen, nlen);
               tmpres[i+num_split/2] = lp_build_intrinsic_binary(builder, intrinsic,
                                                                 nintr_vec_type,
                                                                 tmplo, tmphi);
               if (ndst_vec_type != nintr_vec_type) {
                  tmpres[i+num_split/2] = LLVMBuildBitCast(builder, tmpres[i+num_split/2],
                                                           ndst_vec_type, "");
               }
            }
            res = lp_build_concat(gallivm, tmpres, ndst_type, num_split);
         }
         return res;
      }
   }

   /* generic shuffle */
   lo = LLVMBuildBitCast(builder, lo, dst_vec_type, "");
   hi = LLVMBuildBitCast(builder, hi, dst_vec_type, "");

   shuffle = lp_build_const_pack_shuffle(gallivm, dst_type.length);

   res = LLVMBuildShuffleVector(builder, lo, hi, shuffle, "");

   return res;
}

/**
 * Non-interleaved pack and saturate.
 *
 * Same as lp_build_pack2 but will saturate values so that they fit into the
 * destination type.
 */
LLVMValueRef
lp_build_packs2(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                LLVMValueRef lo,
                LLVMValueRef hi)
{
   boolean clamp;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.sign == dst_type.sign);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   clamp = TRUE;

   /* All X86 SSE non-interleaved pack instructions take signed inputs and
    * saturate them, so no need to clamp for those cases. */
   if(util_cpu_caps.has_sse2 &&
      src_type.width * src_type.length >= 128 &&
      src_type.sign &&
      (src_type.width == 32 || src_type.width == 16))
      clamp = FALSE;

   if(clamp) {
      struct lp_build_context bld;
      unsigned dst_bits = dst_type.sign ? dst_type.width - 1 : dst_type.width;
      LLVMValueRef dst_max = lp_build_const_int_vec(gallivm, src_type, ((unsigned long long)1 << dst_bits) - 1);
      lp_build_context_init(&bld, gallivm, src_type);
      lo = lp_build_min(&bld, lo, dst_max);
      hi = lp_build_min(&bld, hi, dst_max);
      /* FIXME: What about the lower bound? */
   }

   return lp_build_pack2(gallivm, src_type, dst_type, lo, hi);
}

/**
 * Truncate the bit width.
 *
 * TODO: Handle saturation consistently.
 */
LLVMValueRef
lp_build_pack(struct gallivm_state *gallivm,
              struct lp_type src_type,
              struct lp_type dst_type,
              boolean clamped,
              const LLVMValueRef *src, unsigned num_srcs)
{
   LLVMValueRef (*pack2)(struct gallivm_state *gallivm,
                         struct lp_type src_type,
                         struct lp_type dst_type,
                         LLVMValueRef lo,
                         LLVMValueRef hi);
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length);

   if(clamped)
      pack2 = &lp_build_pack2;
   else
      pack2 = &lp_build_packs2;

   for(i = 0; i < num_srcs; ++i)
      tmp[i] = src[i];

   while(src_type.width > dst_type.width) {
      struct lp_type tmp_type = src_type;

      tmp_type.width /= 2;
      tmp_type.length *= 2;

      /* Take the sign change into consideration only in the last step */
      if(tmp_type.width == dst_type.width)
         tmp_type.sign = dst_type.sign;

      num_srcs /= 2;

      for(i = 0; i < num_srcs; ++i)
         tmp[i] = pack2(gallivm, src_type, tmp_type,
                        tmp[2*i + 0], tmp[2*i + 1]);

      src_type = tmp_type;
   }

   assert(num_srcs == 1);

   return tmp[0];
}
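
/*
 * A minimal usage sketch (illustrative): truncating four 4 x uint32 vectors
 * down to a single 16 x uint8 vector, letting the helper saturate the values
 * (clamped == FALSE means they are not pre-clamped):
 *
 *    LLVMValueRef res = lp_build_pack(gallivm, type32, type8, FALSE, src, 4);
 */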

/**
 * Truncate or expand the bitwidth.
 *
 * NOTE: Getting the right sign flags is crucial here, as we employ some
 * intrinsics that do saturation.
 */
void
lp_build_resize(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                const LLVMValueRef *src, unsigned num_srcs,
                LLVMValueRef *dst, unsigned num_dsts)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   /*
    * We don't support float <-> int conversion here. That must be done
    * before/after calling this function.
    */
   assert(src_type.floating == dst_type.floating);

   /*
    * We don't support double <-> float conversion yet, although it could be
    * added with little effort.
    */
   assert((!src_type.floating && !dst_type.floating) ||
          src_type.width == dst_type.width);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   /* We don't support M:N conversion, only 1:N, M:1, or 1:1 */
   assert(num_srcs == 1 || num_dsts == 1);

   assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
   assert(num_dsts <= LP_MAX_VECTOR_LENGTH);

   if (src_type.width > dst_type.width) {
      /*
       * Truncate bit width.
       */

      assert(num_dsts == 1);

      if (src_type.width * src_type.length == dst_type.width * dst_type.length) {
         /*
          * Register width remains constant -- use vector packing intrinsics
          */
         tmp[0] = lp_build_pack(gallivm, src_type, dst_type, TRUE, src, num_srcs);
      }
      else {
         if (src_type.width / dst_type.width > num_srcs) {
            /*
             * First change src vectors size (with shuffle) so they have the
             * same size as the destination vector, then pack normally.
             * Note: cannot use cast/extract because llvm generates atrocious code.
             */
            unsigned size_ratio = (src_type.width * src_type.length) /
                                  (dst_type.length * dst_type.width);
            unsigned new_length = src_type.length / size_ratio;

            for (i = 0; i < size_ratio * num_srcs; i++) {
               unsigned start_index = (i % size_ratio) * new_length;
               tmp[i] = lp_build_extract_range(gallivm, src[i / size_ratio],
                                               start_index, new_length);
            }
            num_srcs *= size_ratio;
            src_type.length = new_length;
            tmp[0] = lp_build_pack(gallivm, src_type, dst_type, TRUE, tmp, num_srcs);
         }
         else {
            /*
             * Truncate bit width but expand vector size - first pack
             * then expand simply because this should be more AVX-friendly
             * for the cases we probably hit.
             */
            unsigned size_ratio = (dst_type.width * dst_type.length) /
                                  (src_type.length * src_type.width);
            unsigned num_pack_srcs = num_srcs / size_ratio;
            dst_type.length = dst_type.length / size_ratio;

            for (i = 0; i < size_ratio; i++) {
               tmp[i] = lp_build_pack(gallivm, src_type, dst_type, TRUE,
                                      &src[i*num_pack_srcs], num_pack_srcs);
            }
            tmp[0] = lp_build_concat(gallivm, tmp, dst_type, size_ratio);
         }
      }
   }
   else if (src_type.width < dst_type.width) {
      /*
       * Expand bit width.
       */

      assert(num_srcs == 1);

      if (src_type.width * src_type.length == dst_type.width * dst_type.length) {
         /*
          * Register width remains constant -- use vector unpack intrinsics
          */
         lp_build_unpack(gallivm, src_type, dst_type, src[0], tmp, num_dsts);
      }
      else {
         /*
          * Do it element-wise.
          */
         assert(src_type.length * num_srcs == dst_type.length * num_dsts);

         for (i = 0; i < num_dsts; i++) {
            tmp[i] = lp_build_undef(gallivm, dst_type);
         }

         for (i = 0; i < src_type.length; ++i) {
            unsigned j = i / dst_type.length;
            LLVMValueRef srcindex = lp_build_const_int32(gallivm, i);
            LLVMValueRef dstindex = lp_build_const_int32(gallivm, i % dst_type.length);
            LLVMValueRef val = LLVMBuildExtractElement(builder, src[0], srcindex, "");

            if (src_type.sign && dst_type.sign) {
               val = LLVMBuildSExt(builder, val, lp_build_elem_type(gallivm, dst_type), "");
            }
            else {
               val = LLVMBuildZExt(builder, val, lp_build_elem_type(gallivm, dst_type), "");
            }
            tmp[j] = LLVMBuildInsertElement(builder, tmp[j], val, dstindex, "");
         }
      }
   }
   else {
      /*
       * No-op
       */

      assert(num_srcs == 1);
      assert(num_dsts == 1);

      tmp[0] = src[0];
   }

   for(i = 0; i < num_dsts; ++i)
      dst[i] = tmp[i];
}
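
/*
 * A minimal usage sketch (illustrative): a 1:N expansion of one 16 x uint8
 * vector into four 4 x uint32 vectors, assuming type8/type32 are set up
 * accordingly (16 * 1 == 4 * 4 channels, so the asserts above hold):
 *
 *    LLVMValueRef dst[4];
 *    lp_build_resize(gallivm, type8, type32, &src, 1, dst, 4);
 */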

/**
 * Expands the src vector from src.length to dst_length.
 */
LLVMValueRef
lp_build_pad_vector(struct gallivm_state *gallivm,
                    LLVMValueRef src,
                    unsigned dst_length)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   LLVMValueRef undef;
   LLVMTypeRef type;
   unsigned i, src_length;

   type = LLVMTypeOf(src);

   if (LLVMGetTypeKind(type) != LLVMVectorTypeKind) {
      /* Can't use ShuffleVector on non-vector type */
      undef = LLVMGetUndef(LLVMVectorType(type, dst_length));
      return LLVMBuildInsertElement(gallivm->builder, undef, src, lp_build_const_int32(gallivm, 0), "");
   }

   undef      = LLVMGetUndef(type);
   src_length = LLVMGetVectorSize(type);

   assert(dst_length <= Elements(elems));
   assert(dst_length >= src_length);

   if (src_length == dst_length)
      return src;

   /* All elements from src vector */
   for (i = 0; i < src_length; ++i)
      elems[i] = lp_build_const_int32(gallivm, i);

   /* Undef fill remaining space */
   for (i = src_length; i < dst_length; ++i)
      elems[i] = lp_build_const_int32(gallivm, src_length);

   /* Combine the two vectors */
   return LLVMBuildShuffleVector(gallivm->builder, src, undef, LLVMConstVector(elems, dst_length), "");
}