/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Helper functions for packing/unpacking.
 *
 * Pack/unpacking is necessary for conversion between types of different
 * bit width.
 *
 * They are also commonly used when a computation needs higher
 * precision for the intermediate values. For example, if one needs
 * to use more precision for intermediate results than the input type
 * provides, then one should implement it as:
 *
 *   LLVMValueRef
 *   compute(struct gallivm_state *gallivm, struct lp_type type,
 *           LLVMValueRef a, LLVMValueRef b)
 *   {
 *      struct lp_type wide_type = lp_wider_type(type);
 *      LLVMValueRef al, ah, bl, bh, cl, ch, c;
 *
 *      lp_build_unpack2(gallivm, type, wide_type, a, &al, &ah);
 *      lp_build_unpack2(gallivm, type, wide_type, b, &bl, &bh);
 *
 *      cl = compute_half(al, bl);
 *      ch = compute_half(ah, bh);
 *
 *      c = lp_build_pack2(gallivm, wide_type, type, cl, ch);
 *
 *      return c;
 *   }
 *
 * where compute_half() would do the computation for half the elements with
 * twice the precision.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */

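/*
 * For illustration (a sketch, not code from this file): with hypothetical
 * u8_type and u16_type lp_type values (width 8 resp. 16, the latter with
 * half the length), an 8 bit multiply done at 16 bit intermediate precision
 * would look like
 *
 *    lp_build_unpack2(gallivm, u8_type, u16_type, a, &al, &ah);
 *    lp_build_unpack2(gallivm, u8_type, u16_type, b, &bl, &bh);
 *    cl = LLVMBuildMul(gallivm->builder, al, bl, "");
 *    ch = LLVMBuildMul(gallivm->builder, ah, bh, "");
 *    // scale the products back into the 8 bit range here (e.g. >> 8 for
 *    // a normalized multiply) since lp_build_pack2 assumes clamped input
 *    c  = lp_build_pack2(gallivm, u16_type, u8_type, cl, ch);
 */
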
#include "util/u_debug.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"
#include "util/u_memory.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_init.h"
#include "lp_bld_intr.h"
#include "lp_bld_arit.h"
#include "lp_bld_pack.h"
#include "lp_bld_swizzle.h"

/**
 * Build shuffle vectors that match PUNPCKLxx and PUNPCKHxx instructions.
 */
static LLVMValueRef
lp_build_const_unpack_shuffle(struct gallivm_state *gallivm,
                              unsigned n, unsigned lo_hi)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i, j;

   assert(n <= LP_MAX_VECTOR_LENGTH);
   assert(lo_hi < 2);

   /* TODO: cache results in a static table */

   for(i = 0, j = lo_hi*n/2; i < n; i += 2, ++j) {
      elems[i + 0] = lp_build_const_int32(gallivm, 0 + j);
      elems[i + 1] = lp_build_const_int32(gallivm, n + j);
   }

   return LLVMConstVector(elems, n);
}

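/*
 * For example, with n = 8 the masks built above are
 *
 *    lo_hi = 0:  0 8 1 9 2 10 3 11    (punpckl)
 *    lo_hi = 1:  4 12 5 13 6 14 7 15  (punpckh)
 *
 * where indices 0..7 select elements of the first operand and 8..15
 * elements of the second.
 */
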
/**
 * Similar to lp_build_const_unpack_shuffle but for special AVX 256bit unpack.
 * See comment above lp_build_interleave2_half for more details.
 */
static LLVMValueRef
lp_build_const_unpack_shuffle_half(struct gallivm_state *gallivm,
                                   unsigned n, unsigned lo_hi)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i, j;

   assert(n <= LP_MAX_VECTOR_LENGTH);
   assert(lo_hi < 2);

   for (i = 0, j = lo_hi*(n/4); i < n; i += 2, ++j) {
      /* skip to the second 128bit lane halfway through */
      if (i == n / 2)
         j += n / 4;

      elems[i + 0] = lp_build_const_int32(gallivm, 0 + j);
      elems[i + 1] = lp_build_const_int32(gallivm, n + j);
   }

   return LLVMConstVector(elems, n);
}

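/*
 * For example, with n = 8 (8x32 on AVX) this yields
 *
 *    lo_hi = 0:  0 8 1 9 4 12 5 13
 *    lo_hi = 1:  2 10 3 11 6 14 7 15
 *
 * i.e. each 128bit lane is unpacked independently.
 */
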
/**
 * Similar to lp_build_const_unpack_shuffle_half, but for AVX512.
 * See comment above lp_build_interleave2_half for more details.
 */
static LLVMValueRef
lp_build_const_unpack_shuffle_16wide(struct gallivm_state *gallivm,
                                     unsigned lo_hi)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i, j;

   assert(lo_hi < 2);

   // for the following lo_hi setting, convert 0 -> f to:
   // 0: 0 16 4 20 8 24 12 28 1 17 5 21 9 25 13 29
   // 1: 2 18 6 22 10 26 14 30 3 19 7 23 11 27 15 31
   for (i = 0; i < 16; i++) {
      j = ((i&0x06)<<1) + ((i&1)<<4) + (i>>3) + (lo_hi<<1);

      elems[i] = lp_build_const_int32(gallivm, j);
   }

   return LLVMConstVector(elems, 16);
}

/**
 * Build shuffle vectors that match PACKxx (SSE) instructions or
 * VPERM (Altivec).
 */
static LLVMValueRef
lp_build_const_pack_shuffle(struct gallivm_state *gallivm, unsigned n)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   assert(n <= LP_MAX_VECTOR_LENGTH);

   for(i = 0; i < n; ++i)
#if UTIL_ARCH_LITTLE_ENDIAN
      elems[i] = lp_build_const_int32(gallivm, 2*i);
#else
      elems[i] = lp_build_const_int32(gallivm, 2*i+1);
#endif

   return LLVMConstVector(elems, n);
}

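/*
 * E.g. for n = 8 on little endian this builds the mask 0 2 4 6 8 10 12 14,
 * picking the low (narrow) half of every wide element once lo and hi have
 * been bitcast to the narrower destination type.
 */
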
/**
 * Return a vector with elements src[start:start+size]
 * Most useful for getting half the values out of a 256bit sized vector,
 * otherwise may cause data rearrangement to happen.
 */
LLVMValueRef
lp_build_extract_range(struct gallivm_state *gallivm,
                       LLVMValueRef src,
                       unsigned start,
                       unsigned size)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   assert(size <= ARRAY_SIZE(elems));

   for (i = 0; i < size; ++i)
      elems[i] = lp_build_const_int32(gallivm, i + start);

   if (size == 1) {
      return LLVMBuildExtractElement(gallivm->builder, src, elems[0], "");
   }
   else {
      return LLVMBuildShuffleVector(gallivm->builder, src, src,
                                    LLVMConstVector(elems, size), "");
   }
}

/**
 * Concatenates several (must be a power of 2) vectors (of same type)
 * into a larger one.
 * Most useful for building up a 256bit sized vector out of two 128bit ones.
 */
LLVMValueRef
lp_build_concat(struct gallivm_state *gallivm,
                LLVMValueRef src[],
                struct lp_type src_type,
                unsigned num_vectors)
{
   unsigned new_length, i;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH/2];
   LLVMValueRef shuffles[LP_MAX_VECTOR_LENGTH];

   assert(src_type.length * num_vectors <= ARRAY_SIZE(shuffles));
   assert(util_is_power_of_two_or_zero(num_vectors));

   new_length = src_type.length;

   for (i = 0; i < num_vectors; i++)
      tmp[i] = src[i];

   while (num_vectors > 1) {
      num_vectors >>= 1;
      new_length <<= 1;
      for (i = 0; i < new_length; i++) {
         shuffles[i] = lp_build_const_int32(gallivm, i);
      }
      for (i = 0; i < num_vectors; i++) {
         tmp[i] = LLVMBuildShuffleVector(gallivm->builder, tmp[i*2], tmp[i*2 + 1],
                                         LLVMConstVector(shuffles, new_length), "");
      }
   }

   return tmp[0];
}

/**
 * Combines vectors to reduce from num_srcs to num_dsts.
 * Returns the number of src vectors concatenated in a single dst.
 *
 * num_srcs must be exactly divisible by num_dsts.
 *
 * e.g. For num_srcs = 4 and src = [x, y, z, w]
 *      num_dsts = 1  dst = [xyzw]    return = 4
 *      num_dsts = 2  dst = [xy, zw]  return = 2
 */
int
lp_build_concat_n(struct gallivm_state *gallivm,
                  struct lp_type src_type,
                  LLVMValueRef *src,
                  unsigned num_srcs,
                  LLVMValueRef *dst,
                  unsigned num_dsts)
{
   int size = num_srcs / num_dsts;
   unsigned i;

   assert(num_srcs >= num_dsts);
   assert((num_srcs % size) == 0);

   if (num_srcs == num_dsts) {
      for (i = 0; i < num_dsts; ++i) {
         dst[i] = src[i];
      }
      return 1;
   }

   for (i = 0; i < num_dsts; ++i) {
      dst[i] = lp_build_concat(gallivm, &src[i * size], src_type, size);
   }

   return size;
}

/**
 * Un-interleave vector.
 * This will return a vector consisting of every second element
 * (depending on lo_hi, beginning at 0 or 1).
 * The returned vector size (elems and width) will only be half
 * that of the source vector.
 */
LLVMValueRef
lp_build_uninterleave1(struct gallivm_state *gallivm,
                       unsigned num_elems,
                       LLVMValueRef a,
                       unsigned lo_hi)
{
   LLVMValueRef shuffle, elems[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   assert(num_elems <= LP_MAX_VECTOR_LENGTH);

   for (i = 0; i < num_elems / 2; ++i)
      elems[i] = lp_build_const_int32(gallivm, 2*i + lo_hi);

   shuffle = LLVMConstVector(elems, num_elems / 2);

   return LLVMBuildShuffleVector(gallivm->builder, a, a, shuffle, "");
}

/**
 * Interleave vector elements.
 *
 * Matches the PUNPCKLxx and PUNPCKHxx SSE instructions
 * (but not for 256bit AVX vectors).
 */
LLVMValueRef
lp_build_interleave2(struct gallivm_state *gallivm,
                     struct lp_type type,
                     LLVMValueRef a,
                     LLVMValueRef b,
                     unsigned lo_hi)
{
   LLVMValueRef shuffle;

   if (type.length == 2 && type.width == 128 && util_cpu_caps.has_avx) {
      /*
       * XXX: This is a workaround for llvm code generation deficiency. Strangely
       * enough, while this needs vinsertf128/vextractf128 instructions (hence
       * a natural match when using 2x128bit vectors) the "normal" unpack shuffle
       * generates code ranging from atrocious (llvm 3.1) to terrible (llvm 3.2, 3.3).
       * So use some different shuffles instead (the exact shuffles don't seem to
       * matter, as long as not using 128bit wide vectors, works with 8x32 or 4x64).
       */
      struct lp_type tmp_type = type;
      LLVMValueRef srchalf[2], tmpdst;
      tmp_type.length = 4;
      tmp_type.width = 64;
      a = LLVMBuildBitCast(gallivm->builder, a, lp_build_vec_type(gallivm, tmp_type), "");
      b = LLVMBuildBitCast(gallivm->builder, b, lp_build_vec_type(gallivm, tmp_type), "");
      srchalf[0] = lp_build_extract_range(gallivm, a, lo_hi * 2, 2);
      srchalf[1] = lp_build_extract_range(gallivm, b, lo_hi * 2, 2);
      tmp_type.length = 2;
      tmpdst = lp_build_concat(gallivm, srchalf, tmp_type, 2);
      return LLVMBuildBitCast(gallivm->builder, tmpdst, lp_build_vec_type(gallivm, type), "");
   }

   shuffle = lp_build_const_unpack_shuffle(gallivm, type.length, lo_hi);

   return LLVMBuildShuffleVector(gallivm->builder, a, b, shuffle, "");
}

/**
 * Interleave vector elements but with 256 (or 512) bit,
 * treats it as interleave with 2 concatenated 128 (or 256) bit vectors.
 *
 * This differs from lp_build_interleave2, as that function would produce
 * the following (for lo): a0 b0 a1 b1 a2 b2 a3 b3, which does not compile
 * into an AVX unpack instruction.
 *
 * An example interleave 8x float with 8x float on AVX 256bit unpack:
 *   a0 a1 a2 a3 a4 a5 a6 a7 <-> b0 b1 b2 b3 b4 b5 b6 b7
 *
 * Equivalent to interleaving 2x 128 bit vectors
 *   a0 a1 a2 a3 <-> b0 b1 b2 b3 concatenated with a4 a5 a6 a7 <-> b4 b5 b6 b7
 *
 * So interleave-lo would result in:
 *   a0 b0 a1 b1 a4 b4 a5 b5
 *
 * And interleave-hi would result in:
 *   a2 b2 a3 b3 a6 b6 a7 b7
 *
 * For 512 bits, the following are true:
 *
 * Interleave-lo would result in (capital letters denote hex indices):
 *   a0 b0 a1 b1 a4 b4 a5 b5 a8 b8 a9 b9 aC bC aD bD
 *
 * Interleave-hi would result in:
 *   a2 b2 a3 b3 a6 b6 a7 b7 aA bA aB bB aE bE aF bF
 */
LLVMValueRef
lp_build_interleave2_half(struct gallivm_state *gallivm,
                          struct lp_type type,
                          LLVMValueRef a,
                          LLVMValueRef b,
                          unsigned lo_hi)
{
   if (type.length * type.width == 256) {
      LLVMValueRef shuffle =
         lp_build_const_unpack_shuffle_half(gallivm, type.length, lo_hi);
      return LLVMBuildShuffleVector(gallivm->builder, a, b, shuffle, "");
   } else if ((type.length == 16) && (type.width == 32)) {
      LLVMValueRef shuffle =
         lp_build_const_unpack_shuffle_16wide(gallivm, lo_hi);
      return LLVMBuildShuffleVector(gallivm->builder, a, b, shuffle, "");
   } else {
      return lp_build_interleave2(gallivm, type, a, b, lo_hi);
   }
}

/**
 * Double the bit width.
 *
 * This will only change the number of bits the values are represented
 * with, not the values themselves.
 */
void
lp_build_unpack2(struct gallivm_state *gallivm,
                 struct lp_type src_type,
                 struct lp_type dst_type,
                 LLVMValueRef src,
                 LLVMValueRef *dst_lo,
                 LLVMValueRef *dst_hi)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef msb;
   LLVMTypeRef dst_vec_type;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(dst_type.width == src_type.width * 2);
   assert(dst_type.length * 2 == src_type.length);

   if(dst_type.sign && src_type.sign) {
      /* Replicate the sign bit in the most significant bits */
      msb = LLVMBuildAShr(builder, src,
                          lp_build_const_int_vec(gallivm, src_type,
                                                 src_type.width - 1), "");
   }
   else
      /* Most significant bits always zero */
      msb = lp_build_zero(gallivm, src_type);

   /* Interleave bits */
#if UTIL_ARCH_LITTLE_ENDIAN
   *dst_lo = lp_build_interleave2(gallivm, src_type, src, msb, 0);
   *dst_hi = lp_build_interleave2(gallivm, src_type, src, msb, 1);
#else
   *dst_lo = lp_build_interleave2(gallivm, src_type, msb, src, 0);
   *dst_hi = lp_build_interleave2(gallivm, src_type, msb, src, 1);
#endif

   /* Cast the result into the new type (twice as wide) */

   dst_vec_type = lp_build_vec_type(gallivm, dst_type);

   *dst_lo = LLVMBuildBitCast(builder, *dst_lo, dst_vec_type, "");
   *dst_hi = LLVMBuildBitCast(builder, *dst_hi, dst_vec_type, "");
}

/**
 * Double the bit width, with an order which fits the cpu nicely.
 *
 * This will only change the number of bits the values are represented
 * with, not the values themselves.
 *
 * The order of the results is not guaranteed, other than it will match
 * the corresponding lp_build_pack2_native call.
 */
void
lp_build_unpack2_native(struct gallivm_state *gallivm,
                        struct lp_type src_type,
                        struct lp_type dst_type,
                        LLVMValueRef src,
                        LLVMValueRef *dst_lo,
                        LLVMValueRef *dst_hi)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef msb;
   LLVMTypeRef dst_vec_type;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(dst_type.width == src_type.width * 2);
   assert(dst_type.length * 2 == src_type.length);

   if(dst_type.sign && src_type.sign) {
      /* Replicate the sign bit in the most significant bits */
      msb = LLVMBuildAShr(builder, src,
                          lp_build_const_int_vec(gallivm, src_type,
                                                 src_type.width - 1), "");
   }
   else
      /* Most significant bits always zero */
      msb = lp_build_zero(gallivm, src_type);

   /* Interleave bits */
#if UTIL_ARCH_LITTLE_ENDIAN
   if (src_type.length * src_type.width == 256 && util_cpu_caps.has_avx2) {
      *dst_lo = lp_build_interleave2_half(gallivm, src_type, src, msb, 0);
      *dst_hi = lp_build_interleave2_half(gallivm, src_type, src, msb, 1);
   } else {
      *dst_lo = lp_build_interleave2(gallivm, src_type, src, msb, 0);
      *dst_hi = lp_build_interleave2(gallivm, src_type, src, msb, 1);
   }
#else
   *dst_lo = lp_build_interleave2(gallivm, src_type, msb, src, 0);
   *dst_hi = lp_build_interleave2(gallivm, src_type, msb, src, 1);
#endif

   /* Cast the result into the new type (twice as wide) */

   dst_vec_type = lp_build_vec_type(gallivm, dst_type);

   *dst_lo = LLVMBuildBitCast(builder, *dst_lo, dst_vec_type, "");
   *dst_hi = LLVMBuildBitCast(builder, *dst_hi, dst_vec_type, "");
}

/**
 * Expand the bit width.
 *
 * This will only change the number of bits the values are represented
 * with, not the values themselves.
 */
void
lp_build_unpack(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                LLVMValueRef src,
                LLVMValueRef *dst, unsigned num_dsts)
{
   unsigned num_tmps;
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length == dst_type.length * num_dsts);

   num_tmps = 1;
   dst[0] = src;

   while(src_type.width < dst_type.width) {
      struct lp_type tmp_type = src_type;

      tmp_type.width *= 2;
      tmp_type.length /= 2;

      for(i = num_tmps; i--; ) {
         lp_build_unpack2(gallivm, src_type, tmp_type, dst[i], &dst[2*i + 0],
                          &dst[2*i + 1]);
      }

      src_type = tmp_type;

      num_tmps *= 2;
   }

   assert(num_tmps == num_dsts);
}

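/*
 * E.g. expanding one 16x8 vector to 32 bit values takes two rounds of
 * lp_build_unpack2 and yields num_dsts = 4 vectors of 4x32. Note the loop
 * above walks dst[] backwards so the widening can be done in place.
 */
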
/**
 * Non-interleaved pack.
 *
 * This will move values as
 *         (LSB)                     (MSB)
 *   lo =   l0 __ l1 __ l2 __..  __ ln __
 *   hi =   h0 __ h1 __ h2 __..  __ hn __
 *   res =  l0 l1 l2 .. ln h0 h1 h2 .. hn
 *
 * This will only change the number of bits the values are represented
 * with, not the values themselves.
 *
 * It is assumed the values are already clamped into the destination type
 * range. Values outside that range will produce undefined results. Use
 * lp_build_packs2 instead.
 */
LLVMValueRef
lp_build_pack2(struct gallivm_state *gallivm,
               struct lp_type src_type,
               struct lp_type dst_type,
               LLVMValueRef lo,
               LLVMValueRef hi)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMTypeRef dst_vec_type = lp_build_vec_type(gallivm, dst_type);
   LLVMValueRef shuffle;
   LLVMValueRef res = NULL;
   struct lp_type intr_type = dst_type;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   /* Check for special cases first */
   if ((util_cpu_caps.has_sse2 || util_cpu_caps.has_altivec) &&
       src_type.width * src_type.length >= 128) {
      const char *intrinsic = NULL;
      boolean swap_intrinsic_operands = FALSE;

      switch(src_type.width) {
      case 32:
         if (util_cpu_caps.has_sse2) {
            if (dst_type.sign) {
               intrinsic = "llvm.x86.sse2.packssdw.128";
            } else {
               if (util_cpu_caps.has_sse4_1) {
                  intrinsic = "llvm.x86.sse41.packusdw";
               }
            }
         } else if (util_cpu_caps.has_altivec) {
            if (dst_type.sign) {
               intrinsic = "llvm.ppc.altivec.vpkswss";
            } else {
               intrinsic = "llvm.ppc.altivec.vpkuwus";
            }
#if UTIL_ARCH_LITTLE_ENDIAN
            swap_intrinsic_operands = TRUE;
#endif
         }
         break;
      case 16:
         if (dst_type.sign) {
            if (util_cpu_caps.has_sse2) {
               intrinsic = "llvm.x86.sse2.packsswb.128";
            } else if (util_cpu_caps.has_altivec) {
               intrinsic = "llvm.ppc.altivec.vpkshss";
#if UTIL_ARCH_LITTLE_ENDIAN
               swap_intrinsic_operands = TRUE;
#endif
            }
         } else {
            if (util_cpu_caps.has_sse2) {
               intrinsic = "llvm.x86.sse2.packuswb.128";
            } else if (util_cpu_caps.has_altivec) {
               intrinsic = "llvm.ppc.altivec.vpkshus";
#if UTIL_ARCH_LITTLE_ENDIAN
               swap_intrinsic_operands = TRUE;
#endif
            }
         }
         break;
         /* default uses generic shuffle below */
      }

      if (intrinsic) {
         if (src_type.width * src_type.length == 128) {
            LLVMTypeRef intr_vec_type = lp_build_vec_type(gallivm, intr_type);
            if (swap_intrinsic_operands) {
               res = lp_build_intrinsic_binary(builder, intrinsic, intr_vec_type, hi, lo);
            } else {
               res = lp_build_intrinsic_binary(builder, intrinsic, intr_vec_type, lo, hi);
            }
            if (dst_vec_type != intr_vec_type) {
               res = LLVMBuildBitCast(builder, res, dst_vec_type, "");
            }
         }
         else {
            int num_split = src_type.width * src_type.length / 128;
            int i;
            int nlen = 128 / src_type.width;
            int lo_off = swap_intrinsic_operands ? nlen : 0;
            int hi_off = swap_intrinsic_operands ? 0 : nlen;
            struct lp_type ndst_type = lp_type_unorm(dst_type.width, 128);
            struct lp_type nintr_type = lp_type_unorm(intr_type.width, 128);
            LLVMValueRef tmpres[LP_MAX_VECTOR_WIDTH / 128];
            LLVMValueRef tmplo, tmphi;
            LLVMTypeRef ndst_vec_type = lp_build_vec_type(gallivm, ndst_type);
            LLVMTypeRef nintr_vec_type = lp_build_vec_type(gallivm, nintr_type);

            assert(num_split <= LP_MAX_VECTOR_WIDTH / 128);

            for (i = 0; i < num_split / 2; i++) {
               tmplo = lp_build_extract_range(gallivm,
                                              lo, i*nlen*2 + lo_off, nlen);
               tmphi = lp_build_extract_range(gallivm,
                                              lo, i*nlen*2 + hi_off, nlen);
               tmpres[i] = lp_build_intrinsic_binary(builder, intrinsic,
                                                     nintr_vec_type, tmplo, tmphi);
               if (ndst_vec_type != nintr_vec_type) {
                  tmpres[i] = LLVMBuildBitCast(builder, tmpres[i], ndst_vec_type, "");
               }
            }
            for (i = 0; i < num_split / 2; i++) {
               tmplo = lp_build_extract_range(gallivm,
                                              hi, i*nlen*2 + lo_off, nlen);
               tmphi = lp_build_extract_range(gallivm,
                                              hi, i*nlen*2 + hi_off, nlen);
               tmpres[i+num_split/2] = lp_build_intrinsic_binary(builder, intrinsic,
                                                                 nintr_vec_type,
                                                                 tmplo, tmphi);
               if (ndst_vec_type != nintr_vec_type) {
                  tmpres[i+num_split/2] = LLVMBuildBitCast(builder, tmpres[i+num_split/2],
                                                           ndst_vec_type, "");
               }
            }
            res = lp_build_concat(gallivm, tmpres, ndst_type, num_split);
         }
         return res;
      }
   }

   /* generic shuffle */
   lo = LLVMBuildBitCast(builder, lo, dst_vec_type, "");
   hi = LLVMBuildBitCast(builder, hi, dst_vec_type, "");

   shuffle = lp_build_const_pack_shuffle(gallivm, dst_type.length);

   res = LLVMBuildShuffleVector(builder, lo, hi, shuffle, "");

   return res;
}

/**
 * Non-interleaved native pack.
 *
 * Similar to lp_build_pack2, but the ordering of values is not
 * guaranteed, other than it will match lp_build_unpack2_native.
 *
 * In particular, with avx2, the lower and upper 128bits of the vectors will
 * be packed independently, so that (with 32bit->16bit values)
 *         (LSB)                                       (MSB)
 *   lo =   l0 __ l1 __ l2 __ l3 __ l4 __ l5 __ l6 __ l7 __
 *   hi =   h0 __ h1 __ h2 __ h3 __ h4 __ h5 __ h6 __ h7 __
 *   res =  l0 l1 l2 l3 h0 h1 h2 h3 l4 l5 l6 l7 h4 h5 h6 h7
 *
 * This will only change the number of bits the values are represented
 * with, not the values themselves.
 *
 * It is assumed the values are already clamped into the destination type
 * range. Values outside that range will produce undefined results.
 */
LLVMValueRef
lp_build_pack2_native(struct gallivm_state *gallivm,
                      struct lp_type src_type,
                      struct lp_type dst_type,
                      LLVMValueRef lo,
                      LLVMValueRef hi)
{
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_type intr_type = dst_type;
   const char *intrinsic = NULL;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   /* At this point we only have a special case for avx2 */
   if (src_type.length * src_type.width == 256 &&
       util_cpu_caps.has_avx2) {
      switch(src_type.width) {
      case 32:
         if (dst_type.sign) {
            intrinsic = "llvm.x86.avx2.packssdw";
         } else {
            intrinsic = "llvm.x86.avx2.packusdw";
         }
         break;
      case 16:
         if (dst_type.sign) {
            intrinsic = "llvm.x86.avx2.packsswb";
         } else {
            intrinsic = "llvm.x86.avx2.packuswb";
         }
         break;
      }
   }

   if (intrinsic) {
      LLVMTypeRef intr_vec_type = lp_build_vec_type(gallivm, intr_type);
      return lp_build_intrinsic_binary(builder, intrinsic, intr_vec_type,
                                       lo, hi);
   }

   return lp_build_pack2(gallivm, src_type, dst_type, lo, hi);
}

/**
 * Non-interleaved pack and saturate.
 *
 * Same as lp_build_pack2 but will saturate values so that they fit into the
 * destination type.
 */
LLVMValueRef
lp_build_packs2(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                LLVMValueRef lo,
                LLVMValueRef hi)
{
   boolean clamp;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.sign == dst_type.sign);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   clamp = TRUE;

   /* All X86 SSE non-interleaved pack instructions take signed inputs and
    * saturate them, so no need to clamp for those cases. */
   if(util_cpu_caps.has_sse2 &&
      src_type.width * src_type.length >= 128 &&
      src_type.sign &&
      (src_type.width == 32 || src_type.width == 16))
      clamp = FALSE;

   if(clamp) {
      struct lp_build_context bld;
      unsigned dst_bits = dst_type.sign ? dst_type.width - 1 : dst_type.width;
      LLVMValueRef dst_max = lp_build_const_int_vec(gallivm, src_type,
                                ((unsigned long long)1 << dst_bits) - 1);
      lp_build_context_init(&bld, gallivm, src_type);
      lo = lp_build_min(&bld, lo, dst_max);
      hi = lp_build_min(&bld, hi, dst_max);
      /* FIXME: What about lower bound? */
   }

   return lp_build_pack2(gallivm, src_type, dst_type, lo, hi);
}

/**
 * Truncate the bit width.
 *
 * TODO: Handle saturation consistently.
 */
LLVMValueRef
lp_build_pack(struct gallivm_state *gallivm,
              struct lp_type src_type,
              struct lp_type dst_type,
              boolean clamped,
              const LLVMValueRef *src, unsigned num_srcs)
{
   LLVMValueRef (*pack2)(struct gallivm_state *gallivm,
                         struct lp_type src_type,
                         struct lp_type dst_type,
                         LLVMValueRef lo,
                         LLVMValueRef hi);
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length);

   if(clamped)
      pack2 = &lp_build_pack2;
   else
      pack2 = &lp_build_packs2;

   for(i = 0; i < num_srcs; ++i)
      tmp[i] = src[i];

   while(src_type.width > dst_type.width) {
      struct lp_type tmp_type = src_type;

      tmp_type.width /= 2;
      tmp_type.length *= 2;

      /* Take into consideration the sign changes only in the last step */
      if(tmp_type.width == dst_type.width)
         tmp_type.sign = dst_type.sign;

      num_srcs /= 2;

      for(i = 0; i < num_srcs; ++i)
         tmp[i] = pack2(gallivm, src_type, tmp_type,
                        tmp[2*i + 0], tmp[2*i + 1]);

      src_type = tmp_type;
   }

   assert(num_srcs == 1);

   return tmp[0];
}

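/*
 * E.g. truncating 32 bit values to 8 bits runs two pack2 rounds:
 * 4x32 + 4x32 -> 8x16, then 8x16 + 8x16 -> 16x8, with the signedness of
 * dst_type only applied in the final step.
 */
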
/**
 * Truncate or expand the bit width.
 *
 * NOTE: Getting the right sign flags is crucial here, as we employ some
 * intrinsics that do saturation.
 */
void
lp_build_resize(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                const LLVMValueRef *src, unsigned num_srcs,
                LLVMValueRef *dst, unsigned num_dsts)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   /*
    * We don't support float <-> int conversion here. That must be done
    * before/after calling this function.
    */
   assert(src_type.floating == dst_type.floating);

   /*
    * We don't support double <-> float conversion yet, although it could be
    * added with little effort.
    */
   assert((!src_type.floating && !dst_type.floating) ||
          src_type.width == dst_type.width);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
   assert(num_dsts <= LP_MAX_VECTOR_LENGTH);

   if (src_type.width > dst_type.width) {
      /*
       * Truncate bit width.
       */

      /* Conversion must be M:1 */
      assert(num_dsts == 1);

      if (src_type.width * src_type.length == dst_type.width * dst_type.length) {
         /*
          * Register width remains constant -- use vector packing intrinsics
          */
         tmp[0] = lp_build_pack(gallivm, src_type, dst_type, TRUE, src, num_srcs);
      }
      else {
         if (src_type.width / dst_type.width > num_srcs) {
            /*
             * First change src vectors size (with shuffle) so they have the
             * same size as the destination vector, then pack normally.
             * Note: cannot use cast/extract because llvm generates atrocious code.
             */
            unsigned size_ratio = (src_type.width * src_type.length) /
                                  (dst_type.length * dst_type.width);
            unsigned new_length = src_type.length / size_ratio;

            for (i = 0; i < size_ratio * num_srcs; i++) {
               unsigned start_index = (i % size_ratio) * new_length;
               tmp[i] = lp_build_extract_range(gallivm, src[i / size_ratio],
                                               start_index, new_length);
            }
            num_srcs *= size_ratio;
            src_type.length = new_length;
            tmp[0] = lp_build_pack(gallivm, src_type, dst_type, TRUE, tmp, num_srcs);
         }
         else {
            /*
             * Truncate bit width but expand vector size - first pack
             * then expand simply because this should be more AVX-friendly
             * for the cases we probably hit.
             */
            unsigned size_ratio = (dst_type.width * dst_type.length) /
                                  (src_type.length * src_type.width);
            unsigned num_pack_srcs = num_srcs / size_ratio;
            dst_type.length = dst_type.length / size_ratio;

            for (i = 0; i < size_ratio; i++) {
               tmp[i] = lp_build_pack(gallivm, src_type, dst_type, TRUE,
                                      &src[i*num_pack_srcs], num_pack_srcs);
            }
            tmp[0] = lp_build_concat(gallivm, tmp, dst_type, size_ratio);
         }
      }
   }
   else if (src_type.width < dst_type.width) {
      /*
       * Expand bit width.
       */

      /* Conversion must be 1:N */
      assert(num_srcs == 1);

      if (src_type.width * src_type.length == dst_type.width * dst_type.length) {
         /*
          * Register width remains constant -- use vector unpack intrinsics
          */
         lp_build_unpack(gallivm, src_type, dst_type, src[0], tmp, num_dsts);
      }
      else {
         /*
          * Do it element-wise.
          */
         assert(src_type.length * num_srcs == dst_type.length * num_dsts);

         for (i = 0; i < num_dsts; i++) {
            tmp[i] = lp_build_undef(gallivm, dst_type);
         }

         for (i = 0; i < src_type.length; ++i) {
            unsigned j = i / dst_type.length;
            LLVMValueRef srcindex = lp_build_const_int32(gallivm, i);
            LLVMValueRef dstindex = lp_build_const_int32(gallivm, i % dst_type.length);
            LLVMValueRef val = LLVMBuildExtractElement(builder, src[0], srcindex, "");

            if (src_type.sign && dst_type.sign) {
               val = LLVMBuildSExt(builder, val, lp_build_elem_type(gallivm, dst_type), "");
            } else {
               val = LLVMBuildZExt(builder, val, lp_build_elem_type(gallivm, dst_type), "");
            }
            tmp[j] = LLVMBuildInsertElement(builder, tmp[j], val, dstindex, "");
         }
      }
   }
   else {
      /*
       * No-op
       */

      /* "Conversion" must be N:N */
      assert(num_srcs == num_dsts);

      for(i = 0; i < num_dsts; ++i)
         tmp[i] = src[i];
   }

   for(i = 0; i < num_dsts; ++i)
      dst[i] = tmp[i];
}

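/*
 * E.g. resizing one 8x16 vector to two 4x32 vectors keeps the register
 * width constant and takes the unpack path above, while 8x16 -> 8x32
 * (one src, one wider dst) changes the register width and takes the
 * element-wise extend path.
 */
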
/**
 * Expands src vector from src.length to dst_length.
 */
LLVMValueRef
lp_build_pad_vector(struct gallivm_state *gallivm,
                    LLVMValueRef src,
                    unsigned dst_length)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   LLVMValueRef undef;
   LLVMTypeRef type;
   unsigned i, src_length;

   type = LLVMTypeOf(src);

   if (LLVMGetTypeKind(type) != LLVMVectorTypeKind) {
      /* Can't use ShuffleVector on non-vector type */
      undef = LLVMGetUndef(LLVMVectorType(type, dst_length));
      return LLVMBuildInsertElement(gallivm->builder, undef, src,
                                    lp_build_const_int32(gallivm, 0), "");
   }

   undef      = LLVMGetUndef(type);
   src_length = LLVMGetVectorSize(type);

   assert(dst_length <= ARRAY_SIZE(elems));
   assert(dst_length >= src_length);

   if (src_length == dst_length)
      return src;

   /* All elements from src vector */
   for (i = 0; i < src_length; ++i)
      elems[i] = lp_build_const_int32(gallivm, i);

   /* Undef fill remaining space */
   for (i = src_length; i < dst_length; ++i)
      elems[i] = lp_build_const_int32(gallivm, src_length);

   /* Combine the two vectors */
   return LLVMBuildShuffleVector(gallivm->builder, src, undef,
                                 LLVMConstVector(elems, dst_length), "");
}