/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * @file
 * Helper functions for type conversions.
 *
 * We want to use the fastest type for a given computation whenever feasible.
 * The other side of this is that we need to be able to convert between
 * several types accurately and efficiently.
 *
 * Conversion between types of different bit width is quite complex, since
 * it must be done in multiple steps by repeatedly splitting or merging
 * registers.
 *
 * There are a few invariants to remember in type conversions:
 *
 * - register width must remain constant:
 *
 *     src_type.width * src_type.length == dst_type.width * dst_type.length
 *
 * - total number of elements must remain constant:
 *
 *     src_type.length * num_srcs == dst_type.length * num_dsts
 *
 * It is not always possible to do the conversion both accurately and
 * efficiently, usually due to lack of adequate machine instructions. In these
 * cases it is important not to take shortcuts here and sacrifice accuracy, as
 * these functions can be used anywhere. In the future we might have a
 * precision parameter which can gauge the accuracy vs efficiency compromise,
 * but for now, if the data conversion between two stages happens to be the
 * bottleneck, then most likely one should just avoid converting at all and
 * run both stages with the same type.
 *
 * Make sure to run the lp_test_conv unit test after any change to this file.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */


#include "util/u_debug.h"
#include "util/u_math.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_intr.h"
#include "lp_bld_arit.h"
#include "lp_bld_conv.h"


/**
 * Special case for converting clamped IEEE-754 floats to unsigned norms.
 *
 * The mathematical voodoo below may seem excessive but it is actually
 * paramount we do it this way for several reasons. First, there is no single
 * precision FP to unsigned integer conversion Intel SSE instruction. Second,
 * even if there were, since the FP's mantissa takes only a fraction of
 * register bits the typical scale-and-cast approach would require double
 * precision for accurate results, and therefore half the throughput.
 *
 * Although the result values can be scaled to an arbitrary bit width
 * specified by dst_width, the actual result type will have the same width.
 */
LLVMValueRef
lp_build_clamped_float_to_unsigned_norm(LLVMBuilderRef builder,
                                        union lp_type src_type,
                                        unsigned dst_width,
                                        LLVMValueRef src)
{
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(src_type);
   LLVMValueRef res;
   unsigned mantissa;
   unsigned n;
   unsigned long long ubound;
   unsigned long long mask;
   double scale;
   double bias;

   assert(src_type.floating);

   mantissa = lp_mantissa(src_type);

   /* We cannot carry more bits than the mantissa */
   n = MIN2(mantissa, dst_width);

   /* These magic coefficients will make the desired result appear in the
    * least significant bits of the mantissa.
    */
   ubound = ((unsigned long long)1 << n);
   mask = ubound - 1;
   scale = (double)mask/ubound;
   bias = (double)((unsigned long long)1 << (mantissa - n));
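
   /*
    * Worked example, assuming 32-bit IEEE floats (23-bit mantissa) and
    * dst_width = 8: n = 8, ubound = 256, mask = 255, scale = 255/256 and
    * bias = 2^15.  For x in [0, 1], x*scale + bias lands in [2^15, 2^16),
    * a range where the float's mantissa has a resolution of exactly 2^-8,
    * so after the bitcast below the lowest 8 mantissa bits hold
    * round(x*255).
    */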

   res = LLVMBuildMul(builder, src, lp_build_const_scalar(src_type, scale), "");
   res = LLVMBuildAdd(builder, res, lp_build_const_scalar(src_type, bias), "");
   res = LLVMBuildBitCast(builder, res, int_vec_type, "");

   if(dst_width > n) {
      int shift = dst_width - n;
      res = LLVMBuildShl(builder, res, lp_build_int_const_scalar(src_type, shift), "");

      /* Fill in the empty lower bits for added precision? */
#if 0
      {
         LLVMValueRef msb;
         msb = LLVMBuildLShr(builder, res, lp_build_int_const_scalar(src_type, dst_width - 1), "");
         msb = LLVMBuildShl(builder, msb, lp_build_int_const_scalar(src_type, shift), "");
         msb = LLVMBuildSub(builder, msb, lp_build_int_const_scalar(src_type, 1), "");
         res = LLVMBuildOr(builder, res, msb, "");
      }
#elif 0
      /* Just copy the most significant bits to the lower bits */
      res = LLVMBuildOr(builder, res, LLVMBuildLShr(builder, res, lp_build_int_const_scalar(src_type, n), ""), "");
#endif
   }
   else
      res = LLVMBuildAnd(builder, res, lp_build_int_const_scalar(src_type, mask), "");

   return res;
}
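

/*
 * Usage sketch (illustrative, not part of the original file): for a vector
 * of 4 x f32 values already clamped to [0, 1],
 *
 *    packed = lp_build_clamped_float_to_unsigned_norm(builder, f32_type, 8, src);
 *
 * yields a 4 x i32 vector whose lanes each hold round(x*255) in their low
 * 8 bits.
 */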


/**
 * Inverse of lp_build_clamped_float_to_unsigned_norm above.
 */
LLVMValueRef
lp_build_unsigned_norm_to_float(LLVMBuilderRef builder,
                                unsigned src_width,
                                union lp_type dst_type,
                                LLVMValueRef src)
{
   LLVMTypeRef vec_type = lp_build_vec_type(dst_type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(dst_type);
   LLVMValueRef bias_;
   LLVMValueRef res;
   unsigned mantissa;
   unsigned n;
   unsigned long long ubound;
   unsigned long long mask;
   double scale;
   double bias;

   assert(dst_type.floating);

   mantissa = lp_mantissa(dst_type);

   n = MIN2(mantissa, src_width);

   ubound = ((unsigned long long)1 << n);
   mask = ubound - 1;
   scale = (double)ubound/mask;
   bias = (double)((unsigned long long)1 << (mantissa - n));
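
   /*
    * Worked example, assuming src_width = 8 and a 32-bit float destination:
    * n = 8, ubound = 256, mask = 255, scale = 256/255 and bias = 2^15.
    * OR-ing the 8 source bits into the mantissa of the float 2^15 yields
    * 2^15 + x/256; subtracting the bias and multiplying by scale then gives
    * x/255, i.e. the expected value in [0, 1].
    */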

   res = src;

   if(src_width > mantissa) {
      int shift = src_width - mantissa;
      res = LLVMBuildLShr(builder, res, lp_build_int_const_scalar(dst_type, shift), "");
   }

   bias_ = lp_build_const_scalar(dst_type, bias);

   res = LLVMBuildOr(builder,
                     res,
                     LLVMBuildBitCast(builder, bias_, int_vec_type, ""), "");

   res = LLVMBuildBitCast(builder, res, vec_type, "");

   res = LLVMBuildSub(builder, res, bias_, "");
   res = LLVMBuildMul(builder, res, lp_build_const_scalar(dst_type, scale), "");

   return res;
}


/**
 * Build shuffle vectors that match PUNPCKLxx and PUNPCKHxx instructions.
 */
static LLVMValueRef
lp_build_const_unpack_shuffle(unsigned n, unsigned lo_hi)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i, j;

   assert(n <= LP_MAX_VECTOR_LENGTH);
   assert(lo_hi < 2);

   /* TODO: cache results in a static table */

   for(i = 0, j = lo_hi*n/2; i < n; i += 2, ++j) {
      elems[i + 0] = LLVMConstInt(LLVMInt32Type(), 0 + j, 0);
      elems[i + 1] = LLVMConstInt(LLVMInt32Type(), n + j, 0);
   }

   return LLVMConstVector(elems, n);
}
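
/*
 * For example, n = 8 and lo_hi = 0 gives the shuffle {0, 8, 1, 9, 2, 10, 3, 11},
 * i.e. the low halves of both operands interleaved, which is exactly what
 * PUNPCKLBW does; lo_hi = 1 gives {4, 12, 5, 13, 6, 14, 7, 15} (PUNPCKHBW).
 */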


/**
 * Build shuffle vectors that match PACKxx instructions.
 */
static LLVMValueRef
lp_build_const_pack_shuffle(unsigned n)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   assert(n <= LP_MAX_VECTOR_LENGTH);

   /* TODO: cache results in a static table */

   for(i = 0; i < n; ++i)
      elems[i] = LLVMConstInt(LLVMInt32Type(), 2*i, 0);

   return LLVMConstVector(elems, n);
}
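
/*
 * For example, n = 8 gives the shuffle {0, 2, 4, 6, 8, 10, 12, 14}: the
 * even-indexed narrow elements of the concatenated operands, which on a
 * little-endian machine are the low halves of the original wide elements.
 */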


/**
 * Expand the bit width.
 *
 * This will only change the number of bits the values are represented with,
 * not the values themselves.
 */
static void
lp_build_expand(LLVMBuilderRef builder,
                union lp_type src_type,
                union lp_type dst_type,
                LLVMValueRef src,
                LLVMValueRef *dst, unsigned num_dsts)
{
   unsigned num_tmps;
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision. */
   assert(src_type.length == dst_type.length * num_dsts);

   num_tmps = 1;
   dst[0] = src;

   while(src_type.width < dst_type.width) {
      union lp_type new_type = src_type;
      LLVMTypeRef new_vec_type;

      new_type.width *= 2;
      new_type.length /= 2;
      new_vec_type = lp_build_vec_type(new_type);

      for(i = num_tmps; i--; ) {
         LLVMValueRef zero;
         LLVMValueRef shuffle_lo;
         LLVMValueRef shuffle_hi;
         LLVMValueRef lo;
         LLVMValueRef hi;

         zero = lp_build_zero(src_type);
         shuffle_lo = lp_build_const_unpack_shuffle(src_type.length, 0);
         shuffle_hi = lp_build_const_unpack_shuffle(src_type.length, 1);

         /* PUNPCKLBW, PUNPCKHBW */
         lo = LLVMBuildShuffleVector(builder, dst[i], zero, shuffle_lo, "");
         hi = LLVMBuildShuffleVector(builder, dst[i], zero, shuffle_hi, "");

         dst[2*i + 0] = LLVMBuildBitCast(builder, lo, new_vec_type, "");
         dst[2*i + 1] = LLVMBuildBitCast(builder, hi, new_vec_type, "");
      }

      src_type = new_type;
      num_tmps *= 2;
   }

   assert(num_tmps == num_dsts);
}
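
/*
 * For example, expanding one vector of 16 x i8 to four vectors of 4 x i32
 * takes two passes of the loop above: 16 x i8 -> 2 x (8 x i16) -> 4 x (4 x i32).
 * Interleaving with zero in the high lane amounts to zero extension on a
 * little-endian machine, so unsigned values are preserved verbatim.
 */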


/**
 * Non-interleaved pack.
 *
 * This will move values as:
 *
 *   lo = __ l0 __ l1 __ l2 __ .. __ ln
 *   hi = __ h0 __ h1 __ h2 __ .. __ hn
 *   res = l0 l1 l2 .. ln h0 h1 h2 .. hn
 *
 * TODO: Handle saturation consistently.
 */
static LLVMValueRef
lp_build_pack2(LLVMBuilderRef builder,
               union lp_type src_type,
               union lp_type dst_type,
               boolean clamped,
               LLVMValueRef lo,
               LLVMValueRef hi)
{
   LLVMTypeRef src_vec_type = lp_build_vec_type(src_type);
   LLVMTypeRef dst_vec_type = lp_build_vec_type(dst_type);
   LLVMValueRef shuffle;
   LLVMValueRef res;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision. */
   assert(src_type.length * 2 == dst_type.length);

   assert(!src_type.floating);
   assert(!dst_type.floating);

#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
   if(src_type.width * src_type.length == 128) {
      /* All X86 non-interleaved pack instructions take signed inputs and
       * saturate them, so saturate beforehand. */
      if(!src_type.sign && !clamped) {
         struct lp_build_context bld;
         unsigned dst_bits = dst_type.sign ? dst_type.width - 1 : dst_type.width;
         LLVMValueRef dst_max = lp_build_int_const_scalar(src_type, ((unsigned long long)1 << dst_bits) - 1);
         lp_build_context_init(&bld, builder, src_type);
         lo = lp_build_min(&bld, lo, dst_max);
         hi = lp_build_min(&bld, hi, dst_max);
      }

      switch(src_type.width) {
      case 32:
         if(dst_type.sign)
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packssdw.128", src_vec_type, lo, hi);
         else
            /* PACKUSDW is the only intrinsic with a consistent signature */
            return lp_build_intrinsic_binary(builder, "llvm.x86.sse41.packusdw", dst_vec_type, lo, hi);
         break;

      case 16:
         if(dst_type.sign)
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packsswb.128", src_vec_type, lo, hi);
         else
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packuswb.128", src_vec_type, lo, hi);
         break;

      default:
         assert(0);
         return LLVMGetUndef(dst_vec_type);
      }

      res = LLVMBuildBitCast(builder, res, dst_vec_type, "");
      return res;
   }
#endif

   lo = LLVMBuildBitCast(builder, lo, dst_vec_type, "");
   hi = LLVMBuildBitCast(builder, hi, dst_vec_type, "");

   shuffle = lp_build_const_pack_shuffle(dst_type.length);

   res = LLVMBuildShuffleVector(builder, lo, hi, shuffle, "");

   return res;
}
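
/*
 * Note that the generic shuffle fallback above keeps only the low half of
 * each wide element, i.e. it truncates modulo 2^dst_width, whereas the x86
 * pack intrinsics saturate; hence the TODO about handling saturation
 * consistently.
 */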


/**
 * Truncate the bit width.
 *
 * TODO: Handle saturation consistently.
 */
static LLVMValueRef
lp_build_trunc(LLVMBuilderRef builder,
               union lp_type src_type,
               union lp_type dst_type,
               boolean clamped,
               const LLVMValueRef *src, unsigned num_srcs)
{
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision. */
   assert(src_type.length * num_srcs == dst_type.length);

   for(i = 0; i < num_srcs; ++i)
      tmp[i] = src[i];

   while(src_type.width > dst_type.width) {
      union lp_type new_type = src_type;

      new_type.width /= 2;
      new_type.length *= 2;

      /* Take into consideration the sign change only in the last step */
      if(new_type.width == dst_type.width)
         new_type.sign = dst_type.sign;

      num_srcs /= 2;

      for(i = 0; i < num_srcs; ++i)
         tmp[i] = lp_build_pack2(builder, src_type, new_type, clamped,
                                 tmp[2*i + 0], tmp[2*i + 1]);

      src_type = new_type;
   }

   assert(num_srcs == 1);

   return tmp[0];
}
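
/*
 * For example, truncating four vectors of 4 x i32 to one vector of 16 x i8
 * takes two rounds of lp_build_pack2: 4 x (4 x i32) -> 2 x (8 x i16) ->
 * 1 x (16 x i8), halving the element width and the vector count each round.
 */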


/**
 * Generic type conversion.
 *
 * TODO: Take a precision argument, or even better, add a new precision member
 * to the lp_type union.
 */
void
lp_build_conv(LLVMBuilderRef builder,
              union lp_type src_type,
              union lp_type dst_type,
              const LLVMValueRef *src, unsigned num_srcs,
              LLVMValueRef *dst, unsigned num_dsts)
{
   union lp_type tmp_type;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned num_tmps;
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision. */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);

   tmp_type = src_type;
   for(i = 0; i < num_srcs; ++i)
      tmp[i] = src[i];
   num_tmps = num_srcs;

   /*
    * Clamp if necessary
    */

   if(src_type.value != dst_type.value) {
      struct lp_build_context bld;
      double src_min = lp_const_min(src_type);
      double dst_min = lp_const_min(dst_type);
      double src_max = lp_const_max(src_type);
      double dst_max = lp_const_max(dst_type);
      LLVMValueRef thres;

      lp_build_context_init(&bld, builder, tmp_type);

      if(src_min < dst_min) {
         thres = lp_build_const_scalar(src_type, dst_min);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_max(&bld, tmp[i], thres);
      }

      if(src_max > dst_max) {
         thres = lp_build_const_scalar(src_type, dst_max);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_min(&bld, tmp[i], thres);
      }
   }

   /*
    * Scale to the narrowest range
    */

   if(dst_type.floating) {
      /* Nothing to do */
   }
   else if(tmp_type.floating) {
      if(!dst_type.fixed && !dst_type.sign && dst_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_clamped_float_to_unsigned_norm(builder,
                                                             tmp_type,
                                                             dst_type.width,
                                                             tmp[i]);
         }
         tmp_type.floating = FALSE;
      }
      else {
         double dst_scale = lp_const_scale(dst_type);
         LLVMTypeRef tmp_vec_type;

         if (dst_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_scalar(tmp_type, dst_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildMul(builder, tmp[i], scale, "");
         }

         /* Use an equally sized integer for intermediate computations */
         tmp_type.floating = FALSE;
         tmp_vec_type = lp_build_vec_type(tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(dst_type.sign)
               tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
            tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
#endif
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift > dst_shift) {
         LLVMValueRef shift = lp_build_int_const_scalar(tmp_type, src_shift - dst_shift);
         for(i = 0; i < num_tmps; ++i)
            if(src_type.sign)
               tmp[i] = LLVMBuildAShr(builder, tmp[i], shift, "");
            else
               tmp[i] = LLVMBuildLShr(builder, tmp[i], shift, "");
      }
   }

   /*
    * Truncate or expand bit width
    */

   assert(!tmp_type.floating || tmp_type.width == dst_type.width);

   if(tmp_type.width > dst_type.width) {
      assert(num_dsts == 1);
      tmp[0] = lp_build_trunc(builder, tmp_type, dst_type, TRUE, tmp, num_tmps);
      tmp_type.width = dst_type.width;
      tmp_type.length = dst_type.length;
      num_tmps = 1;
   }

   if(tmp_type.width < dst_type.width) {
      assert(num_tmps == 1);
      lp_build_expand(builder, tmp_type, dst_type, tmp[0], tmp, num_dsts);
      tmp_type.width = dst_type.width;
      tmp_type.length = dst_type.length;
      num_tmps = num_dsts;
   }

   assert(tmp_type.width == dst_type.width);
   assert(tmp_type.length == dst_type.length);
   assert(num_tmps == num_dsts);

   /*
    * Scale to the widest range
    */

   if(src_type.floating) {
      /* Nothing to do */
   }
   else if(!src_type.floating && dst_type.floating) {
      if(!src_type.fixed && !src_type.sign && src_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_unsigned_norm_to_float(builder,
                                                     src_type.width,
                                                     dst_type,
                                                     tmp[i]);
         }
         tmp_type.floating = TRUE;
      }
      else {
         double src_scale = lp_const_scale(src_type);
         LLVMTypeRef tmp_vec_type;

         /* Use an equally sized float for intermediate computations */
         tmp_type.floating = TRUE;
         tmp_type.sign = TRUE;
         tmp_vec_type = lp_build_vec_type(tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(src_type.sign)
               tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildUIToFP(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildUIToFP */
            tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
#endif
         }

         if (src_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_scalar(tmp_type, 1.0/src_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildMul(builder, tmp[i], scale, "");
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift < dst_shift) {
         LLVMValueRef shift = lp_build_int_const_scalar(tmp_type, dst_shift - src_shift);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = LLVMBuildShl(builder, tmp[i], shift, "");
      }
   }

   for(i = 0; i < num_dsts; ++i)
      dst[i] = tmp[i];
}
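
/*
 * End-to-end example: converting four 4 x f32 vectors holding colors in
 * [0, 1] into a single 16 x unorm8 vector goes through (1) clamping to the
 * destination range, (2) lp_build_clamped_float_to_unsigned_norm, leaving
 * an 8-bit value in each 32-bit lane, and (3) lp_build_trunc to pack the
 * four vectors into one.
 */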


/**
 * Bit mask conversion.
 *
 * This will convert the integer masks that match the given types.
 *
 * The mask values should be 0 or -1, i.e., with all bits either set to zero
 * or one. Any other value will likely cause unpredictable results.
 *
 * This is basically a very trimmed down version of lp_build_conv.
 */
void
lp_build_conv_mask(LLVMBuilderRef builder,
                   union lp_type src_type,
                   union lp_type dst_type,
                   const LLVMValueRef *src, unsigned num_srcs,
                   LLVMValueRef *dst, unsigned num_dsts)
{
   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision. */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   /*
    * We assume all values are 0 or -1
    */

   src_type.floating = FALSE;
   src_type.fixed = FALSE;
   src_type.sign = TRUE;
   src_type.norm = FALSE;

   dst_type.floating = FALSE;
   dst_type.fixed = FALSE;
   dst_type.sign = TRUE;
   dst_type.norm = FALSE;

   /*
    * Truncate or expand bit width
    */

   if(src_type.width > dst_type.width) {
      assert(num_dsts == 1);
      dst[0] = lp_build_trunc(builder, src_type, dst_type, TRUE, src, num_srcs);
   }
   else if(src_type.width < dst_type.width) {
      assert(num_srcs == 1);
      lp_build_expand(builder, src_type, dst_type, src[0], dst, num_dsts);
   }
   else {
      assert(num_srcs == num_dsts);
      memcpy(dst, src, num_dsts * sizeof *dst);
   }
}
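
/*
 * For example, four 4 x i32 masks produced by 32-bit float comparisons can
 * be converted into a single 16 x i8 mask this way; because every value is
 * 0 or -1, the signed saturation performed by the pack instructions
 * preserves the masks exactly.
 */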