/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * @file
 * Helper functions for type conversions.
 *
 * We want to use the fastest type for a given computation whenever feasible.
 * The other side of this is that we need to be able to convert between
 * several types accurately and efficiently.
 *
 * Conversion between types of different bit width is quite complex since
 * multiple values are grouped in a single register.
 *
 * There are a few invariants to keep in mind in type conversions:
 *
 * - register width must remain constant:
 *
 *     src_type.width * src_type.length == dst_type.width * dst_type.length
 *
 * - total number of elements must remain constant:
 *
 *     src_type.length * num_srcs == dst_type.length * num_dsts
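 *
 * For example (an illustrative check of both invariants), converting four
 * registers of 4 x f32 into one register of 16 x u8 -- the 4x4f --> 1x16ub
 * special case below -- satisfies both: 32 * 4 == 8 * 16 for the register
 * width, and 4 * 4 == 16 * 1 for the element count.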
 *
 * It is not always possible to do the conversion both accurately and
 * efficiently, usually due to lack of adequate machine instructions. In these
 * cases it is important not to take shortcuts here and sacrifice accuracy, as
 * these functions can be used anywhere. In the future we might have a
 * precision parameter which can gauge the accuracy vs efficiency compromise,
 * but for now, if the data conversion between two stages happens to be the
 * bottleneck, then most likely one should just avoid converting at all and
 * run both stages with the same type.
 *
 * Make sure to run the lp_test_conv unit test after any change to this file.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */


#include "util/u_debug.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_arit.h"
#include "lp_bld_pack.h"
#include "lp_bld_conv.h"
#include "lp_bld_logic.h"


/**
 * Converts int16 half-float to float32.
 * Note this can be performed in a single instruction where the F16C
 * vcvtph2ps instruction is available [llvm.x86.vcvtph2ps / _mm_cvtph_ps].
 *
 * @param src_type      <vector> type of int16
 * @param src           value to convert
 *
 * ref http://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/
 */
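
/*
 * Illustrative worked example (not generated code): half 1.0 is 0x3C00.
 * expmant = 0x3C00, and 0x3C00 << 13 = 0x07800000, which read as a float
 * is 2^(15 - 127). The magic constant has bit pattern (254 - 15) << 23,
 * i.e. the float 2^(254 - 15 - 127) = 2^112, so the product is
 * 2^(15 - 127 + 112) = 1.0f (bit pattern 0x3F800000). The multiply thus
 * re-biases the exponent from half's 15 to float's 127, and subnormal
 * halves (exponent field 0) get normalized by the same multiply for free.
 */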
LLVMValueRef
lp_build_half_to_float(struct gallivm_state *gallivm,
                       struct lp_type src_type,
                       LLVMValueRef src)
{
   struct lp_type f32_type = lp_type_float_vec(32, 32 * src_type.length);
   struct lp_type i32_type = lp_type_int_vec(32, 32 * src_type.length);

   LLVMBuilderRef builder = gallivm->builder;
   LLVMTypeRef int_vec_type = lp_build_vec_type(gallivm, i32_type);
   LLVMTypeRef float_vec_type = lp_build_vec_type(gallivm, f32_type);

   /* Constants */
   LLVMValueRef i32_13          = lp_build_const_int_vec(gallivm, i32_type, 13);
   LLVMValueRef i32_16          = lp_build_const_int_vec(gallivm, i32_type, 16);
   LLVMValueRef i32_mask_nosign = lp_build_const_int_vec(gallivm, i32_type, 0x7fff);
   LLVMValueRef i32_was_infnan  = lp_build_const_int_vec(gallivm, i32_type, 0x7bff);
   LLVMValueRef i32_exp_infnan  = lp_build_const_int_vec(gallivm, i32_type, 0xff << 23);
   LLVMValueRef f32_magic       = LLVMBuildBitCast(builder,
                                                   lp_build_const_int_vec(gallivm, i32_type,
                                                                          (254 - 15) << 23),
                                                   float_vec_type, "");

   /* Convert int16 vector to int32 vector by zero ext */
   LLVMValueRef h = LLVMBuildZExt(builder, src, int_vec_type, "");

   /* Exponent / mantissa bits */
   LLVMValueRef expmant = LLVMBuildAnd(builder, i32_mask_nosign, h, "");
   LLVMValueRef shifted = LLVMBuildBitCast(builder,
                                           LLVMBuildShl(builder, expmant, i32_13, ""),
                                           float_vec_type, "");

   /* Exponent adjust */
   LLVMValueRef scaled = LLVMBuildBitCast(builder,
                                          LLVMBuildFMul(builder, shifted, f32_magic, ""),
                                          int_vec_type, "");

   /* Make sure Inf/NaN survive */
   LLVMValueRef b_wasinfnan = lp_build_compare(gallivm, i32_type, PIPE_FUNC_GREATER,
                                               expmant, i32_was_infnan);
   LLVMValueRef infnanexp   = LLVMBuildAnd(builder, b_wasinfnan, i32_exp_infnan, "");

   /* Sign bit, moved from bit 15 to bit 31 */
   LLVMValueRef justsign = LLVMBuildXor(builder, h, expmant, "");
   LLVMValueRef sign     = LLVMBuildShl(builder, justsign, i32_16, "");

   /* Combine the sign and the Inf/NaN exponent with the scaled value */
   LLVMValueRef sign_inf = LLVMBuildOr(builder, sign, infnanexp, "");
   LLVMValueRef final    = LLVMBuildOr(builder, scaled, sign_inf, "");

   /* Cast from int32 vector to float32 vector */
   return LLVMBuildBitCast(builder, final, float_vec_type, "");
}


/**
 * Special case for converting clamped IEEE-754 floats to unsigned norms.
 *
 * The mathematical voodoo below may seem excessive but it is actually
 * paramount we do it this way for several reasons. First, there is no
 * Intel SSE instruction to convert single precision FP to unsigned
 * integers. Second, even if there were, since the FP mantissa takes only
 * a fraction of the register bits, the typical scale-and-cast approach
 * would require double precision for accurate results, and therefore half
 * the throughput.
 *
 * Although the result values can be scaled to an arbitrary bit width specified
 * by dst_width, the actual result type will have the same width as the input.
 *
 * Ex: src = { float, float, float, float }
 * return { i32, i32, i32, i32 } where each value is in [0, 2^dst_width-1].
 */
LLVMValueRef
lp_build_clamped_float_to_unsigned_norm(struct gallivm_state *gallivm,
                                        struct lp_type src_type,
                                        unsigned dst_width,
                                        LLVMValueRef src)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(gallivm, src_type);
   LLVMValueRef res;
   unsigned mantissa;

   assert(src_type.floating);
   assert(dst_width <= src_type.width);
   src_type.sign = FALSE;

   mantissa = lp_mantissa(src_type);

   if (dst_width <= mantissa) {
      /*
       * Apply magic coefficients that will make the desired result appear
       * in the least significant bits of the mantissa, with correct rounding.
       *
       * This only works if the destination width fits in the mantissa.
       */
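
      /*
       * Illustrative worked example (not generated code): for dst_width = 8
       * and single precision (mantissa = 23), scale = 255/256 and
       * bias = 2^(23 - 8) = 32768. A clamped x in [0, 1] maps to
       * 32768 + x*255/256, whose ULP is 2^(15 - 23) = 2^-8, so the low 8
       * mantissa bits hold x*255 rounded to nearest; the bitcast and mask
       * below then extract exactly round(x*255).
       */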

      unsigned long long ubound;
      unsigned long long mask;
      double scale;
      double bias;

      ubound = (1ULL << dst_width);
      mask = ubound - 1;
      scale = (double)mask/ubound;
      bias = (double)(1ULL << (mantissa - dst_width));

      res = LLVMBuildFMul(builder, src, lp_build_const_vec(gallivm, src_type, scale), "");
      res = LLVMBuildFAdd(builder, res, lp_build_const_vec(gallivm, src_type, bias), "");
      res = LLVMBuildBitCast(builder, res, int_vec_type, "");
      res = LLVMBuildAnd(builder, res,
                         lp_build_const_int_vec(gallivm, src_type, mask), "");
   }
   else if (dst_width == (mantissa + 1)) {
      /*
       * The destination width matches exactly what can be represented in
       * floating point (i.e., mantissa + 1 bits). So do a straight
       * multiplication followed by casting. No further rounding is necessary.
       */

      double scale;

      scale = (double)((1ULL << dst_width) - 1);

      res = LLVMBuildFMul(builder, src,
                          lp_build_const_vec(gallivm, src_type, scale), "");
      res = LLVMBuildFPToSI(builder, res, int_vec_type, "");
   }
   else {
      /*
       * The destination exceeds what can be represented in the floating point.
       * So multiply by the largest power of two we can get away with, and then
       * subtract the most significant bit to rescale to normalized values.
       *
       * The largest power of two factor we can get away with is
       * (1 << (src_type.width - 1)), because we need to use signed integers.
       * In theory it should be (1 << (src_type.width - 2)), but IEEE 754
       * rules state that INT_MIN should be returned in FPToSI, which is the
       * correct result for values near 1.0!
       *
       * This means we get (src_type.width - 1) correct bits for values near
       * 0.0, and (mantissa + 1) correct bits for values near 1.0. Equally or
       * more important, we also get exact results for 0.0 and 1.0.
       */
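
      /*
       * Illustrative worked example (not generated code): for dst_width = 32
       * on 32-bit registers, n = 31, lshift = 1 and rshift = 31.
       * x = 1.0 gives FPToSI(1.0 * 2^31) = INT_MIN = 0x80000000 (thanks to
       * the IEEE overflow rule above), so (res << 1) - (res >> 31) =
       * 0 - 1 = 0xffffffff = 2^32 - 1, while x = 0.0 stays 0. Both
       * endpoints come out exact.
       */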

      unsigned n = MIN2(src_type.width - 1, dst_width);

      double scale = (double)(1ULL << n);
      unsigned lshift = dst_width - n;
      unsigned rshift = n;
      LLVMValueRef lshifted;
      LLVMValueRef rshifted;

      res = LLVMBuildFMul(builder, src,
                          lp_build_const_vec(gallivm, src_type, scale), "");
      res = LLVMBuildFPToSI(builder, res, int_vec_type, "");

      /*
       * Align the most significant bit to its final place.
       *
       * This will cause 1.0 to overflow to 0, but the later adjustment will
       * get it right.
       */
      lshifted = LLVMBuildShl(builder, res,
                              lp_build_const_int_vec(gallivm, src_type,
                                                     lshift), "");

      /*
       * Align the most significant bit to the right.
       */
      rshifted = LLVMBuildLShr(builder, res,
                               lp_build_const_int_vec(gallivm, src_type, rshift),
                               "");

      /*
       * Subtract the MSB, shifted down to the LSB, thereby re-scaling from
       * (1 << dst_width) to ((1 << dst_width) - 1).
       */
      res = LLVMBuildSub(builder, lshifted, rshifted, "");
   }

   return res;
}


/**
 * Inverse of lp_build_clamped_float_to_unsigned_norm above.
 *
 * Ex: src = { i32, i32, i32, i32 } with values in range [0, 2^src_width-1]
 * return {float, float, float, float} with values in range [0, 1].
 */
LLVMValueRef
lp_build_unsigned_norm_to_float(struct gallivm_state *gallivm,
                                unsigned src_width,
                                struct lp_type dst_type,
                                LLVMValueRef src)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMTypeRef vec_type = lp_build_vec_type(gallivm, dst_type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(gallivm, dst_type);
   LLVMValueRef bias_;
   LLVMValueRef res;
   unsigned mantissa;
   unsigned n;
   unsigned long long ubound;
   unsigned long long mask;
   double scale;
   double bias;

   assert(dst_type.floating);

   mantissa = lp_mantissa(dst_type);

   if (src_width <= (mantissa + 1)) {
      /*
       * The source width fits what can be represented in floating
       * point (i.e., mantissa + 1 bits). So do a straight multiplication
       * followed by casting. No further rounding is necessary.
       */

      scale = 1.0/(double)((1ULL << src_width) - 1);
      res = LLVMBuildSIToFP(builder, src, vec_type, "");
      res = LLVMBuildFMul(builder, res,
                          lp_build_const_vec(gallivm, dst_type, scale), "");
   }
   else {
      /*
       * The source width exceeds what can be represented in floating
       * point. So truncate the incoming values.
       */
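
      /*
       * Illustrative worked example (not generated code): for src_width = 32
       * and single precision (mantissa = 23), n = 23, shift = 9,
       * bias = 2^(23 - 23) = 1.0 and scale = 2^23/(2^23 - 1). OR-ing the
       * truncated 23-bit value m into the bit pattern of 1.0 yields the
       * float 1 + m*2^-23; subtracting 1.0 and multiplying by the scale
       * yields m/(2^23 - 1), i.e. the normalized value of the 23 most
       * significant source bits.
       */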
      n = MIN2(mantissa, src_width);

      ubound = ((unsigned long long)1 << n);
      mask = ubound - 1;
      scale = (double)ubound/mask;
      bias = (double)((unsigned long long)1 << (mantissa - n));

      res = src;

      if (src_width > mantissa) {
         int shift = src_width - mantissa;
         res = LLVMBuildLShr(builder, res,
                             lp_build_const_int_vec(gallivm, dst_type, shift), "");
      }

      bias_ = lp_build_const_vec(gallivm, dst_type, bias);

      res = LLVMBuildOr(builder,
                        res,
                        LLVMBuildBitCast(builder, bias_, int_vec_type, ""), "");

      res = LLVMBuildBitCast(builder, res, vec_type, "");

      res = LLVMBuildFSub(builder, res, bias_, "");
      res = LLVMBuildFMul(builder, res, lp_build_const_vec(gallivm, dst_type, scale), "");
   }

   return res;
}


/**
 * Generic type conversion.
 *
 * TODO: Take a precision argument, or even better, add a new precision member
 * to the lp_type union.
 */
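
/*
 * Example usage (an illustrative sketch, not code from this file; the local
 * names are hypothetical): converting four 4 x f32 registers into a single
 * 16 x unorm8 register, i.e. the 4x4f --> 1x16ub fast path below.
 *
 *    struct lp_type f32x4 = lp_type_float_vec(32, 128);  // 4 x f32
 *    struct lp_type u8x16;                               // 16 x unorm8
 *    memset(&u8x16, 0, sizeof u8x16);
 *    u8x16.width = 8;
 *    u8x16.length = 16;
 *    u8x16.norm = 1;
 *    LLVMValueRef rgba[4];  // four 4 x f32 vectors, previously computed
 *    LLVMValueRef packed;
 *    lp_build_conv(gallivm, f32x4, u8x16, rgba, 4, &packed, 1);
 */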
void
lp_build_conv(struct gallivm_state *gallivm,
              struct lp_type src_type,
              struct lp_type dst_type,
              const LLVMValueRef *src, unsigned num_srcs,
              LLVMValueRef *dst, unsigned num_dsts)
{
   LLVMBuilderRef builder = gallivm->builder;
   struct lp_type tmp_type;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned num_tmps;
   unsigned i;

   /* We must not lose or gain channels. Only precision may change. */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
   assert(num_dsts <= LP_MAX_VECTOR_LENGTH);

   tmp_type = src_type;
   for(i = 0; i < num_srcs; ++i) {
      assert(lp_check_value(src_type, src[i]));
      tmp[i] = src[i];
   }
   num_tmps = num_srcs;

   /* Special case 4x4f --> 1x16ub
    */
   if (src_type.floating == 1 &&
       src_type.fixed    == 0 &&
       src_type.sign     == 1 &&
       src_type.norm     == 0 &&
       src_type.width    == 32 &&
       src_type.length   == 4 &&

       dst_type.floating == 0 &&
       dst_type.fixed    == 0 &&
       dst_type.sign     == 0 &&
       dst_type.norm     == 1 &&
       dst_type.width    == 8 &&
       dst_type.length   == 16 &&

       4 * num_dsts      == num_srcs &&

       util_cpu_caps.has_sse2)
   {
      struct lp_build_context bld;
      struct lp_type int16_type = dst_type;
      struct lp_type int32_type = dst_type;
      LLVMValueRef const_255f;
      unsigned j;

      lp_build_context_init(&bld, gallivm, src_type);

      int16_type.width *= 2;
      int16_type.length /= 2;
      int16_type.sign = 1;

      int32_type.width *= 4;
      int32_type.length /= 4;
      int32_type.sign = 1;

      const_255f = lp_build_const_vec(gallivm, src_type, 255.0f);

      for (i = 0; i < num_dsts; ++i, src += 4) {
         LLVMValueRef lo, hi;

         for (j = 0; j < 4; ++j) {
            tmp[j] = LLVMBuildFMul(builder, src[j], const_255f, "");
            tmp[j] = lp_build_iround(&bld, tmp[j]);
         }

         /* relying on clamping behavior of sse2 intrinsics here */
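         /*
          * With SSE2 this lowers to packssdw (i32 -> i16, signed saturation)
          * followed by packuswb (i16 -> u8, unsigned saturation), so values
          * outside [0, 255] are clamped for free by the pack instructions.
          */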
         lo = lp_build_pack2(gallivm, int32_type, int16_type, tmp[0], tmp[1]);
         hi = lp_build_pack2(gallivm, int32_type, int16_type, tmp[2], tmp[3]);
         dst[i] = lp_build_pack2(gallivm, int16_type, dst_type, lo, hi);
      }

      return;
   }

   /* Special case 2x8f --> 1x16ub
    */
   else if (src_type.floating == 1 &&
            src_type.fixed    == 0 &&
            src_type.sign     == 1 &&
            src_type.norm     == 0 &&
            src_type.width    == 32 &&
            src_type.length   == 8 &&

            dst_type.floating == 0 &&
            dst_type.fixed    == 0 &&
            dst_type.sign     == 0 &&
            dst_type.norm     == 1 &&
            dst_type.width    == 8 &&
            dst_type.length   == 16 &&

            2 * num_dsts      == num_srcs &&

            util_cpu_caps.has_avx) {

      struct lp_build_context bld;
      struct lp_type int16_type = dst_type;
      struct lp_type int32_type = dst_type;
      LLVMValueRef const_255f;

      lp_build_context_init(&bld, gallivm, src_type);

      int16_type.width *= 2;
      int16_type.length /= 2;
      int16_type.sign = 1;

      int32_type.width *= 4;
      int32_type.length /= 4;
      int32_type.sign = 1;

      const_255f = lp_build_const_vec(gallivm, src_type, 255.0f);

      for (i = 0; i < num_dsts; ++i, src += 2) {
         LLVMValueRef lo, hi, a, b;

         a = LLVMBuildFMul(builder, src[0], const_255f, "");
         b = LLVMBuildFMul(builder, src[1], const_255f, "");

         a = lp_build_iround(&bld, a);
         b = lp_build_iround(&bld, b);

         tmp[0] = lp_build_extract_range(gallivm, a, 0, 4);
         tmp[1] = lp_build_extract_range(gallivm, a, 4, 4);
         tmp[2] = lp_build_extract_range(gallivm, b, 0, 4);
         tmp[3] = lp_build_extract_range(gallivm, b, 4, 4);

         /* relying on clamping behavior of sse2 intrinsics here */
         lo = lp_build_pack2(gallivm, int32_type, int16_type, tmp[0], tmp[1]);
         hi = lp_build_pack2(gallivm, int32_type, int16_type, tmp[2], tmp[3]);
         dst[i] = lp_build_pack2(gallivm, int16_type, dst_type, lo, hi);
      }

      return;
   }

   /* Pre convert half-floats to floats
    */
   else if (src_type.floating && src_type.width == 16)
   {
      for(i = 0; i < num_tmps; ++i)
         tmp[i] = lp_build_half_to_float(gallivm, src_type, tmp[i]);

      tmp_type.width = 32;
   }

   /*
    * Clamp if necessary
    */

   if(memcmp(&src_type, &dst_type, sizeof src_type) != 0) {
      struct lp_build_context bld;
      double src_min = lp_const_min(src_type);
      double dst_min = lp_const_min(dst_type);
      double src_max = lp_const_max(src_type);
      double dst_max = lp_const_max(dst_type);
      LLVMValueRef thres;

      lp_build_context_init(&bld, gallivm, tmp_type);

      if(src_min < dst_min) {
         if(dst_min == 0.0)
            thres = bld.zero;
         else
            thres = lp_build_const_vec(gallivm, src_type, dst_min);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_max(&bld, tmp[i], thres);
      }

      if(src_max > dst_max) {
         if(dst_max == 1.0)
            thres = bld.one;
         else
            thres = lp_build_const_vec(gallivm, src_type, dst_max);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_min(&bld, tmp[i], thres);
      }
   }

   /*
    * Scale to the narrowest range
    */

   if(dst_type.floating) {
      /* Nothing to do */
   }
   else if(tmp_type.floating) {
      if(!dst_type.fixed && !dst_type.sign && dst_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_clamped_float_to_unsigned_norm(gallivm,
                                                             tmp_type,
                                                             dst_type.width,
                                                             tmp[i]);
         }
         tmp_type.floating = FALSE;
      }
      else {
         double dst_scale = lp_const_scale(dst_type);
         LLVMTypeRef tmp_vec_type;

         if (dst_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(gallivm, tmp_type, dst_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
         }

         /* Use an equally sized integer for intermediate computations */
         tmp_type.floating = FALSE;
         tmp_vec_type = lp_build_vec_type(gallivm, tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(dst_type.sign)
               tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
            tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
#endif
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);
      unsigned src_offset = lp_const_offset(src_type);
      unsigned dst_offset = lp_const_offset(dst_type);

      /* Compensate for different offsets */
      if (dst_offset > src_offset && src_type.width > dst_type.width) {
         for (i = 0; i < num_tmps; ++i) {
            LLVMValueRef shifted;
            LLVMValueRef shift = lp_build_const_int_vec(gallivm, tmp_type, src_shift - 1);
            if(src_type.sign)
               shifted = LLVMBuildAShr(builder, tmp[i], shift, "");
            else
               shifted = LLVMBuildLShr(builder, tmp[i], shift, "");

            tmp[i] = LLVMBuildSub(builder, tmp[i], shifted, "");
         }
      }

      if(src_shift > dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(gallivm, tmp_type,
                                                     src_shift - dst_shift);
         for(i = 0; i < num_tmps; ++i)
            if(src_type.sign)
               tmp[i] = LLVMBuildAShr(builder, tmp[i], shift, "");
            else
               tmp[i] = LLVMBuildLShr(builder, tmp[i], shift, "");
      }
   }

   /*
    * Truncate or expand bit width
    *
    * No data conversion should happen here, although the sign bits are
    * crucial to avoid bad clamping.
    */

   {
      struct lp_type new_type;

      new_type = tmp_type;
      new_type.sign   = dst_type.sign;
      new_type.width  = dst_type.width;
      new_type.length = dst_type.length;

      lp_build_resize(gallivm, tmp_type, new_type, tmp, num_srcs, tmp, num_dsts);

      tmp_type = new_type;
      num_tmps = num_dsts;
   }

   /*
    * Scale to the widest range
    */

   if(src_type.floating) {
      /* Nothing to do */
   }
   else if(!src_type.floating && dst_type.floating) {
      if(!src_type.fixed && !src_type.sign && src_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_unsigned_norm_to_float(gallivm,
                                                     src_type.width,
                                                     dst_type,
                                                     tmp[i]);
         }
         tmp_type.floating = TRUE;
      }
      else {
         double src_scale = lp_const_scale(src_type);
         LLVMTypeRef tmp_vec_type;

         /* Use an equally sized floating point type for intermediate computations */
         tmp_type.floating = TRUE;
         tmp_type.sign = TRUE;
         tmp_vec_type = lp_build_vec_type(gallivm, tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(src_type.sign)
               tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildUIToFP(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildUIToFP */
            tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
#endif
         }

         if (src_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(gallivm, tmp_type, 1.0/src_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);
      unsigned src_offset = lp_const_offset(src_type);
      unsigned dst_offset = lp_const_offset(dst_type);

      if (src_shift < dst_shift) {
         LLVMValueRef pre_shift[LP_MAX_VECTOR_LENGTH];
         LLVMValueRef shift = lp_build_const_int_vec(gallivm, tmp_type, dst_shift - src_shift);

         for (i = 0; i < num_tmps; ++i) {
            pre_shift[i] = tmp[i];
            tmp[i] = LLVMBuildShl(builder, tmp[i], shift, "");
         }

         /* Compensate for different offsets */
         if (dst_offset > src_offset) {
            for (i = 0; i < num_tmps; ++i) {
               tmp[i] = LLVMBuildSub(builder, tmp[i], pre_shift[i], "");
            }
         }
      }
   }

   for(i = 0; i < num_dsts; ++i) {
      dst[i] = tmp[i];
      assert(lp_check_value(dst_type, dst[i]));
   }
}


/**
 * Bit mask conversion.
 *
 * This will convert the integer masks that match the given types.
 *
 * The mask values should be 0 or -1, i.e., all bits either set to zero or one.
 * Any other value will likely cause unpredictable results.
 *
 * This is basically a very trimmed down version of lp_build_conv.
 */
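
/*
 * Example usage (an illustrative sketch, not code from this file; the local
 * names are hypothetical): narrowing four 4 x i32 masks, e.g. produced by
 * 32-bit float comparisons, into one 16 x i8 mask:
 *
 *    struct lp_type mask32 = lp_type_int_vec(32, 128);  // 4 x i32
 *    struct lp_type mask8  = lp_type_int_vec(8, 128);   // 16 x i8
 *    LLVMValueRef in[4];   // each lane all zeros or all ones
 *    LLVMValueRef out;
 *    lp_build_conv_mask(gallivm, mask32, mask8, in, 4, &out, 1);
 */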
void
lp_build_conv_mask(struct gallivm_state *gallivm,
                   struct lp_type src_type,
                   struct lp_type dst_type,
                   const LLVMValueRef *src, unsigned num_srcs,
                   LLVMValueRef *dst, unsigned num_dsts)
{
   /* We must not lose or gain channels. Only precision may change. */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   /*
    * Drop the normalized, fixed and floating point flags;
    * we assume all values are 0 or -1, i.e. plain integer bit masks.
    */

   src_type.floating = FALSE;
   src_type.fixed = FALSE;
   src_type.sign = TRUE;
   src_type.norm = FALSE;

   dst_type.floating = FALSE;
   dst_type.fixed = FALSE;
   dst_type.sign = TRUE;
   dst_type.norm = FALSE;

   /*
    * Truncate or expand bit width
    */

   lp_build_resize(gallivm, src_type, dst_type, src, num_srcs, dst, num_dsts);
}