Merge branch 'mesa_7_6_branch'
[mesa.git] / src / gallium / drivers / llvmpipe / lp_bld_conv.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29 /**
30 * @file
31 * Helper functions for type conversions.
32 *
33 * We want to use the fastest type for a given computation whenever feasible.
34 * The other side of this is that we need to be able convert between several
35 * types accurately and efficiently.
36 *
37 * Conversion between types of different bit width is quite complex, since a
38 * change in width also changes the vector length and the number of vectors
39 * needed to hold the same data. Keep in mind a few invariants of type conversions:
40 *
41 * - register width must remain constant:
42 *
43 * src_type.width * src_type.length == dst_type.width * dst_type.length
44 *
45 * - total number of elements must remain constant:
46 *
47 * src_type.length * num_srcs == dst_type.length * num_dsts
48 *
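 * For example, converting four vectors of 4 x f32 into a single vector of
 * 16 x u8 keeps both invariants: 4 * 32 == 16 * 8 for the register width and
 * 4 * 4 == 16 * 1 for the element count.
 *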
49 * It is not always possible to do the conversion both accurately and
50 * efficiently, usually due to lack of adequate machine instructions. In these
51 * cases it is important not to take shortcuts and sacrifice accuracy, as
52 * these functions can be used anywhere. In the future we might have a
53 * precision parameter which can gauge the accuracy vs. efficiency compromise,
54 * but for now, if the data conversion between two stages happens to be the
55 * bottleneck, then most likely one should just avoid converting at all and run
56 * both stages with the same type.
57 *
58 * Make sure to run lp_test_conv unit test after any change to this file.
59 *
60 * @author Jose Fonseca <jfonseca@vmware.com>
61 */
62
63
64 #include "util/u_debug.h"
65 #include "util/u_math.h"
66
67 #include "lp_bld_type.h"
68 #include "lp_bld_const.h"
69 #include "lp_bld_intr.h"
70 #include "lp_bld_arit.h"
71 #include "lp_bld_conv.h"
72
73
74 /**
75 * Special case for converting clamped IEEE-754 floats to unsigned norms.
76 *
77 * The mathematical voodoo below may seem excessive but it is actually
78 * paramount we do it this way for several reasons. First, there is no single
79 * precision FP to unsigned integer conversion Intel SSE instruction. Second,
80 * even if there were, since the FP mantissa takes only a fraction of the
81 * register bits, the typical scale-and-cast approach would require double
82 * precision for accurate results, and therefore half the throughput.
83 *
84 * Although the result values can be scaled to an arbitrary bit width specified
85 * by dst_width, the actual result type will have the same width as the source type.
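 *
 * A worked example, assuming 32-bit floats (23-bit mantissa) and dst_width = 8:
 * n = 8, scale = 255/256 and bias = 2^(23 - 8) = 32768.  For x in [0, 1] the
 * value x*scale + bias lies in [2^15, 2^16), so the float's exponent is fixed
 * and its 8 least significant mantissa bits hold x * 255 (rounded), i.e. the
 * unorm8 value, which the code below extracts with a mask (or shifts into
 * place when dst_width > n).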
86 */
87 LLVMValueRef
88 lp_build_clamped_float_to_unsigned_norm(LLVMBuilderRef builder,
89 struct lp_type src_type,
90 unsigned dst_width,
91 LLVMValueRef src)
92 {
93 LLVMTypeRef int_vec_type = lp_build_int_vec_type(src_type);
94 LLVMValueRef res;
95 unsigned mantissa;
96 unsigned n;
97 unsigned long long ubound;
98 unsigned long long mask;
99 double scale;
100 double bias;
101
102 assert(src_type.floating);
103
104 mantissa = lp_mantissa(src_type);
105
106 /* We cannot carry more bits than the mantissa */
107 n = MIN2(mantissa, dst_width);
108
109 /* These magic coefficients make the desired result appear in the
110 * least significant bits of the mantissa.
111 */
112 ubound = ((unsigned long long)1 << n);
113 mask = ubound - 1;
114 scale = (double)mask/ubound;
115 bias = (double)((unsigned long long)1 << (mantissa - n));
116
117 res = LLVMBuildMul(builder, src, lp_build_const_scalar(src_type, scale), "");
118 res = LLVMBuildAdd(builder, res, lp_build_const_scalar(src_type, bias), "");
119 res = LLVMBuildBitCast(builder, res, int_vec_type, "");
120
121 if(dst_width > n) {
122 int shift = dst_width - n;
123 res = LLVMBuildShl(builder, res, lp_build_int_const_scalar(src_type, shift), "");
124
125 /* TODO: Fill in the empty lower bits for additional precision? */
126 #if 0
127 {
128 LLVMValueRef msb;
129 msb = LLVMBuildLShr(builder, res, lp_build_int_const_scalar(src_type, dst_width - 1), "");
130 msb = LLVMBuildShl(builder, msb, lp_build_int_const_scalar(src_type, shift), "");
131 msb = LLVMBuildSub(builder, msb, lp_build_int_const_scalar(src_type, 1), "");
132 res = LLVMBuildOr(builder, res, msb, "");
133 }
134 #elif 0
135 while(shift > 0) {
136 res = LLVMBuildOr(builder, res, LLVMBuildLShr(builder, res, lp_build_int_const_scalar(src_type, n), ""), "");
137 shift -= n;
138 n *= 2;
139 }
140 #endif
141 }
142 else
143 res = LLVMBuildAnd(builder, res, lp_build_int_const_scalar(src_type, mask), "");
144
145 return res;
146 }
147
148
149 /**
150 * Inverse of lp_build_clamped_float_to_unsigned_norm above.
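 *
 * Continuing the example above (8-bit unorm u back to 32-bit float): here
 * scale = 256/255 and bias = 2^15.  OR-ing u into the low mantissa bits of
 * bias gives the float 2^15 + u/256; subtracting bias leaves u/256, and
 * multiplying by 256/255 yields u/255, the normalized value.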
151 */
152 LLVMValueRef
153 lp_build_unsigned_norm_to_float(LLVMBuilderRef builder,
154 unsigned src_width,
155 struct lp_type dst_type,
156 LLVMValueRef src)
157 {
158 LLVMTypeRef vec_type = lp_build_vec_type(dst_type);
159 LLVMTypeRef int_vec_type = lp_build_int_vec_type(dst_type);
160 LLVMValueRef bias_;
161 LLVMValueRef res;
162 unsigned mantissa;
163 unsigned n;
164 unsigned long long ubound;
165 unsigned long long mask;
166 double scale;
167 double bias;
168
169 mantissa = lp_mantissa(dst_type);
170
171 n = MIN2(mantissa, src_width);
172
173 ubound = ((unsigned long long)1 << n);
174 mask = ubound - 1;
175 scale = (double)ubound/mask;
176 bias = (double)((unsigned long long)1 << (mantissa - n));
177
178 res = src;
179
180 if(src_width > mantissa) {
181 int shift = src_width - mantissa;
182 res = LLVMBuildLShr(builder, res, lp_build_int_const_scalar(dst_type, shift), "");
183 }
184
185 bias_ = lp_build_const_scalar(dst_type, bias);
186
187 res = LLVMBuildOr(builder,
188 res,
189 LLVMBuildBitCast(builder, bias_, int_vec_type, ""), "");
190
191 res = LLVMBuildBitCast(builder, res, vec_type, "");
192
193 res = LLVMBuildSub(builder, res, bias_, "");
194 res = LLVMBuildMul(builder, res, lp_build_const_scalar(dst_type, scale), "");
195
196 return res;
197 }
198
199
200 /**
201 * Build shuffle vectors that match PUNPCKLxx and PUNPCKHxx instructions.
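 *
 * E.g. for n = 8, lo_hi = 0 yields the indices {0, 8, 1, 9, 2, 10, 3, 11} and
 * lo_hi = 1 yields {4, 12, 5, 13, 6, 14, 7, 15}, interleaving the low or high
 * half of the first operand with the corresponding half of the second.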
202 */
203 static LLVMValueRef
204 lp_build_const_unpack_shuffle(unsigned n, unsigned lo_hi)
205 {
206 LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
207 unsigned i, j;
208
209 assert(n <= LP_MAX_VECTOR_LENGTH);
210 assert(lo_hi < 2);
211
212 /* TODO: cache results in a static table */
213
214 for(i = 0, j = lo_hi*n/2; i < n; i += 2, ++j) {
215 elems[i + 0] = LLVMConstInt(LLVMInt32Type(), 0 + j, 0);
216 elems[i + 1] = LLVMConstInt(LLVMInt32Type(), n + j, 0);
217 }
218
219 return LLVMConstVector(elems, n);
220 }
221
222
223 /**
224 * Build shuffle vectors that match PACKxx instructions.
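 *
 * E.g. for n = 8 this yields {0, 2, 4, 6, 8, 10, 12, 14}, selecting the
 * even-indexed narrow elements of the two (bitcast) operands, i.e. the low
 * half of each original wider element on little endian.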
225 */
226 static LLVMValueRef
227 lp_build_const_pack_shuffle(unsigned n)
228 {
229 LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
230 unsigned i;
231
232 assert(n <= LP_MAX_VECTOR_LENGTH);
233
234 /* TODO: cache results in a static table */
235
236 for(i = 0; i < n; ++i)
237 elems[i] = LLVMConstInt(LLVMInt32Type(), 2*i, 0);
238
239 return LLVMConstVector(elems, n);
240 }
241
242
243 /**
244 * Expand the bit width.
245 *
246 * This only changes the number of bits with which the values are represented,
247 * not the values themselves.
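 *
 * E.g. expanding a vector of 16 x i8 yields two vectors of 8 x i16: each byte
 * is interleaved with zero (PUNPCKLBW/PUNPCKHBW against a zero vector), which
 * amounts to zero extension on little endian.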
248 */
249 static void
250 lp_build_expand(LLVMBuilderRef builder,
251 struct lp_type src_type,
252 struct lp_type dst_type,
253 LLVMValueRef src,
254 LLVMValueRef *dst, unsigned num_dsts)
255 {
256 unsigned num_tmps;
257 unsigned i;
258
259 /* Register width must remain constant */
260 assert(src_type.width * src_type.length == dst_type.width * dst_type.length);
261
262 /* We must not lose or gain channels, only precision */
263 assert(src_type.length == dst_type.length * num_dsts);
264
265 num_tmps = 1;
266 dst[0] = src;
267
268 while(src_type.width < dst_type.width) {
269 struct lp_type new_type = src_type;
270 LLVMTypeRef new_vec_type;
271
272 new_type.width *= 2;
273 new_type.length /= 2;
274 new_vec_type = lp_build_vec_type(new_type);
275
276 for(i = num_tmps; i--; ) {
277 LLVMValueRef zero;
278 LLVMValueRef shuffle_lo;
279 LLVMValueRef shuffle_hi;
280 LLVMValueRef lo;
281 LLVMValueRef hi;
282
283 zero = lp_build_zero(src_type);
284 shuffle_lo = lp_build_const_unpack_shuffle(src_type.length, 0);
285 shuffle_hi = lp_build_const_unpack_shuffle(src_type.length, 1);
286
287 /* PUNPCKLBW, PUNPCKHBW */
288 lo = LLVMBuildShuffleVector(builder, dst[i], zero, shuffle_lo, "");
289 hi = LLVMBuildShuffleVector(builder, dst[i], zero, shuffle_hi, "");
290
291 dst[2*i + 0] = LLVMBuildBitCast(builder, lo, new_vec_type, "");
292 dst[2*i + 1] = LLVMBuildBitCast(builder, hi, new_vec_type, "");
293 }
294
295 src_type = new_type;
296
297 num_tmps *= 2;
298 }
299
300 assert(num_tmps == num_dsts);
301 }
302
303
304 /**
305 * Non-interleaved pack.
306 *
307 * This will move values as follows:
308 *
309 * lo = __ l0 __ l1 __ l2 __.. __ ln
310 * hi = __ h0 __ h1 __ h2 __.. __ hn
311 * res = l0 l1 l2 .. ln h0 h1 h2 .. hn
312 *
313 * TODO: handle saturation consistently.
314 */
315 static LLVMValueRef
316 lp_build_pack2(LLVMBuilderRef builder,
317 struct lp_type src_type,
318 struct lp_type dst_type,
319 boolean clamped,
320 LLVMValueRef lo,
321 LLVMValueRef hi)
322 {
323 LLVMTypeRef src_vec_type = lp_build_vec_type(src_type);
324 LLVMTypeRef dst_vec_type = lp_build_vec_type(dst_type);
325 LLVMValueRef shuffle;
326 LLVMValueRef res;
327
328 /* Register width must remain constant */
329 assert(src_type.width * src_type.length == dst_type.width * dst_type.length);
330
331 /* We must not lose or gain channels, only precision */
332 assert(src_type.length * 2 == dst_type.length);
333
334 assert(!src_type.floating);
335 assert(!dst_type.floating);
336
337 #if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
338 if(src_type.width * src_type.length == 128) {
339 /* The X86 non-interleaved pack instructions all take signed inputs and
340 * saturate them, so saturate beforehand. */
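      /* E.g. PACKUSWB reads a u16 input of 0xff00 as -256 and would saturate
       * it to 0 rather than 255; clamping to dst_max beforehand avoids that. */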
341 if(!src_type.sign && !clamped) {
342 struct lp_build_context bld;
343 unsigned dst_bits = dst_type.sign ? dst_type.width - 1 : dst_type.width;
344 LLVMValueRef dst_max = lp_build_int_const_scalar(src_type, ((unsigned long long)1 << dst_bits) - 1);
345 lp_build_context_init(&bld, builder, src_type);
346 lo = lp_build_min(&bld, lo, dst_max);
347 hi = lp_build_min(&bld, hi, dst_max);
348 }
349
350 switch(src_type.width) {
351 case 32:
352 if(dst_type.sign)
353 res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packssdw.128", src_vec_type, lo, hi);
354 else
355 /* PACKUSDW is the only intrinsic with a consistent signature */
356 return lp_build_intrinsic_binary(builder, "llvm.x86.sse41.packusdw", dst_vec_type, lo, hi);
357 break;
358
359 case 16:
360 if(dst_type.sign)
361 res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packsswb.128", src_vec_type, lo, hi);
362 else
363 res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packuswb.128", src_vec_type, lo, hi);
364 break;
365
366 default:
367 assert(0);
368 return LLVMGetUndef(dst_vec_type);
369 break;
370 }
371
372 res = LLVMBuildBitCast(builder, res, dst_vec_type, "");
373 return res;
374 }
375 #endif
376
377 lo = LLVMBuildBitCast(builder, lo, dst_vec_type, "");
378 hi = LLVMBuildBitCast(builder, hi, dst_vec_type, "");
379
380 shuffle = lp_build_const_pack_shuffle(dst_type.length);
381
382 res = LLVMBuildShuffleVector(builder, lo, hi, shuffle, "");
383
384 return res;
385 }
386
387
388 /**
389 * Truncate the bit width.
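 *
 * E.g. packing four vectors of 4 x i32 into one vector of 16 x i8 takes two
 * rounds of lp_build_pack2: first from 32 to 16 bits, then from 16 to 8 bits.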
390 *
391 * TODO: Handle saturation consistently.
392 */
393 static LLVMValueRef
394 lp_build_pack(LLVMBuilderRef builder,
395 struct lp_type src_type,
396 struct lp_type dst_type,
397 boolean clamped,
398 const LLVMValueRef *src, unsigned num_srcs)
399 {
400 LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
401 unsigned i;
402
403 /* Register width must remain constant */
404 assert(src_type.width * src_type.length == dst_type.width * dst_type.length);
405
406 /* We must not lose or gain channels, only precision */
407 assert(src_type.length * num_srcs == dst_type.length);
408
409 for(i = 0; i < num_srcs; ++i)
410 tmp[i] = src[i];
411
412 while(src_type.width > dst_type.width) {
413 struct lp_type new_type = src_type;
414
415 new_type.width /= 2;
416 new_type.length *= 2;
417
418 /* Take the sign change into consideration only in the last step */
419 if(new_type.width == dst_type.width)
420 new_type.sign = dst_type.sign;
421
422 num_srcs /= 2;
423
424 for(i = 0; i < num_srcs; ++i)
425 tmp[i] = lp_build_pack2(builder, src_type, new_type, clamped,
426 tmp[2*i + 0], tmp[2*i + 1]);
427
428 src_type = new_type;
429 }
430
431 assert(num_srcs == 1);
432
433 return tmp[0];
434 }
435
436
437 /**
438 * Generic type conversion.
439 *
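 * For example, a common case is converting four vectors of 4 x f32 in [0, 1]
 * (float colors) into a single vector of 16 x u8 (an 8-bit normalized render
 * target format): the values are clamped, converted to 8-bit unorms with
 * lp_build_clamped_float_to_unsigned_norm, and then packed down from 32 to 16
 * to 8 bits with lp_build_pack.
 *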
440 * TODO: Take a precision argument, or even better, add a new precision member
441 * to the lp_type union.
442 */
443 void
444 lp_build_conv(LLVMBuilderRef builder,
445 struct lp_type src_type,
446 struct lp_type dst_type,
447 const LLVMValueRef *src, unsigned num_srcs,
448 LLVMValueRef *dst, unsigned num_dsts)
449 {
450 struct lp_type tmp_type;
451 LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
452 unsigned num_tmps;
453 unsigned i;
454
455 /* Register width must remain constant */
456 assert(src_type.width * src_type.length == dst_type.width * dst_type.length);
457
458 /* We must not lose or gain channels, only precision */
459 assert(src_type.length * num_srcs == dst_type.length * num_dsts);
460
461 assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
462 assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
463
464 tmp_type = src_type;
465 for(i = 0; i < num_srcs; ++i)
466 tmp[i] = src[i];
467 num_tmps = num_srcs;
468
469 /*
470 * Clamp if necessary
471 */
472
473 if(memcmp(&src_type, &dst_type, sizeof src_type) != 0) {
474 struct lp_build_context bld;
475 double src_min = lp_const_min(src_type);
476 double dst_min = lp_const_min(dst_type);
477 double src_max = lp_const_max(src_type);
478 double dst_max = lp_const_max(dst_type);
479 LLVMValueRef thres;
480
481 lp_build_context_init(&bld, builder, tmp_type);
482
483 if(src_min < dst_min) {
484 if(dst_min == 0.0)
485 thres = bld.zero;
486 else
487 thres = lp_build_const_scalar(src_type, dst_min);
488 for(i = 0; i < num_tmps; ++i)
489 tmp[i] = lp_build_max(&bld, tmp[i], thres);
490 }
491
492 if(src_max > dst_max) {
493 if(dst_max == 1.0)
494 thres = bld.one;
495 else
496 thres = lp_build_const_scalar(src_type, dst_max);
497 for(i = 0; i < num_tmps; ++i)
498 tmp[i] = lp_build_min(&bld, tmp[i], thres);
499 }
500 }
501
502 /*
503 * Scale to the narrowest range
504 */
505
506 if(dst_type.floating) {
507 /* Nothing to do */
508 }
509 else if(tmp_type.floating) {
510 if(!dst_type.fixed && !dst_type.sign && dst_type.norm) {
511 for(i = 0; i < num_tmps; ++i) {
512 tmp[i] = lp_build_clamped_float_to_unsigned_norm(builder,
513 tmp_type,
514 dst_type.width,
515 tmp[i]);
516 }
517 tmp_type.floating = FALSE;
518 }
519 else {
520 double dst_scale = lp_const_scale(dst_type);
521 LLVMTypeRef tmp_vec_type;
522
523 if (dst_scale != 1.0) {
524 LLVMValueRef scale = lp_build_const_scalar(tmp_type, dst_scale);
525 for(i = 0; i < num_tmps; ++i)
526 tmp[i] = LLVMBuildMul(builder, tmp[i], scale, "");
527 }
528
529 /* Use an equally sized integer for intermediate computations */
530 tmp_type.floating = FALSE;
531 tmp_vec_type = lp_build_vec_type(tmp_type);
532 for(i = 0; i < num_tmps; ++i) {
533 #if 0
534 if(dst_type.sign)
535 tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
536 else
537 tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
538 #else
539 /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
540 tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
541 #endif
542 }
543 }
544 }
545 else {
546 unsigned src_shift = lp_const_shift(src_type);
547 unsigned dst_shift = lp_const_shift(dst_type);
548
549 /* FIXME: compensate different offsets too */
550 if(src_shift > dst_shift) {
551 LLVMValueRef shift = lp_build_int_const_scalar(tmp_type, src_shift - dst_shift);
552 for(i = 0; i < num_tmps; ++i)
553 if(src_type.sign)
554 tmp[i] = LLVMBuildAShr(builder, tmp[i], shift, "");
555 else
556 tmp[i] = LLVMBuildLShr(builder, tmp[i], shift, "");
557 }
558 }
559
560 /*
561 * Truncate or expand bit width
562 */
563
564 assert(!tmp_type.floating || tmp_type.width == dst_type.width);
565
566 if(tmp_type.width > dst_type.width) {
567 assert(num_dsts == 1);
568 tmp[0] = lp_build_pack(builder, tmp_type, dst_type, TRUE, tmp, num_tmps);
569 tmp_type.width = dst_type.width;
570 tmp_type.length = dst_type.length;
571 num_tmps = 1;
572 }
573
574 if(tmp_type.width < dst_type.width) {
575 assert(num_tmps == 1);
576 lp_build_expand(builder, tmp_type, dst_type, tmp[0], tmp, num_dsts);
577 tmp_type.width = dst_type.width;
578 tmp_type.length = dst_type.length;
579 num_tmps = num_dsts;
580 }
581
582 assert(tmp_type.width == dst_type.width);
583 assert(tmp_type.length == dst_type.length);
584 assert(num_tmps == num_dsts);
585
586 /*
587 * Scale to the widest range
588 */
589
590 if(src_type.floating) {
591 /* Nothing to do */
592 }
593 else if(!src_type.floating && dst_type.floating) {
594 if(!src_type.fixed && !src_type.sign && src_type.norm) {
595 for(i = 0; i < num_tmps; ++i) {
596 tmp[i] = lp_build_unsigned_norm_to_float(builder,
597 src_type.width,
598 dst_type,
599 tmp[i]);
600 }
601 tmp_type.floating = TRUE;
602 }
603 else {
604 double src_scale = lp_const_scale(src_type);
605 LLVMTypeRef tmp_vec_type;
606
607 /* Use an equally sized floating point type for intermediate computations */
608 tmp_type.floating = TRUE;
609 tmp_type.sign = TRUE;
610 tmp_vec_type = lp_build_vec_type(tmp_type);
611 for(i = 0; i < num_tmps; ++i) {
612 #if 0
613 if(dst_type.sign)
614 tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
615 else
616 tmp[i] = LLVMBuildUIToFP(builder, tmp[i], tmp_vec_type, "");
617 #else
618 /* FIXME: there is no SSE counterpart for LLVMBuildUIToFP */
619 tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
620 #endif
621 }
622
623 if (src_scale != 1.0) {
624 LLVMValueRef scale = lp_build_const_scalar(tmp_type, 1.0/src_scale);
625 for(i = 0; i < num_tmps; ++i)
626 tmp[i] = LLVMBuildMul(builder, tmp[i], scale, "");
627 }
628 }
629 }
630 else {
631 unsigned src_shift = lp_const_shift(src_type);
632 unsigned dst_shift = lp_const_shift(dst_type);
633
634 /* FIXME: compensate different offsets too */
635 if(src_shift < dst_shift) {
636 LLVMValueRef shift = lp_build_int_const_scalar(tmp_type, dst_shift - src_shift);
637 for(i = 0; i < num_tmps; ++i)
638 tmp[i] = LLVMBuildShl(builder, tmp[i], shift, "");
639 }
640 }
641
642 for(i = 0; i < num_dsts; ++i)
643 dst[i] = tmp[i];
644 }
645
646
647 /**
648 * Bit mask conversion.
649 *
650 * This converts integer masks between the given types.
651 *
652 * The mask values should be 0 or -1, i.e., all bits either set to zero or one.
653 * Any other value will likely cause unpredictable results.
654 *
655 * This is basically a very trimmed down version of lp_build_conv.
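 *
 * E.g. four vectors of 4 x i32 masks pack into a single vector of 16 x i8;
 * since every value is 0 or -1, truncating the bit width preserves the mask
 * exactly.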
656 */
657 void
658 lp_build_conv_mask(LLVMBuilderRef builder,
659 struct lp_type src_type,
660 struct lp_type dst_type,
661 const LLVMValueRef *src, unsigned num_srcs,
662 LLVMValueRef *dst, unsigned num_dsts)
663 {
664 /* Register width must remain constant */
665 assert(src_type.width * src_type.length == dst_type.width * dst_type.length);
666
667 /* We must not lose or gain channels, only precision */
668 assert(src_type.length * num_srcs == dst_type.length * num_dsts);
669
670 /*
671 * Drop the type qualifiers.
672 *
673 * We assume all values are 0 or -1.
674 */
675
676 src_type.floating = FALSE;
677 src_type.fixed = FALSE;
678 src_type.sign = TRUE;
679 src_type.norm = FALSE;
680
681 dst_type.floating = FALSE;
682 dst_type.fixed = FALSE;
683 dst_type.sign = TRUE;
684 dst_type.norm = FALSE;
685
686 /*
687 * Truncate or expand bit width
688 */
689
690 if(src_type.width > dst_type.width) {
691 assert(num_dsts == 1);
692 dst[0] = lp_build_pack(builder, src_type, dst_type, TRUE, src, num_srcs);
693 }
694 else if(src_type.width < dst_type.width) {
695 assert(num_srcs == 1);
696 lp_build_expand(builder, src_type, dst_type, src[0], dst, num_dsts);
697 }
698 else {
699 assert(num_srcs == num_dsts);
700 memcpy(dst, src, num_dsts * sizeof *dst);
701 }
702 }