/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * @file
 * Helper functions for packing/unpacking.
 *
 * Packing/unpacking is necessary for conversion between types of different
 * bit width.
 *
 * They are also commonly used when a computation needs higher
 * precision for the intermediate values. For example, if one needs the
 * function:
 *
 *   c = compute(a, b);
 *
 * to use more precision for intermediate results, then one should implement
 * it as:
 *
 *   LLVMValueRef
 *   compute(LLVMBuilderRef builder, struct lp_type type, LLVMValueRef a, LLVMValueRef b)
 *   {
 *      struct lp_type wide_type = lp_wider_type(type);
 *      LLVMValueRef al, ah, bl, bh, cl, ch, c;
 *
 *      lp_build_unpack2(builder, type, wide_type, a, &al, &ah);
 *      lp_build_unpack2(builder, type, wide_type, b, &bl, &bh);
 *
 *      cl = compute_half(al, bl);
 *      ch = compute_half(ah, bh);
 *
 *      c = lp_build_pack2(builder, wide_type, type, cl, ch);
 *
 *      return c;
 *   }
 *
 * where compute_half() would do the computation for half the elements with
 * twice the precision.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */


#include "util/u_debug.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_intr.h"
#include "lp_bld_arit.h"
#include "lp_bld_pack.h"


/**
 * Build shuffle vectors that match PUNPCKLxx and PUNPCKHxx instructions.
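 *
 * For example, with n = 4 the shuffle mask is {0, 4, 1, 5} for lo_hi = 0
 * (interleaving the low halves of the two source vectors) and {2, 6, 3, 7}
 * for lo_hi = 1 (interleaving the high halves).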
 */
static LLVMValueRef
lp_build_const_unpack_shuffle(unsigned n, unsigned lo_hi)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i, j;

   assert(n <= LP_MAX_VECTOR_LENGTH);
   assert(lo_hi < 2);

   /* TODO: cache results in a static table */

   for(i = 0, j = lo_hi*n/2; i < n; i += 2, ++j) {
      elems[i + 0] = LLVMConstInt(LLVMInt32Type(), 0 + j, 0);
      elems[i + 1] = LLVMConstInt(LLVMInt32Type(), n + j, 0);
   }

   return LLVMConstVector(elems, n);
}


/**
 * Build shuffle vectors that match PACKxx instructions.
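 *
 * For example, with n = 4 the shuffle mask is {0, 2, 4, 6}, i.e. the
 * even-indexed elements of the two concatenated source vectors.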
 */
static LLVMValueRef
lp_build_const_pack_shuffle(unsigned n)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   assert(n <= LP_MAX_VECTOR_LENGTH);

   /* TODO: cache results in a static table */

   for(i = 0; i < n; ++i)
      elems[i] = LLVMConstInt(LLVMInt32Type(), 2*i, 0);

   return LLVMConstVector(elems, n);
}


/**
 * Interleave vector elements.
 *
 * Matches the PUNPCKLxx and PUNPCKHxx SSE instructions.
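 *
 * For example, for 4-element vectors:
 *
 *   a = a0 a1 a2 a3
 *   b = b0 b1 b2 b3
 *
 *   lo_hi = 0 gives a0 b0 a1 b1
 *   lo_hi = 1 gives a2 b2 a3 b3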
 */
LLVMValueRef
lp_build_interleave2(LLVMBuilderRef builder,
                     struct lp_type type,
                     LLVMValueRef a,
                     LLVMValueRef b,
                     unsigned lo_hi)
{
   LLVMValueRef shuffle;

   shuffle = lp_build_const_unpack_shuffle(type.length, lo_hi);

   return LLVMBuildShuffleVector(builder, a, b, shuffle, "");
}


/**
 * Double the bit width.
 *
 * This will only change the number of bits used to represent the values, not
 * the values themselves.
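 *
 * For example, an 8-element vector of 16-bit values yields two 4-element
 * vectors of 32-bit values: each source element is interleaved with its
 * high bits (zeros for unsigned types, the replicated sign bit for signed
 * types) and the result is bitcast to the wider type.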
 */
void
lp_build_unpack2(LLVMBuilderRef builder,
                 struct lp_type src_type,
                 struct lp_type dst_type,
                 LLVMValueRef src,
                 LLVMValueRef *dst_lo,
                 LLVMValueRef *dst_hi)
{
   LLVMValueRef msb;
   LLVMTypeRef dst_vec_type;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(dst_type.sign == src_type.sign);
   assert(dst_type.width == src_type.width * 2);
   assert(dst_type.length * 2 == src_type.length);

   if(src_type.sign) {
      /* Replicate the sign bit in the most significant bits */
      msb = LLVMBuildAShr(builder, src, lp_build_int_const_scalar(src_type, src_type.width - 1), "");
   }
   else {
      /* Most significant bits always zero */
      msb = lp_build_zero(src_type);
   }

   /* Interleave bits */
   if(util_cpu_caps.little_endian) {
      *dst_lo = lp_build_interleave2(builder, src_type, src, msb, 0);
      *dst_hi = lp_build_interleave2(builder, src_type, src, msb, 1);
   }
   else {
      *dst_lo = lp_build_interleave2(builder, src_type, msb, src, 0);
      *dst_hi = lp_build_interleave2(builder, src_type, msb, src, 1);
   }

   /* Cast the result into the new type (twice as wide) */

   dst_vec_type = lp_build_vec_type(dst_type);

   *dst_lo = LLVMBuildBitCast(builder, *dst_lo, dst_vec_type, "");
   *dst_hi = LLVMBuildBitCast(builder, *dst_hi, dst_vec_type, "");
}


/**
 * Expand the bit width.
 *
 * This will only change the number of bits used to represent the values, not
 * the values themselves.
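 *
 * For example, expanding a 16-element vector of 8-bit values into 32-bit
 * values takes two doubling steps and yields four 4-element vectors of
 * 32-bit values (num_dsts = 4).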
 */
void
lp_build_unpack(LLVMBuilderRef builder,
                struct lp_type src_type,
                struct lp_type dst_type,
                LLVMValueRef src,
                LLVMValueRef *dst, unsigned num_dsts)
{
   unsigned num_tmps;
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels, only precision */
   assert(src_type.length == dst_type.length * num_dsts);

   num_tmps = 1;
   dst[0] = src;

   while(src_type.width < dst_type.width) {
      struct lp_type tmp_type = src_type;

      tmp_type.width *= 2;
      tmp_type.length /= 2;

      for(i = num_tmps; i--; ) {
         lp_build_unpack2(builder, src_type, tmp_type, dst[i], &dst[2*i + 0], &dst[2*i + 1]);
      }

      src_type = tmp_type;

      num_tmps *= 2;
   }

   assert(num_tmps == num_dsts);
}


/**
 * Non-interleaved pack.
 *
 * This will move values as:
 *
 *   lo  = __ l0 __ l1 __ l2 __ .. __ ln
 *   hi  = __ h0 __ h1 __ h2 __ .. __ hn
 *   res = l0 l1 l2 .. ln h0 h1 h2 .. hn
 *
 * This will only change the number of bits used to represent the values, not
 * the values themselves.
 *
 * It is assumed the values are already clamped into the destination type's
 * range. Values outside that range will produce undefined results. Use
 * lp_build_packs2 instead if the values may be out of range.
 */
LLVMValueRef
lp_build_pack2(LLVMBuilderRef builder,
               struct lp_type src_type,
               struct lp_type dst_type,
               LLVMValueRef lo,
               LLVMValueRef hi)
{
   LLVMTypeRef src_vec_type = lp_build_vec_type(src_type);
   LLVMTypeRef dst_vec_type = lp_build_vec_type(dst_type);
   LLVMValueRef shuffle;
   LLVMValueRef res;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   if(util_cpu_caps.has_sse2 && src_type.width * src_type.length == 128) {
      switch(src_type.width) {
      case 32:
         if(dst_type.sign) {
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packssdw.128", src_vec_type, lo, hi);
         }
         else {
            if (util_cpu_caps.has_sse4_1) {
               /* PACKUSDW is the only intrinsic with a consistent signature */
               return lp_build_intrinsic_binary(builder, "llvm.x86.sse41.packusdw", dst_vec_type, lo, hi);
            }
            else {
               assert(0);
               return LLVMGetUndef(dst_vec_type);
            }
         }
         break;

      case 16:
         if(dst_type.sign)
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packsswb.128", src_vec_type, lo, hi);
         else
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packuswb.128", src_vec_type, lo, hi);
         break;

      default:
         assert(0);
         return LLVMGetUndef(dst_vec_type);
      }

      res = LLVMBuildBitCast(builder, res, dst_vec_type, "");
      return res;
   }

   lo = LLVMBuildBitCast(builder, lo, dst_vec_type, "");
   hi = LLVMBuildBitCast(builder, hi, dst_vec_type, "");

   shuffle = lp_build_const_pack_shuffle(dst_type.length);

   res = LLVMBuildShuffleVector(builder, lo, hi, shuffle, "");

   return res;
}


/**
 * Non-interleaved pack and saturate.
 *
 * Same as lp_build_pack2, but will saturate values so that they fit into the
 * destination type.
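 *
 * For example, packing 16-bit values into an unsigned 8-bit destination
 * turns 300 into 255 (the largest representable value) rather than
 * truncating its bit pattern.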
 */
LLVMValueRef
lp_build_packs2(LLVMBuilderRef builder,
                struct lp_type src_type,
                struct lp_type dst_type,
                LLVMValueRef lo,
                LLVMValueRef hi)
{
   boolean clamp;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.sign == dst_type.sign);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   clamp = TRUE;

   /* All X86 SSE non-interleaved pack instructions take signed inputs and
    * saturate them, so there is no need to clamp for those cases. */
   if(util_cpu_caps.has_sse2 &&
      src_type.width * src_type.length == 128 &&
      src_type.sign)
      clamp = FALSE;

   if(clamp) {
      struct lp_build_context bld;
      unsigned dst_bits = dst_type.sign ? dst_type.width - 1 : dst_type.width;
      LLVMValueRef dst_max = lp_build_int_const_scalar(src_type, ((unsigned long long)1 << dst_bits) - 1);
      lp_build_context_init(&bld, builder, src_type);
      lo = lp_build_min(&bld, lo, dst_max);
      hi = lp_build_min(&bld, hi, dst_max);
      /* FIXME: What about the lower bound? */
   }

   return lp_build_pack2(builder, src_type, dst_type, lo, hi);
}


/**
 * Truncate the bit width.
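 *
 * For example, four 4-element vectors of 32-bit values can be packed into
 * one 16-element vector of 8-bit values in two halving steps
 * (num_srcs = 4).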
 *
 * TODO: Handle saturation consistently.
 */
LLVMValueRef
lp_build_pack(LLVMBuilderRef builder,
              struct lp_type src_type,
              struct lp_type dst_type,
              boolean clamped,
              const LLVMValueRef *src, unsigned num_srcs)
{
   LLVMValueRef (*pack2)(LLVMBuilderRef builder,
                         struct lp_type src_type,
                         struct lp_type dst_type,
                         LLVMValueRef lo,
                         LLVMValueRef hi);
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels, only precision */
   assert(src_type.length * num_srcs == dst_type.length);

   if(clamped)
      pack2 = &lp_build_pack2;
   else
      pack2 = &lp_build_packs2;

   for(i = 0; i < num_srcs; ++i)
      tmp[i] = src[i];

   while(src_type.width > dst_type.width) {
      struct lp_type tmp_type = src_type;

      tmp_type.width /= 2;
      tmp_type.length *= 2;

      /* Take the sign change into consideration only in the last step */
      if(tmp_type.width == dst_type.width)
         tmp_type.sign = dst_type.sign;

      num_srcs /= 2;

      for(i = 0; i < num_srcs; ++i)
         tmp[i] = pack2(builder, src_type, tmp_type, tmp[2*i + 0], tmp[2*i + 1]);

      src_type = tmp_type;
   }

   assert(num_srcs == 1);

   return tmp[0];
}