Merge branch 'master' into pipe-video
mesa.git: src/gallium/auxiliary/gallivm/lp_bld_format_aos.c
/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * AoS pixel format manipulation.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */


#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_string.h"

#include "lp_bld_arit.h"
#include "lp_bld_init.h"
#include "lp_bld_type.h"
#include "lp_bld_flow.h"
#include "lp_bld_const.h"
#include "lp_bld_conv.h"
#include "lp_bld_swizzle.h"
#include "lp_bld_gather.h"
#include "lp_bld_debug.h"
#include "lp_bld_format.h"

/**
 * Basic swizzling.  Rearrange the order of the unswizzled array elements
 * according to the format description.  PIPE_SWIZZLE_ZERO/ONE are supported
 * too.
 * Ex: if unswizzled[4] = {B, G, R, x}, then swizzled_out[4] = {R, G, B, 1}.
 */
LLVMValueRef
lp_build_format_swizzle_aos(const struct util_format_description *desc,
                            struct lp_build_context *bld,
                            LLVMValueRef unswizzled)
{
   unsigned char swizzles[4];
   unsigned chan;

   assert(bld->type.length % 4 == 0);

   for (chan = 0; chan < 4; ++chan) {
      enum util_format_swizzle swizzle;

      if (desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS) {
         /*
          * For ZS formats do RGBA = ZZZ1
          */
         if (chan == 3) {
            swizzle = UTIL_FORMAT_SWIZZLE_1;
         } else if (desc->swizzle[0] == UTIL_FORMAT_SWIZZLE_NONE) {
            swizzle = UTIL_FORMAT_SWIZZLE_0;
         } else {
            swizzle = desc->swizzle[0];
         }
      } else {
         swizzle = desc->swizzle[chan];
      }
      swizzles[chan] = swizzle;
   }
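
   /*
    * Illustration (not exhaustive): for PIPE_FORMAT_B8G8R8A8_UNORM the format
    * description has swizzle = {2, 1, 0, 3}, so each group of four elements
    * {B, G, R, A} is reordered to {R, G, B, A}; for a depth/stencil format the
    * loop above yields {Z, Z, Z, 1} as noted.
    */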

   return lp_build_swizzle_aos(bld, unswizzled, swizzles);
}


/**
 * Whether the format matches the vector type, apart from swizzles.
 */
static INLINE boolean
format_matches_type(const struct util_format_description *desc,
                    struct lp_type type)
{
   enum util_format_type chan_type;
   unsigned chan;

   assert(type.length % 4 == 0);

   if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN ||
       desc->colorspace != UTIL_FORMAT_COLORSPACE_RGB ||
       desc->block.width != 1 ||
       desc->block.height != 1) {
      return FALSE;
   }

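   /*
    * Map the lp_type flags onto the matching util_format channel type.  E.g.
    * an 8-bit unorm type (width 8, norm set, no float/fixed/sign) corresponds
    * to a normalized UTIL_FORMAT_TYPE_UNSIGNED channel.
    */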
   if (type.floating) {
      chan_type = UTIL_FORMAT_TYPE_FLOAT;
   } else if (type.fixed) {
      chan_type = UTIL_FORMAT_TYPE_FIXED;
   } else if (type.sign) {
      chan_type = UTIL_FORMAT_TYPE_SIGNED;
   } else {
      chan_type = UTIL_FORMAT_TYPE_UNSIGNED;
   }

   for (chan = 0; chan < desc->nr_channels; ++chan) {
      if (desc->channel[chan].size != type.width) {
         return FALSE;
      }

      if (desc->channel[chan].type != UTIL_FORMAT_TYPE_VOID) {
         if (desc->channel[chan].type != chan_type ||
             desc->channel[chan].normalized != type.norm) {
            return FALSE;
         }
      }
   }

   return TRUE;
}


/**
 * Unpack a single pixel into its RGBA components.
 *
 * @param desc  the pixel format for the packed pixel value
 * @param packed  integer pixel in a format such as PIPE_FORMAT_B8G8R8A8_UNORM
 *
 * @return RGBA as a <4 x float> vector.
 */
static INLINE LLVMValueRef
lp_build_unpack_arith_rgba_aos(LLVMBuilderRef builder,
                               const struct util_format_description *desc,
                               LLVMValueRef packed)
{
   LLVMValueRef shifted, casted, scaled, masked;
   LLVMValueRef shifts[4];
   LLVMValueRef masks[4];
   LLVMValueRef scales[4];

   boolean normalized;
   boolean needs_uitofp;
   unsigned shift;
   unsigned i;

   /* TODO: Support more formats */
   assert(desc->layout == UTIL_FORMAT_LAYOUT_PLAIN);
   assert(desc->block.width == 1);
   assert(desc->block.height == 1);
   assert(desc->block.bits <= 32);

   /* Do the intermediate integer computations with 32-bit integers since that
    * matches the floating point size */
   assert(LLVMTypeOf(packed) == LLVMInt32Type());

   /* Broadcast the packed value to all four channels
    * before: packed = BGRA
    * after: packed = {BGRA, BGRA, BGRA, BGRA}
    */
   packed = LLVMBuildInsertElement(builder,
                                   LLVMGetUndef(LLVMVectorType(LLVMInt32Type(), 4)),
                                   packed,
                                   LLVMConstNull(LLVMInt32Type()),
                                   "");
   packed = LLVMBuildShuffleVector(builder,
                                   packed,
                                   LLVMGetUndef(LLVMVectorType(LLVMInt32Type(), 4)),
                                   LLVMConstNull(LLVMVectorType(LLVMInt32Type(), 4)),
                                   "");

   /* Initialize vector constants */
   normalized = FALSE;
   needs_uitofp = FALSE;
   shift = 0;

   /* Loop over 4 color components */
   for (i = 0; i < 4; ++i) {
      unsigned bits = desc->channel[i].size;

      if (desc->channel[i].type == UTIL_FORMAT_TYPE_VOID) {
         shifts[i] = LLVMGetUndef(LLVMInt32Type());
         masks[i] = LLVMConstNull(LLVMInt32Type());
         scales[i] = LLVMConstNull(LLVMFloatType());
      }
      else {
         unsigned long long mask = (1ULL << bits) - 1;

         assert(desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED);

         if (bits == 32) {
            needs_uitofp = TRUE;
         }

         shifts[i] = LLVMConstInt(LLVMInt32Type(), shift, 0);
         masks[i] = LLVMConstInt(LLVMInt32Type(), mask, 0);

         if (desc->channel[i].normalized) {
            scales[i] = LLVMConstReal(LLVMFloatType(), 1.0/mask);
            normalized = TRUE;
         }
         else
            scales[i] = LLVMConstReal(LLVMFloatType(), 1.0);
      }

      shift += bits;
   }
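
   /*
    * Illustration (not exhaustive): for PIPE_FORMAT_B8G8R8A8_UNORM the loop
    * above produces shifts = {0, 8, 16, 24}, masks = {0xff, 0xff, 0xff, 0xff}
    * and scales = {1/255, 1/255, 1/255, 1/255}, with normalized = TRUE.
    */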

   /* Ex: convert packed = {BGRA, BGRA, BGRA, BGRA}
    * into masked = {B, G, R, A}
    */
   shifted = LLVMBuildLShr(builder, packed, LLVMConstVector(shifts, 4), "");
   masked = LLVMBuildAnd(builder, shifted, LLVMConstVector(masks, 4), "");


   if (!needs_uitofp) {
      /* UIToFP can't be expressed in SSE2 */
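      /* (The masked values are at most 31 bits wide here, hence non-negative
       * when reinterpreted as signed integers, so SIToFP gives the same
       * result.) */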
      casted = LLVMBuildSIToFP(builder, masked, LLVMVectorType(LLVMFloatType(), 4), "");
   } else {
      casted = LLVMBuildUIToFP(builder, masked, LLVMVectorType(LLVMFloatType(), 4), "");
   }

   /* At this point 'casted' may be a vector of floats such as
    * {255.0, 255.0, 255.0, 255.0}.  Next, if the pixel values are normalized
    * we'll scale this to {1.0, 1.0, 1.0, 1.0}.
    */

   if (normalized)
      scaled = LLVMBuildFMul(builder, casted, LLVMConstVector(scales, 4), "");
   else
      scaled = casted;

   return scaled;
}


/**
 * Pack a single pixel.
 *
 * @param rgba 4 float vector with the unpacked components.
 *
 * XXX: This is mostly for reference and testing -- operating on a single
 * pixel at a time is rarely if ever needed.
 */
LLVMValueRef
lp_build_pack_rgba_aos(LLVMBuilderRef builder,
                       const struct util_format_description *desc,
                       LLVMValueRef rgba)
{
   LLVMTypeRef type;
   LLVMValueRef packed = NULL;
   LLVMValueRef swizzles[4];
   LLVMValueRef shifted, casted, scaled, unswizzled;
   LLVMValueRef shifts[4];
   LLVMValueRef scales[4];
   boolean normalized;
   unsigned shift;
   unsigned i, j;

   assert(desc->layout == UTIL_FORMAT_LAYOUT_PLAIN);
   assert(desc->block.width == 1);
   assert(desc->block.height == 1);

   type = LLVMIntType(desc->block.bits);

   /* Unswizzle the color components into the source vector. */
   for (i = 0; i < 4; ++i) {
      for (j = 0; j < 4; ++j) {
         if (desc->swizzle[j] == i)
            break;
      }
      if (j < 4)
         swizzles[i] = LLVMConstInt(LLVMInt32Type(), j, 0);
      else
         swizzles[i] = LLVMGetUndef(LLVMInt32Type());
   }
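
   /*
    * Illustration: for PIPE_FORMAT_B8G8R8A8_UNORM (desc->swizzle = {2, 1, 0, 3})
    * the shuffle indices also end up as {2, 1, 0, 3}: the format's blue channel
    * is taken from rgba[2], green from rgba[1], and so on.
    */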

   unswizzled = LLVMBuildShuffleVector(builder, rgba,
                                       LLVMGetUndef(LLVMVectorType(LLVMFloatType(), 4)),
                                       LLVMConstVector(swizzles, 4), "");

   normalized = FALSE;
   shift = 0;
   for (i = 0; i < 4; ++i) {
      unsigned bits = desc->channel[i].size;

      if (desc->channel[i].type == UTIL_FORMAT_TYPE_VOID) {
         shifts[i] = LLVMGetUndef(LLVMInt32Type());
         scales[i] = LLVMGetUndef(LLVMFloatType());
      }
      else {
         unsigned mask = (1 << bits) - 1;

         assert(desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED);
         assert(bits < 32);

         shifts[i] = LLVMConstInt(LLVMInt32Type(), shift, 0);

         if (desc->channel[i].normalized) {
            scales[i] = LLVMConstReal(LLVMFloatType(), mask);
            normalized = TRUE;
         }
         else
            scales[i] = LLVMConstReal(LLVMFloatType(), 1.0);
      }

      shift += bits;
   }

   if (normalized)
      scaled = LLVMBuildFMul(builder, unswizzled, LLVMConstVector(scales, 4), "");
   else
      scaled = unswizzled;

   casted = LLVMBuildFPToSI(builder, scaled, LLVMVectorType(LLVMInt32Type(), 4), "");

   shifted = LLVMBuildShl(builder, casted, LLVMConstVector(shifts, 4), "");

   /* Bitwise or all components */
   for (i = 0; i < 4; ++i) {
      if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
         LLVMValueRef component = LLVMBuildExtractElement(builder, shifted, LLVMConstInt(LLVMInt32Type(), i, 0), "");
         if (packed)
            packed = LLVMBuildOr(builder, packed, component, "");
         else
            packed = component;
      }
   }

   if (!packed)
      packed = LLVMGetUndef(LLVMInt32Type());

   if (desc->block.bits < 32)
      packed = LLVMBuildTrunc(builder, packed, type, "");

   return packed;
}



/**
 * Fetch one or more pixels into an AoS RGBA vector of the given type
 * (type.length / 4 pixels).
 *
 * \param format_desc  describes format of the image we're fetching from
 * \param base_ptr, offset  address of the pixel block (or the texel if uncompressed)
 * \param i, j  the sub-block pixel coordinates.  For non-compressed formats
 *              these will always be (0, 0).
 * \return  a type.length element vector with the pixels' RGBA values.
 */
LLVMValueRef
lp_build_fetch_rgba_aos(LLVMBuilderRef builder,
                        const struct util_format_description *format_desc,
                        struct lp_type type,
                        LLVMValueRef base_ptr,
                        LLVMValueRef offset,
                        LLVMValueRef i,
                        LLVMValueRef j)
{
   unsigned num_pixels = type.length / 4;
   struct lp_build_context bld;

   assert(type.length <= LP_MAX_VECTOR_LENGTH);
   assert(type.length % 4 == 0);

   lp_build_context_init(&bld, builder, type);

   /*
    * Trivial case
    *
    * The format matches the type (apart from a swizzle) so no need for
    * scaling or converting.
    */

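   /*
    * E.g. (illustrative) fetching PIPE_FORMAT_R8G8B8A8_UNORM into an 8-bit
    * normalized unsigned type of length 16 takes this path: four 32-bit texels
    * are gathered into one 128-bit vector, bitcast to <16 x i8>, and only the
    * swizzle step remains.
    */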
   if (format_matches_type(format_desc, type) &&
       format_desc->block.bits <= type.width * 4 &&
       util_is_power_of_two(format_desc->block.bits)) {
      LLVMValueRef packed;

      /*
       * The format matches the type (apart from a swizzle) so no need for
       * scaling or converting.
       */

      packed = lp_build_gather(builder, type.length/4,
                               format_desc->block.bits, type.width*4,
                               base_ptr, offset);

      assert(format_desc->block.bits <= type.width * type.length);

      packed = LLVMBuildBitCast(builder, packed, lp_build_vec_type(type), "");

      return lp_build_format_swizzle_aos(format_desc, &bld, packed);
   }

   /*
    * Bit arithmetic
    */

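   /*
    * For example, PIPE_FORMAT_B5G6R5_UNORM, or PIPE_FORMAT_B8G8R8A8_UNORM
    * being fetched into a floating point vector, ends up on this path: each
    * pixel is unpacked with shifts/masks and converted afterwards.
    */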
   if (format_desc->layout == UTIL_FORMAT_LAYOUT_PLAIN &&
       (format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB ||
        format_desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS) &&
       format_desc->block.width == 1 &&
       format_desc->block.height == 1 &&
       util_is_power_of_two(format_desc->block.bits) &&
       format_desc->block.bits <= 32 &&
       format_desc->is_bitmask &&
       !format_desc->is_mixed &&
       (format_desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED ||
        format_desc->channel[1].type == UTIL_FORMAT_TYPE_UNSIGNED)) {

      LLVMValueRef tmps[LP_MAX_VECTOR_LENGTH/4];
      LLVMValueRef res;
      unsigned k;

      /*
       * Unpack a pixel at a time into a <4 x float> RGBA vector
       */

      for (k = 0; k < num_pixels; ++k) {
         LLVMValueRef packed;

         packed = lp_build_gather_elem(builder, num_pixels,
                                       format_desc->block.bits, 32,
                                       base_ptr, offset, k);

         tmps[k] = lp_build_unpack_arith_rgba_aos(builder, format_desc,
                                                  packed);
      }

      /*
       * Type conversion.
       *
       * TODO: We could avoid floating conversion for integer to
       * integer conversions.
       */

      if (gallivm_debug & GALLIVM_DEBUG_PERF && !type.floating) {
         debug_printf("%s: unpacking %s with floating point\n",
                      __FUNCTION__, format_desc->short_name);
      }

      lp_build_conv(builder,
                    lp_float32_vec4_type(),
                    type,
                    tmps, num_pixels, &res, 1);

      return lp_build_format_swizzle_aos(format_desc, &bld, res);
   }

   /*
    * YUV / subsampled formats
    */

   if (format_desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED) {
      struct lp_type tmp_type;
      LLVMValueRef tmp;

      memset(&tmp_type, 0, sizeof tmp_type);
      tmp_type.width = 8;
      tmp_type.length = num_pixels * 4;
      tmp_type.norm = TRUE;

      tmp = lp_build_fetch_subsampled_rgba_aos(builder,
                                               format_desc,
                                               num_pixels,
                                               base_ptr,
                                               offset,
                                               i, j);

      lp_build_conv(builder,
                    tmp_type, type,
                    &tmp, 1, &tmp, 1);

      return tmp;
   }

   /*
    * Fallback to util_format_description::fetch_rgba_8unorm().
    */

   if (format_desc->fetch_rgba_8unorm &&
       !type.floating && type.width == 8 && !type.sign && type.norm) {
      /*
       * Fallback to calling util_format_description::fetch_rgba_8unorm.
       *
       * This is definitely not the most efficient way of fetching pixels, as
       * we miss the opportunity to do vectorization, but it is convenient
       * for formats or scenarios for which there was no opportunity or
       * incentive to optimize.
       */

      LLVMModuleRef module = LLVMGetGlobalParent(LLVMGetBasicBlockParent(LLVMGetInsertBlock(builder)));
      char name[256];
      LLVMTypeRef i8t = LLVMInt8Type();
      LLVMTypeRef pi8t = LLVMPointerType(i8t, 0);
      LLVMTypeRef i32t = LLVMInt32Type();
      LLVMValueRef function;
      LLVMValueRef tmp_ptr;
      LLVMValueRef tmp;
      LLVMValueRef res;
      unsigned k;

      util_snprintf(name, sizeof name, "util_format_%s_fetch_rgba_8unorm",
                    format_desc->short_name);

      if (gallivm_debug & GALLIVM_DEBUG_PERF) {
         debug_printf("%s: falling back to %s\n", __FUNCTION__, name);
      }

      /*
       * Declare and bind format_desc->fetch_rgba_8unorm().
       */

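      /*
       * fetch_rgba_8unorm() takes (dst, src, i, j): two byte pointers plus the
       * two sub-block coordinates, which is what the argument types built
       * below mirror.
       */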
      function = LLVMGetNamedFunction(module, name);
      if (!function) {
         LLVMTypeRef ret_type;
         LLVMTypeRef arg_types[4];
         LLVMTypeRef function_type;

         ret_type = LLVMVoidType();
         arg_types[0] = pi8t;
         arg_types[1] = pi8t;
         arg_types[3] = arg_types[2] = LLVMIntType(sizeof(unsigned) * 8);
         function_type = LLVMFunctionType(ret_type, arg_types, Elements(arg_types), 0);
         function = LLVMAddFunction(module, name, function_type);

         LLVMSetFunctionCallConv(function, LLVMCCallConv);
         LLVMSetLinkage(function, LLVMExternalLinkage);

         assert(LLVMIsDeclaration(function));

         LLVMAddGlobalMapping(lp_build_engine, function,
                              func_to_pointer((func_pointer)format_desc->fetch_rgba_8unorm));
      }

      tmp_ptr = lp_build_alloca(builder, i32t, "");

      res = LLVMGetUndef(LLVMVectorType(i32t, num_pixels));

      /*
       * Invoke format_desc->fetch_rgba_8unorm() for each pixel and insert the
       * result in the AoS vector.
       */

      for (k = 0; k < num_pixels; ++k) {
         LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), k, 0);
         LLVMValueRef args[4];

         args[0] = LLVMBuildBitCast(builder, tmp_ptr, pi8t, "");
         args[1] = lp_build_gather_elem_ptr(builder, num_pixels,
                                            base_ptr, offset, k);

         if (num_pixels == 1) {
            args[2] = i;
            args[3] = j;
         }
         else {
            args[2] = LLVMBuildExtractElement(builder, i, index, "");
            args[3] = LLVMBuildExtractElement(builder, j, index, "");
         }

         LLVMBuildCall(builder, function, args, Elements(args), "");

         tmp = LLVMBuildLoad(builder, tmp_ptr, "");

         if (num_pixels == 1) {
            res = tmp;
         }
         else {
            res = LLVMBuildInsertElement(builder, res, tmp, index, "");
         }
      }

      /* Bitcast from <n x i32> to <4n x i8> */
      res = LLVMBuildBitCast(builder, res, bld.vec_type, "");

      return res;
   }


   /*
    * Fallback to util_format_description::fetch_rgba_float().
    */

   if (format_desc->fetch_rgba_float) {
      /*
       * Fallback to calling util_format_description::fetch_rgba_float.
       *
       * This is definitely not the most efficient way of fetching pixels, as
       * we miss the opportunity to do vectorization, but it is convenient
       * for formats or scenarios for which there was no opportunity or
       * incentive to optimize.
       */

      LLVMModuleRef module = LLVMGetGlobalParent(LLVMGetBasicBlockParent(LLVMGetInsertBlock(builder)));
      char name[256];
      LLVMTypeRef f32t = LLVMFloatType();
      LLVMTypeRef f32x4t = LLVMVectorType(f32t, 4);
      LLVMTypeRef pf32t = LLVMPointerType(f32t, 0);
      LLVMValueRef function;
      LLVMValueRef tmp_ptr;
      LLVMValueRef tmps[LP_MAX_VECTOR_LENGTH/4];
      LLVMValueRef res;
      unsigned k;

      util_snprintf(name, sizeof name, "util_format_%s_fetch_rgba_float",
                    format_desc->short_name);

      if (gallivm_debug & GALLIVM_DEBUG_PERF) {
         debug_printf("%s: falling back to %s\n", __FUNCTION__, name);
      }

      /*
       * Declare and bind format_desc->fetch_rgba_float().
       */

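      /*
       * Same idea as the 8unorm fallback above, but fetch_rgba_float() writes
       * four floats, so the destination argument is a float pointer.
       */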
      function = LLVMGetNamedFunction(module, name);
      if (!function) {
         LLVMTypeRef ret_type;
         LLVMTypeRef arg_types[4];
         LLVMTypeRef function_type;

         ret_type = LLVMVoidType();
         arg_types[0] = pf32t;
         arg_types[1] = LLVMPointerType(LLVMInt8Type(), 0);
         arg_types[3] = arg_types[2] = LLVMIntType(sizeof(unsigned) * 8);
         function_type = LLVMFunctionType(ret_type, arg_types, Elements(arg_types), 0);
         function = LLVMAddFunction(module, name, function_type);

         LLVMSetFunctionCallConv(function, LLVMCCallConv);
         LLVMSetLinkage(function, LLVMExternalLinkage);

         assert(LLVMIsDeclaration(function));

         LLVMAddGlobalMapping(lp_build_engine, function,
                              func_to_pointer((func_pointer)format_desc->fetch_rgba_float));
      }

      tmp_ptr = lp_build_alloca(builder, f32x4t, "");

      /*
       * Invoke format_desc->fetch_rgba_float() for each pixel and gather the
       * results into tmps[] for conversion below.
       */

      for (k = 0; k < num_pixels; ++k) {
         LLVMValueRef args[4];

         args[0] = LLVMBuildBitCast(builder, tmp_ptr, pf32t, "");
         args[1] = lp_build_gather_elem_ptr(builder, num_pixels,
                                            base_ptr, offset, k);

         if (num_pixels == 1) {
            args[2] = i;
            args[3] = j;
         }
         else {
            LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), k, 0);
            args[2] = LLVMBuildExtractElement(builder, i, index, "");
            args[3] = LLVMBuildExtractElement(builder, j, index, "");
         }

         LLVMBuildCall(builder, function, args, Elements(args), "");

         tmps[k] = LLVMBuildLoad(builder, tmp_ptr, "");
      }

      lp_build_conv(builder,
                    lp_float32_vec4_type(),
                    type,
                    tmps, num_pixels, &res, 1);

      return res;
   }

   assert(0);
   return lp_build_undef(type);
}