gallivm: Use 8 wide AoS sampling on AVX2.
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_sample_soa.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 * Texture sampling -- SoA.
31 *
32 * @author Jose Fonseca <jfonseca@vmware.com>
33 * @author Brian Paul <brianp@vmware.com>
34 */
35
36 #include "pipe/p_defines.h"
37 #include "pipe/p_state.h"
38 #include "pipe/p_shader_tokens.h"
39 #include "util/u_debug.h"
40 #include "util/u_dump.h"
41 #include "util/u_memory.h"
42 #include "util/u_math.h"
43 #include "util/u_format.h"
44 #include "util/u_cpu_detect.h"
45 #include "util/format_rgb9e5.h"
46 #include "lp_bld_debug.h"
47 #include "lp_bld_type.h"
48 #include "lp_bld_const.h"
49 #include "lp_bld_conv.h"
50 #include "lp_bld_arit.h"
51 #include "lp_bld_bitarit.h"
52 #include "lp_bld_logic.h"
53 #include "lp_bld_printf.h"
54 #include "lp_bld_swizzle.h"
55 #include "lp_bld_flow.h"
56 #include "lp_bld_gather.h"
57 #include "lp_bld_format.h"
58 #include "lp_bld_sample.h"
59 #include "lp_bld_sample_aos.h"
60 #include "lp_bld_struct.h"
61 #include "lp_bld_quad.h"
62 #include "lp_bld_pack.h"
63
64
/**
 * Generate code to fetch a texel from a texture at int coords (x, y, z).
 * The computation depends on whether the texture is 1D, 2D or 3D.
 * The result, texel, will be float vectors:
 *   texel[0] = red values
 *   texel[1] = green values
 *   texel[2] = blue values
 *   texel[3] = alpha values
 *
 * \param width,height,depth  per-dimension texture size (int vectors)
 * \param x,y,z       integer texel coordinates (int vectors)
 * \param y_stride    row stride (used to compute the byte offset)
 * \param z_stride    image/slice stride (used to compute the byte offset)
 * \param data_ptr    base pointer of the texture data
 * \param mipoffsets  optional per-element byte offsets of the selected mip
 *                    level (NULL if already folded into data_ptr)
 * \param texel_out   returns the four texel channel vectors
 */
static void
lp_build_sample_texel_soa(struct lp_build_sample_context *bld,
                          LLVMValueRef width,
                          LLVMValueRef height,
                          LLVMValueRef depth,
                          LLVMValueRef x,
                          LLVMValueRef y,
                          LLVMValueRef z,
                          LLVMValueRef y_stride,
                          LLVMValueRef z_stride,
                          LLVMValueRef data_ptr,
                          LLVMValueRef mipoffsets,
                          LLVMValueRef texel_out[4])
{
   const struct lp_static_sampler_state *static_state = bld->static_sampler_state;
   const unsigned dims = bld->dims;
   struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef offset;
   LLVMValueRef i, j;
   LLVMValueRef use_border = NULL;

   /*
    * Accumulate a per-element mask of texels which must be replaced by the
    * border color, one dimension at a time (only for wrap modes which can
    * actually produce border texels with the given filters).
    * use_border = x < 0 || x >= width || y < 0 || y >= height ...
    */
   if (lp_sampler_wrap_mode_uses_border_color(static_state->wrap_s,
                                              static_state->min_img_filter,
                                              static_state->mag_img_filter)) {
      LLVMValueRef b1, b2;
      b1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, x, int_coord_bld->zero);
      b2 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, x, width);
      use_border = LLVMBuildOr(builder, b1, b2, "b1_or_b2");
   }

   if (dims >= 2 &&
       lp_sampler_wrap_mode_uses_border_color(static_state->wrap_t,
                                              static_state->min_img_filter,
                                              static_state->mag_img_filter)) {
      LLVMValueRef b1, b2;
      b1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, y, int_coord_bld->zero);
      b2 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, y, height);
      if (use_border) {
         use_border = LLVMBuildOr(builder, use_border, b1, "ub_or_b1");
         use_border = LLVMBuildOr(builder, use_border, b2, "ub_or_b2");
      }
      else {
         use_border = LLVMBuildOr(builder, b1, b2, "b1_or_b2");
      }
   }

   if (dims == 3 &&
       lp_sampler_wrap_mode_uses_border_color(static_state->wrap_r,
                                              static_state->min_img_filter,
                                              static_state->mag_img_filter)) {
      LLVMValueRef b1, b2;
      b1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, z, int_coord_bld->zero);
      b2 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, z, depth);
      if (use_border) {
         use_border = LLVMBuildOr(builder, use_border, b1, "ub_or_b1");
         use_border = LLVMBuildOr(builder, use_border, b2, "ub_or_b2");
      }
      else {
         use_border = LLVMBuildOr(builder, b1, b2, "b1_or_b2");
      }
   }

   /* convert x,y,z coords to linear offset from start of texture, in bytes */
   lp_build_sample_offset(&bld->int_coord_bld,
                          bld->format_desc,
                          x, y, z, y_stride, z_stride,
                          &offset, &i, &j);
   if (mipoffsets) {
      offset = lp_build_add(&bld->int_coord_bld, offset, mipoffsets);
   }

   if (use_border) {
      /* If we can sample the border color, it means that texcoords may
       * lie outside the bounds of the texture image. We need to do
       * something to prevent reading out of bounds and causing a segfault.
       *
       * Simply AND the texture coords with !use_border. This will cause
       * coords which are out of bounds to become zero. Zero's guaranteed
       * to be inside the texture image.
       */
      offset = lp_build_andnot(&bld->int_coord_bld, offset, use_border);
   }

   /* fetch and convert the texels to SoA float channel vectors */
   lp_build_fetch_rgba_soa(bld->gallivm,
                           bld->format_desc,
                           bld->texel_type,
                           data_ptr, offset,
                           i, j,
                           bld->cache,
                           texel_out);

   /*
    * Note: if we find an app which frequently samples the texture border
    * we might want to implement a true conditional here to avoid sampling
    * the texture whenever possible (since that's quite a bit of code).
    * Ex:
    *   if (use_border) {
    *      texel = border_color;
    *   }
    *   else {
    *      texel = sample_texture(coord);
    *   }
    * As it is now, we always sample the texture, then selectively replace
    * the texel color results with the border color.
    */

   if (use_border) {
      /* select texel color or border color depending on use_border. */
      const struct util_format_description *format_desc = bld->format_desc;
      int chan;
      struct lp_type border_type = bld->texel_type;
      border_type.length = 4;
      /*
       * Only replace channels which are actually present. The others should
       * get optimized away eventually by sampler_view swizzle anyway but it's
       * easier too.
       */
      for (chan = 0; chan < 4; chan++) {
         unsigned chan_s;
         /* reverse-map channel... (find which swizzle slot sources chan) */
         for (chan_s = 0; chan_s < 4; chan_s++) {
            if (chan_s == format_desc->swizzle[chan]) {
               break;
            }
         }
         /* chan_s == 4 means the channel is not present in the format */
         if (chan_s <= 3) {
            /* use the already clamped color */
            LLVMValueRef idx = lp_build_const_int32(bld->gallivm, chan);
            LLVMValueRef border_chan;

            border_chan = lp_build_extract_broadcast(bld->gallivm,
                                                     border_type,
                                                     bld->texel_type,
                                                     bld->border_color_clamped,
                                                     idx);
            texel_out[chan] = lp_build_select(&bld->texel_bld, use_border,
                                              border_chan, texel_out[chan]);
         }
      }
   }
}
217
218
219 /**
220 * Helper to compute the mirror function for the PIPE_WRAP_MIRROR modes.
221 */
222 static LLVMValueRef
223 lp_build_coord_mirror(struct lp_build_sample_context *bld,
224 LLVMValueRef coord)
225 {
226 struct lp_build_context *coord_bld = &bld->coord_bld;
227 struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
228 LLVMValueRef fract, flr, isOdd;
229
230 lp_build_ifloor_fract(coord_bld, coord, &flr, &fract);
231 /* kill off NaNs */
232 /* XXX: not safe without arch rounding, fract can be anything. */
233 fract = lp_build_max_ext(coord_bld, fract, coord_bld->zero,
234 GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN);
235
236 /* isOdd = flr & 1 */
237 isOdd = LLVMBuildAnd(bld->gallivm->builder, flr, int_coord_bld->one, "");
238
239 /* make coord positive or negative depending on isOdd */
240 /* XXX slight overkill masking out sign bit is unnecessary */
241 coord = lp_build_set_sign(coord_bld, fract, isOdd);
242
243 /* convert isOdd to float */
244 isOdd = lp_build_int_to_float(coord_bld, isOdd);
245
246 /* add isOdd to coord */
247 coord = lp_build_add(coord_bld, coord, isOdd);
248
249 return coord;
250 }
251
252
253 /**
254 * Helper to compute the first coord and the weight for
255 * linear wrap repeat npot textures
256 */
257 void
258 lp_build_coord_repeat_npot_linear(struct lp_build_sample_context *bld,
259 LLVMValueRef coord_f,
260 LLVMValueRef length_i,
261 LLVMValueRef length_f,
262 LLVMValueRef *coord0_i,
263 LLVMValueRef *weight_f)
264 {
265 struct lp_build_context *coord_bld = &bld->coord_bld;
266 struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
267 LLVMValueRef half = lp_build_const_vec(bld->gallivm, coord_bld->type, 0.5);
268 LLVMValueRef length_minus_one = lp_build_sub(int_coord_bld, length_i,
269 int_coord_bld->one);
270 LLVMValueRef mask;
271 /* wrap with normalized floats is just fract */
272 coord_f = lp_build_fract(coord_bld, coord_f);
273 /* mul by size and subtract 0.5 */
274 coord_f = lp_build_mul(coord_bld, coord_f, length_f);
275 coord_f = lp_build_sub(coord_bld, coord_f, half);
276 /*
277 * we avoided the 0.5/length division before the repeat wrap,
278 * now need to fix up edge cases with selects
279 */
280 /*
281 * Note we do a float (unordered) compare so we can eliminate NaNs.
282 * (Otherwise would need fract_safe above).
283 */
284 mask = lp_build_compare(coord_bld->gallivm, coord_bld->type,
285 PIPE_FUNC_LESS, coord_f, coord_bld->zero);
286
287 /* convert to int, compute lerp weight */
288 lp_build_ifloor_fract(coord_bld, coord_f, coord0_i, weight_f);
289 *coord0_i = lp_build_select(int_coord_bld, mask, length_minus_one, *coord0_i);
290 }
291
292
/**
 * Build LLVM code for texture wrap mode for linear filtering.
 * \param coord the incoming texcoord (nominally in [0,1])
 * \param length the texture size along one dimension, as int vector
 * \param length_f the texture size along one dimension, as float vector
 * \param offset texel offset along one dimension (as int vector)
 * \param is_pot if TRUE, length is a power of two
 * \param wrap_mode one of PIPE_TEX_WRAP_x
 * \param x0_out returns first integer texcoord
 * \param x1_out returns second integer texcoord
 * \param weight_out returns linear interpolation weight
 */
static void
lp_build_sample_wrap_linear(struct lp_build_sample_context *bld,
                            LLVMValueRef coord,
                            LLVMValueRef length,
                            LLVMValueRef length_f,
                            LLVMValueRef offset,
                            boolean is_pot,
                            unsigned wrap_mode,
                            LLVMValueRef *x0_out,
                            LLVMValueRef *x1_out,
                            LLVMValueRef *weight_out)
{
   struct lp_build_context *coord_bld = &bld->coord_bld;
   struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef half = lp_build_const_vec(bld->gallivm, coord_bld->type, 0.5);
   LLVMValueRef length_minus_one = lp_build_sub(int_coord_bld, length, int_coord_bld->one);
   LLVMValueRef coord0, coord1, weight;

   switch(wrap_mode) {
   case PIPE_TEX_WRAP_REPEAT:
      if (is_pot) {
         /* mul by size and subtract 0.5 */
         coord = lp_build_mul(coord_bld, coord, length_f);
         coord = lp_build_sub(coord_bld, coord, half);
         if (offset) {
            offset = lp_build_int_to_float(coord_bld, offset);
            coord = lp_build_add(coord_bld, coord, offset);
         }
         /* convert to int, compute lerp weight */
         lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
         coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
         /* repeat wrap: cheap mask since size is a power of two */
         coord0 = LLVMBuildAnd(builder, coord0, length_minus_one, "");
         coord1 = LLVMBuildAnd(builder, coord1, length_minus_one, "");
      }
      else {
         LLVMValueRef mask;
         if (offset) {
            /* offset is in texels; normalize it before the repeat wrap */
            offset = lp_build_int_to_float(coord_bld, offset);
            offset = lp_build_div(coord_bld, offset, length_f);
            coord = lp_build_add(coord_bld, coord, offset);
         }
         lp_build_coord_repeat_npot_linear(bld, coord,
                                           length, length_f,
                                           &coord0, &weight);
         /* coord1 = (coord0 == length - 1) ? 0 : coord0 + 1 */
         mask = lp_build_compare(int_coord_bld->gallivm, int_coord_bld->type,
                                 PIPE_FUNC_NOTEQUAL, coord0, length_minus_one);
         coord1 = LLVMBuildAnd(builder,
                               lp_build_add(int_coord_bld, coord0, int_coord_bld->one),
                               mask, "");
      }
      break;

   case PIPE_TEX_WRAP_CLAMP:
      if (bld->static_sampler_state->normalized_coords) {
         /* scale coord to length */
         coord = lp_build_mul(coord_bld, coord, length_f);
      }
      if (offset) {
         offset = lp_build_int_to_float(coord_bld, offset);
         coord = lp_build_add(coord_bld, coord, offset);
      }

      /* clamp to [0, length] */
      coord = lp_build_clamp(coord_bld, coord, coord_bld->zero, length_f);

      coord = lp_build_sub(coord_bld, coord, half);

      /* convert to int, compute lerp weight */
      lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
      coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
      break;

   case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
      {
         /* use an unsigned context: coord is non-negative after the clamp */
         struct lp_build_context abs_coord_bld = bld->coord_bld;
         abs_coord_bld.type.sign = FALSE;

         if (bld->static_sampler_state->normalized_coords) {
            /* mul by tex size */
            coord = lp_build_mul(coord_bld, coord, length_f);
         }
         if (offset) {
            offset = lp_build_int_to_float(coord_bld, offset);
            coord = lp_build_add(coord_bld, coord, offset);
         }

         /* clamp to length max */
         coord = lp_build_min_ext(coord_bld, coord, length_f,
                                  GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN);
         /* subtract 0.5 */
         coord = lp_build_sub(coord_bld, coord, half);
         /* clamp to [0, length - 0.5] */
         coord = lp_build_max(coord_bld, coord, coord_bld->zero);
         /* convert to int, compute lerp weight */
         lp_build_ifloor_fract(&abs_coord_bld, coord, &coord0, &weight);
         coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
         /* coord1 = min(coord1, length-1) */
         coord1 = lp_build_min(int_coord_bld, coord1, length_minus_one);
         break;
      }

   case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
      if (bld->static_sampler_state->normalized_coords) {
         /* scale coord to length */
         coord = lp_build_mul(coord_bld, coord, length_f);
      }
      if (offset) {
         offset = lp_build_int_to_float(coord_bld, offset);
         coord = lp_build_add(coord_bld, coord, offset);
      }
      /* was: clamp to [-0.5, length + 0.5], then sub 0.5 */
      /* can skip clamp (though might not work for very large coord values);
       * out-of-range texels get the border color via the border mask
       */
      coord = lp_build_sub(coord_bld, coord, half);
      /* convert to int, compute lerp weight */
      lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
      coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
      break;

   case PIPE_TEX_WRAP_MIRROR_REPEAT:
      if (offset) {
         /* offset is in texels; normalize it before mirroring */
         offset = lp_build_int_to_float(coord_bld, offset);
         offset = lp_build_div(coord_bld, offset, length_f);
         coord = lp_build_add(coord_bld, coord, offset);
      }
      /* compute mirror function */
      coord = lp_build_coord_mirror(bld, coord);

      /* scale coord to length */
      coord = lp_build_mul(coord_bld, coord, length_f);
      coord = lp_build_sub(coord_bld, coord, half);

      /* convert to int, compute lerp weight */
      lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
      coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);

      /* coord0 = max(coord0, 0) */
      coord0 = lp_build_max(int_coord_bld, coord0, int_coord_bld->zero);
      /* coord1 = min(coord1, length-1) */
      coord1 = lp_build_min(int_coord_bld, coord1, length_minus_one);
      break;

   case PIPE_TEX_WRAP_MIRROR_CLAMP:
      if (bld->static_sampler_state->normalized_coords) {
         /* scale coord to length */
         coord = lp_build_mul(coord_bld, coord, length_f);
      }
      if (offset) {
         offset = lp_build_int_to_float(coord_bld, offset);
         coord = lp_build_add(coord_bld, coord, offset);
      }
      /* mirror about 0: abs() is the mirror function here */
      coord = lp_build_abs(coord_bld, coord);

      /* clamp to [0, length] */
      coord = lp_build_min(coord_bld, coord, length_f);

      coord = lp_build_sub(coord_bld, coord, half);

      /* convert to int, compute lerp weight */
      lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
      coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
      break;

   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
      {
         /* use an unsigned context: coord is non-negative after abs/clamp */
         struct lp_build_context abs_coord_bld = bld->coord_bld;
         abs_coord_bld.type.sign = FALSE;

         if (bld->static_sampler_state->normalized_coords) {
            /* scale coord to length */
            coord = lp_build_mul(coord_bld, coord, length_f);
         }
         if (offset) {
            offset = lp_build_int_to_float(coord_bld, offset);
            coord = lp_build_add(coord_bld, coord, offset);
         }
         coord = lp_build_abs(coord_bld, coord);

         /* clamp to length max */
         coord = lp_build_min_ext(coord_bld, coord, length_f,
                                  GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN);
         /* subtract 0.5 */
         coord = lp_build_sub(coord_bld, coord, half);
         /* clamp to [0, length - 0.5] */
         coord = lp_build_max(coord_bld, coord, coord_bld->zero);

         /* convert to int, compute lerp weight */
         lp_build_ifloor_fract(&abs_coord_bld, coord, &coord0, &weight);
         coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
         /* coord1 = min(coord1, length-1) */
         coord1 = lp_build_min(int_coord_bld, coord1, length_minus_one);
      }
      break;

   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
      {
         if (bld->static_sampler_state->normalized_coords) {
            /* scale coord to length */
            coord = lp_build_mul(coord_bld, coord, length_f);
         }
         if (offset) {
            offset = lp_build_int_to_float(coord_bld, offset);
            coord = lp_build_add(coord_bld, coord, offset);
         }
         coord = lp_build_abs(coord_bld, coord);

         /* was: clamp to [-0.5, length + 0.5] then sub 0.5 */
         /* skip clamp - always positive, and other side
            only potentially matters for very large coords */
         coord = lp_build_sub(coord_bld, coord, half);

         /* convert to int, compute lerp weight */
         lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
         coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
      }
      break;

   default:
      assert(0);
      coord0 = NULL;
      coord1 = NULL;
      weight = NULL;
   }

   *x0_out = coord0;
   *x1_out = coord1;
   *weight_out = weight;
}
528
529
/**
 * Build LLVM code for texture wrap mode for nearest filtering.
 * \param coord the incoming texcoord (nominally in [0,1])
 * \param length the texture size along one dimension, as int vector
 * \param length_f the texture size along one dimension, as float vector
 * \param offset texel offset along one dimension (as int vector)
 * \param is_pot if TRUE, length is a power of two
 * \param wrap_mode one of PIPE_TEX_WRAP_x
 * \return the wrapped integer texcoord
 */
static LLVMValueRef
lp_build_sample_wrap_nearest(struct lp_build_sample_context *bld,
                             LLVMValueRef coord,
                             LLVMValueRef length,
                             LLVMValueRef length_f,
                             LLVMValueRef offset,
                             boolean is_pot,
                             unsigned wrap_mode)
{
   struct lp_build_context *coord_bld = &bld->coord_bld;
   struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef length_minus_one = lp_build_sub(int_coord_bld, length, int_coord_bld->one);
   LLVMValueRef icoord;

   switch(wrap_mode) {
   case PIPE_TEX_WRAP_REPEAT:
      if (is_pot) {
         coord = lp_build_mul(coord_bld, coord, length_f);
         icoord = lp_build_ifloor(coord_bld, coord);
         if (offset) {
            icoord = lp_build_add(int_coord_bld, icoord, offset);
         }
         /* repeat wrap: cheap mask since size is a power of two */
         icoord = LLVMBuildAnd(builder, icoord, length_minus_one, "");
      }
      else {
         if (offset) {
            /* offset is in texels; normalize it before the repeat wrap */
            offset = lp_build_int_to_float(coord_bld, offset);
            offset = lp_build_div(coord_bld, offset, length_f);
            coord = lp_build_add(coord_bld, coord, offset);
         }
         /* take fraction, unnormalize */
         coord = lp_build_fract_safe(coord_bld, coord);
         coord = lp_build_mul(coord_bld, coord, length_f);
         icoord = lp_build_itrunc(coord_bld, coord);
      }
      break;

   case PIPE_TEX_WRAP_CLAMP:
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
      if (bld->static_sampler_state->normalized_coords) {
         /* scale coord to length */
         coord = lp_build_mul(coord_bld, coord, length_f);
      }

      if (offset) {
         offset = lp_build_int_to_float(coord_bld, offset);
         coord = lp_build_add(coord_bld, coord, offset);
      }
      /* floor */
      /* use itrunc instead since we clamp to 0 anyway */
      icoord = lp_build_itrunc(coord_bld, coord);

      /* clamp to [0, length - 1]. */
      icoord = lp_build_clamp(int_coord_bld, icoord, int_coord_bld->zero,
                              length_minus_one);
      break;

   case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
      if (bld->static_sampler_state->normalized_coords) {
         /* scale coord to length */
         coord = lp_build_mul(coord_bld, coord, length_f);
      }
      /* no clamp necessary, border masking will handle this */
      icoord = lp_build_ifloor(coord_bld, coord);
      if (offset) {
         icoord = lp_build_add(int_coord_bld, icoord, offset);
      }
      break;

   case PIPE_TEX_WRAP_MIRROR_REPEAT:
      if (offset) {
         /* offset is in texels; normalize it before mirroring */
         offset = lp_build_int_to_float(coord_bld, offset);
         offset = lp_build_div(coord_bld, offset, length_f);
         coord = lp_build_add(coord_bld, coord, offset);
      }
      /* compute mirror function */
      coord = lp_build_coord_mirror(bld, coord);

      /* scale coord to length */
      assert(bld->static_sampler_state->normalized_coords);
      coord = lp_build_mul(coord_bld, coord, length_f);

      /* itrunc == ifloor here (mirrored coord is non-negative) */
      icoord = lp_build_itrunc(coord_bld, coord);

      /* clamp to [0, length - 1] */
      icoord = lp_build_min(int_coord_bld, icoord, length_minus_one);
      break;

   case PIPE_TEX_WRAP_MIRROR_CLAMP:
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
      if (bld->static_sampler_state->normalized_coords) {
         /* scale coord to length */
         coord = lp_build_mul(coord_bld, coord, length_f);
      }
      if (offset) {
         offset = lp_build_int_to_float(coord_bld, offset);
         coord = lp_build_add(coord_bld, coord, offset);
      }
      /* mirror about 0: abs() is the mirror function here */
      coord = lp_build_abs(coord_bld, coord);

      /* itrunc == ifloor here (coord is non-negative after abs) */
      icoord = lp_build_itrunc(coord_bld, coord);
      /*
       * Use unsigned min due to possible undef values (NaNs, overflow)
       */
      {
         struct lp_build_context abs_coord_bld = *int_coord_bld;
         abs_coord_bld.type.sign = FALSE;
         /* clamp to [0, length - 1] */
         icoord = lp_build_min(&abs_coord_bld, icoord, length_minus_one);
      }
      break;

   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
      if (bld->static_sampler_state->normalized_coords) {
         /* scale coord to length */
         coord = lp_build_mul(coord_bld, coord, length_f);
      }
      if (offset) {
         offset = lp_build_int_to_float(coord_bld, offset);
         coord = lp_build_add(coord_bld, coord, offset);
      }
      coord = lp_build_abs(coord_bld, coord);

      /* itrunc == ifloor here (coord is non-negative after abs);
       * no clamp, border masking handles out-of-range texels
       */
      icoord = lp_build_itrunc(coord_bld, coord);
      break;

   default:
      assert(0);
      icoord = NULL;
   }

   return icoord;
}
676
677
678 /**
679 * Do shadow test/comparison.
680 * \param p shadow ref value
681 * \param texel the texel to compare against
682 */
683 static LLVMValueRef
684 lp_build_sample_comparefunc(struct lp_build_sample_context *bld,
685 LLVMValueRef p,
686 LLVMValueRef texel)
687 {
688 struct lp_build_context *texel_bld = &bld->texel_bld;
689 LLVMValueRef res;
690
691 if (0) {
692 //lp_build_print_value(bld->gallivm, "shadow cmp coord", p);
693 lp_build_print_value(bld->gallivm, "shadow cmp texel", texel);
694 }
695
696 /* result = (p FUNC texel) ? 1 : 0 */
697 /*
698 * honor d3d10 floating point rules here, which state that comparisons
699 * are ordered except NOT_EQUAL which is unordered.
700 */
701 if (bld->static_sampler_state->compare_func != PIPE_FUNC_NOTEQUAL) {
702 res = lp_build_cmp_ordered(texel_bld, bld->static_sampler_state->compare_func,
703 p, texel);
704 }
705 else {
706 res = lp_build_cmp(texel_bld, bld->static_sampler_state->compare_func,
707 p, texel);
708 }
709 return res;
710 }
711
712
/**
 * Generate code to sample a mipmap level with nearest filtering.
 * If sampling a cube texture, r = cube face in [0,5].
 *
 * \param size            int vector with the width/height/depth of the level
 * \param row_stride_vec  row stride (int vector)
 * \param img_stride_vec  image/slice stride (int vector)
 * \param data_ptr        base pointer of the mip level's data
 * \param mipoffsets      optional per-element mip byte offsets (may be NULL)
 * \param coords          the texture coordinates (s, t, r, layer/face, shadow ref)
 * \param offsets         per-dimension texel offsets
 * \param colors_out      returns the four sampled channel vectors
 */
static void
lp_build_sample_image_nearest(struct lp_build_sample_context *bld,
                              LLVMValueRef size,
                              LLVMValueRef row_stride_vec,
                              LLVMValueRef img_stride_vec,
                              LLVMValueRef data_ptr,
                              LLVMValueRef mipoffsets,
                              LLVMValueRef *coords,
                              const LLVMValueRef *offsets,
                              LLVMValueRef colors_out[4])
{
   const unsigned dims = bld->dims;
   LLVMValueRef width_vec;
   LLVMValueRef height_vec;
   LLVMValueRef depth_vec;
   LLVMValueRef flt_size;
   LLVMValueRef flt_width_vec;
   LLVMValueRef flt_height_vec;
   LLVMValueRef flt_depth_vec;
   LLVMValueRef x, y = NULL, z = NULL;

   /* extract int sizes per dimension */
   lp_build_extract_image_sizes(bld,
                                &bld->int_size_bld,
                                bld->int_coord_type,
                                size,
                                &width_vec, &height_vec, &depth_vec);

   /* and the same sizes as floats (for coord scaling) */
   flt_size = lp_build_int_to_float(&bld->float_size_bld, size);

   lp_build_extract_image_sizes(bld,
                                &bld->float_size_bld,
                                bld->coord_type,
                                flt_size,
                                &flt_width_vec, &flt_height_vec, &flt_depth_vec);

   /*
    * Compute integer texcoords.
    */
   x = lp_build_sample_wrap_nearest(bld, coords[0], width_vec,
                                    flt_width_vec, offsets[0],
                                    bld->static_texture_state->pot_width,
                                    bld->static_sampler_state->wrap_s);
   lp_build_name(x, "tex.x.wrapped");

   if (dims >= 2) {
      y = lp_build_sample_wrap_nearest(bld, coords[1], height_vec,
                                       flt_height_vec, offsets[1],
                                       bld->static_texture_state->pot_height,
                                       bld->static_sampler_state->wrap_t);
      lp_build_name(y, "tex.y.wrapped");

      if (dims == 3) {
         z = lp_build_sample_wrap_nearest(bld, coords[2], depth_vec,
                                          flt_depth_vec, offsets[2],
                                          bld->static_texture_state->pot_depth,
                                          bld->static_sampler_state->wrap_r);
         lp_build_name(z, "tex.z.wrapped");
      }
   }
   if (has_layer_coord(bld->static_texture_state->target)) {
      if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) {
         /* add cube layer to face */
         z = lp_build_add(&bld->int_coord_bld, coords[2], coords[3]);
      }
      else {
         z = coords[2];
      }
      lp_build_name(z, "tex.z.layer");
   }

   /*
    * Get texture colors.
    */
   lp_build_sample_texel_soa(bld,
                             width_vec, height_vec, depth_vec,
                             x, y, z,
                             row_stride_vec, img_stride_vec,
                             data_ptr, mipoffsets, colors_out);

   if (bld->static_sampler_state->compare_mode != PIPE_TEX_COMPARE_NONE) {
      /* shadow comparison: result is 0.0/1.0 in the first channel,
       * replicated to all four (coords[4] holds the shadow ref value)
       */
      LLVMValueRef cmpval;
      cmpval = lp_build_sample_comparefunc(bld, coords[4], colors_out[0]);
      /* this is really just a AND 1.0, cmpval but llvm is clever enough */
      colors_out[0] = lp_build_select(&bld->texel_bld, cmpval,
                                      bld->texel_bld.one, bld->texel_bld.zero);
      colors_out[1] = colors_out[2] = colors_out[3] = colors_out[0];
   }

}
806
807
808 /**
809 * Like a lerp, but inputs are 0/~0 masks, so can simplify slightly.
810 */
811 static LLVMValueRef
812 lp_build_masklerp(struct lp_build_context *bld,
813 LLVMValueRef weight,
814 LLVMValueRef mask0,
815 LLVMValueRef mask1)
816 {
817 struct gallivm_state *gallivm = bld->gallivm;
818 LLVMBuilderRef builder = gallivm->builder;
819 LLVMValueRef weight2;
820
821 weight2 = lp_build_sub(bld, bld->one, weight);
822 weight = LLVMBuildBitCast(builder, weight,
823 lp_build_int_vec_type(gallivm, bld->type), "");
824 weight2 = LLVMBuildBitCast(builder, weight2,
825 lp_build_int_vec_type(gallivm, bld->type), "");
826 weight = LLVMBuildAnd(builder, weight, mask1, "");
827 weight2 = LLVMBuildAnd(builder, weight2, mask0, "");
828 weight = LLVMBuildBitCast(builder, weight, bld->vec_type, "");
829 weight2 = LLVMBuildBitCast(builder, weight2, bld->vec_type, "");
830 return lp_build_add(bld, weight, weight2);
831 }
832
833 /**
834 * Like a 2d lerp, but inputs are 0/~0 masks, so can simplify slightly.
835 */
836 static LLVMValueRef
837 lp_build_masklerp2d(struct lp_build_context *bld,
838 LLVMValueRef weight0,
839 LLVMValueRef weight1,
840 LLVMValueRef mask00,
841 LLVMValueRef mask01,
842 LLVMValueRef mask10,
843 LLVMValueRef mask11)
844 {
845 LLVMValueRef val0 = lp_build_masklerp(bld, weight0, mask00, mask01);
846 LLVMValueRef val1 = lp_build_masklerp(bld, weight0, mask10, mask11);
847 return lp_build_lerp(bld, weight1, val0, val1, 0);
848 }
849
850 /*
851 * this is a bit excessive code for something OpenGL just recommends
852 * but does not require.
853 */
854 #define ACCURATE_CUBE_CORNERS 1
855
856 /**
857 * Generate code to sample a mipmap level with linear filtering.
858 * If sampling a cube texture, r = cube face in [0,5].
859 * If linear_mask is present, only pixels having their mask set
860 * will receive linear filtering, the rest will use nearest.
861 */
862 static void
863 lp_build_sample_image_linear(struct lp_build_sample_context *bld,
864 boolean is_gather,
865 LLVMValueRef size,
866 LLVMValueRef linear_mask,
867 LLVMValueRef row_stride_vec,
868 LLVMValueRef img_stride_vec,
869 LLVMValueRef data_ptr,
870 LLVMValueRef mipoffsets,
871 LLVMValueRef *coords,
872 const LLVMValueRef *offsets,
873 LLVMValueRef colors_out[4])
874 {
875 LLVMBuilderRef builder = bld->gallivm->builder;
876 struct lp_build_context *ivec_bld = &bld->int_coord_bld;
877 struct lp_build_context *coord_bld = &bld->coord_bld;
878 struct lp_build_context *texel_bld = &bld->texel_bld;
879 const unsigned dims = bld->dims;
880 LLVMValueRef width_vec;
881 LLVMValueRef height_vec;
882 LLVMValueRef depth_vec;
883 LLVMValueRef flt_size;
884 LLVMValueRef flt_width_vec;
885 LLVMValueRef flt_height_vec;
886 LLVMValueRef flt_depth_vec;
887 LLVMValueRef fall_off[4], have_corners;
888 LLVMValueRef z1 = NULL;
889 LLVMValueRef z00 = NULL, z01 = NULL, z10 = NULL, z11 = NULL;
890 LLVMValueRef x00 = NULL, x01 = NULL, x10 = NULL, x11 = NULL;
891 LLVMValueRef y00 = NULL, y01 = NULL, y10 = NULL, y11 = NULL;
892 LLVMValueRef s_fpart, t_fpart = NULL, r_fpart = NULL;
893 LLVMValueRef xs[4], ys[4], zs[4];
894 LLVMValueRef neighbors[2][2][4];
895 int chan, texel_index;
896 boolean seamless_cube_filter, accurate_cube_corners;
897
898 seamless_cube_filter = (bld->static_texture_state->target == PIPE_TEXTURE_CUBE ||
899 bld->static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) &&
900 bld->static_sampler_state->seamless_cube_map;
901 /*
902 * XXX I don't know how this is really supposed to work with gather. From GL
903 * spec wording (not gather specific) it sounds like the 4th missing texel
904 * should be an average of the other 3, hence for gather could return this.
905 * This is however NOT how the code here works, which just fixes up the
906 * weights used for filtering instead. And of course for gather there is
907 * no filter to tweak...
908 */
909 accurate_cube_corners = ACCURATE_CUBE_CORNERS && seamless_cube_filter &&
910 !is_gather;
911
912 lp_build_extract_image_sizes(bld,
913 &bld->int_size_bld,
914 bld->int_coord_type,
915 size,
916 &width_vec, &height_vec, &depth_vec);
917
918 flt_size = lp_build_int_to_float(&bld->float_size_bld, size);
919
920 lp_build_extract_image_sizes(bld,
921 &bld->float_size_bld,
922 bld->coord_type,
923 flt_size,
924 &flt_width_vec, &flt_height_vec, &flt_depth_vec);
925
926 /*
927 * Compute integer texcoords.
928 */
929
930 if (!seamless_cube_filter) {
931 lp_build_sample_wrap_linear(bld, coords[0], width_vec,
932 flt_width_vec, offsets[0],
933 bld->static_texture_state->pot_width,
934 bld->static_sampler_state->wrap_s,
935 &x00, &x01, &s_fpart);
936 lp_build_name(x00, "tex.x0.wrapped");
937 lp_build_name(x01, "tex.x1.wrapped");
938 x10 = x00;
939 x11 = x01;
940
941 if (dims >= 2) {
942 lp_build_sample_wrap_linear(bld, coords[1], height_vec,
943 flt_height_vec, offsets[1],
944 bld->static_texture_state->pot_height,
945 bld->static_sampler_state->wrap_t,
946 &y00, &y10, &t_fpart);
947 lp_build_name(y00, "tex.y0.wrapped");
948 lp_build_name(y10, "tex.y1.wrapped");
949 y01 = y00;
950 y11 = y10;
951
952 if (dims == 3) {
953 lp_build_sample_wrap_linear(bld, coords[2], depth_vec,
954 flt_depth_vec, offsets[2],
955 bld->static_texture_state->pot_depth,
956 bld->static_sampler_state->wrap_r,
957 &z00, &z1, &r_fpart);
958 z01 = z10 = z11 = z00;
959 lp_build_name(z00, "tex.z0.wrapped");
960 lp_build_name(z1, "tex.z1.wrapped");
961 }
962 }
963 if (has_layer_coord(bld->static_texture_state->target)) {
964 if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) {
965 /* add cube layer to face */
966 z00 = z01 = z10 = z11 = z1 =
967 lp_build_add(&bld->int_coord_bld, coords[2], coords[3]);
968 }
969 else {
970 z00 = z01 = z10 = z11 = z1 = coords[2]; /* cube face or layer */
971 }
972 lp_build_name(z00, "tex.z0.layer");
973 lp_build_name(z1, "tex.z1.layer");
974 }
975 }
976 else {
977 struct lp_build_if_state edge_if;
978 LLVMTypeRef int1t;
979 LLVMValueRef new_faces[4], new_xcoords[4][2], new_ycoords[4][2];
980 LLVMValueRef coord, have_edge, have_corner;
981 LLVMValueRef fall_off_ym_notxm, fall_off_ym_notxp, fall_off_x, fall_off_y;
982 LLVMValueRef fall_off_yp_notxm, fall_off_yp_notxp;
983 LLVMValueRef x0, x1, y0, y1, y0_clamped, y1_clamped;
984 LLVMValueRef face = coords[2];
985 LLVMValueRef half = lp_build_const_vec(bld->gallivm, coord_bld->type, 0.5f);
986 LLVMValueRef length_minus_one = lp_build_sub(ivec_bld, width_vec, ivec_bld->one);
987 /* XXX drop height calcs. Could (should) do this without seamless filtering too */
988 height_vec = width_vec;
989 flt_height_vec = flt_width_vec;
990
991 /* XXX the overflow logic is actually sort of duplicated with trilinear,
992 * since an overflow in one mip should also have a corresponding overflow
993 * in another.
994 */
995 /* should always have normalized coords, and offsets are undefined */
996 assert(bld->static_sampler_state->normalized_coords);
997 coord = lp_build_mul(coord_bld, coords[0], flt_width_vec);
998 /* instead of clamp, build mask if overflowed */
999 coord = lp_build_sub(coord_bld, coord, half);
1000 /* convert to int, compute lerp weight */
1001 /* not ideal with AVX (and no AVX2) */
1002 lp_build_ifloor_fract(coord_bld, coord, &x0, &s_fpart);
1003 x1 = lp_build_add(ivec_bld, x0, ivec_bld->one);
1004 coord = lp_build_mul(coord_bld, coords[1], flt_height_vec);
1005 coord = lp_build_sub(coord_bld, coord, half);
1006 lp_build_ifloor_fract(coord_bld, coord, &y0, &t_fpart);
1007 y1 = lp_build_add(ivec_bld, y0, ivec_bld->one);
1008
1009 fall_off[0] = lp_build_cmp(ivec_bld, PIPE_FUNC_LESS, x0, ivec_bld->zero);
1010 fall_off[1] = lp_build_cmp(ivec_bld, PIPE_FUNC_GREATER, x1, length_minus_one);
1011 fall_off[2] = lp_build_cmp(ivec_bld, PIPE_FUNC_LESS, y0, ivec_bld->zero);
1012 fall_off[3] = lp_build_cmp(ivec_bld, PIPE_FUNC_GREATER, y1, length_minus_one);
1013
1014 fall_off_x = lp_build_or(ivec_bld, fall_off[0], fall_off[1]);
1015 fall_off_y = lp_build_or(ivec_bld, fall_off[2], fall_off[3]);
1016 have_edge = lp_build_or(ivec_bld, fall_off_x, fall_off_y);
1017 have_edge = lp_build_any_true_range(ivec_bld, ivec_bld->type.length, have_edge);
1018
1019 /* needed for accurate corner filtering branch later, rely on 0 init */
1020 int1t = LLVMInt1TypeInContext(bld->gallivm->context);
1021 have_corners = lp_build_alloca(bld->gallivm, int1t, "have_corner");
1022
1023 for (texel_index = 0; texel_index < 4; texel_index++) {
1024 xs[texel_index] = lp_build_alloca(bld->gallivm, ivec_bld->vec_type, "xs");
1025 ys[texel_index] = lp_build_alloca(bld->gallivm, ivec_bld->vec_type, "ys");
1026 zs[texel_index] = lp_build_alloca(bld->gallivm, ivec_bld->vec_type, "zs");
1027 }
1028
1029 lp_build_if(&edge_if, bld->gallivm, have_edge);
1030
1031 have_corner = lp_build_and(ivec_bld, fall_off_x, fall_off_y);
1032 have_corner = lp_build_any_true_range(ivec_bld, ivec_bld->type.length, have_corner);
1033 LLVMBuildStore(builder, have_corner, have_corners);
1034
1035 /*
1036 * Need to feed clamped values here for cheap corner handling,
1037 * but only for y coord (as when falling off both edges we only
1038 * fall off the x one) - this should be sufficient.
1039 */
1040 y0_clamped = lp_build_max(ivec_bld, y0, ivec_bld->zero);
1041 y1_clamped = lp_build_min(ivec_bld, y1, length_minus_one);
1042
1043 /*
1044 * Get all possible new coords.
1045 */
1046 lp_build_cube_new_coords(ivec_bld, face,
1047 x0, x1, y0_clamped, y1_clamped,
1048 length_minus_one,
1049 new_faces, new_xcoords, new_ycoords);
1050
1051 /* handle fall off x-, x+ direction */
1052 /* determine new coords, face (not both fall_off vars can be true at same time) */
1053 x00 = lp_build_select(ivec_bld, fall_off[0], new_xcoords[0][0], x0);
1054 y00 = lp_build_select(ivec_bld, fall_off[0], new_ycoords[0][0], y0_clamped);
1055 x10 = lp_build_select(ivec_bld, fall_off[0], new_xcoords[0][1], x0);
1056 y10 = lp_build_select(ivec_bld, fall_off[0], new_ycoords[0][1], y1_clamped);
1057 x01 = lp_build_select(ivec_bld, fall_off[1], new_xcoords[1][0], x1);
1058 y01 = lp_build_select(ivec_bld, fall_off[1], new_ycoords[1][0], y0_clamped);
1059 x11 = lp_build_select(ivec_bld, fall_off[1], new_xcoords[1][1], x1);
1060 y11 = lp_build_select(ivec_bld, fall_off[1], new_ycoords[1][1], y1_clamped);
1061
1062 z00 = z10 = lp_build_select(ivec_bld, fall_off[0], new_faces[0], face);
1063 z01 = z11 = lp_build_select(ivec_bld, fall_off[1], new_faces[1], face);
1064
1065 /* handle fall off y-, y+ direction */
1066 /*
1067 * Cheap corner logic: just hack up things so a texel doesn't fall
1068 * off both sides (which means filter weights will be wrong but we'll only
1069 * use valid texels in the filter).
1070 * This means however (y) coords must additionally be clamped (see above).
1071 * This corner handling should be fully OpenGL (but not d3d10) compliant.
1072 */
1073 fall_off_ym_notxm = lp_build_andnot(ivec_bld, fall_off[2], fall_off[0]);
1074 fall_off_ym_notxp = lp_build_andnot(ivec_bld, fall_off[2], fall_off[1]);
1075 fall_off_yp_notxm = lp_build_andnot(ivec_bld, fall_off[3], fall_off[0]);
1076 fall_off_yp_notxp = lp_build_andnot(ivec_bld, fall_off[3], fall_off[1]);
1077
1078 x00 = lp_build_select(ivec_bld, fall_off_ym_notxm, new_xcoords[2][0], x00);
1079 y00 = lp_build_select(ivec_bld, fall_off_ym_notxm, new_ycoords[2][0], y00);
1080 x01 = lp_build_select(ivec_bld, fall_off_ym_notxp, new_xcoords[2][1], x01);
1081 y01 = lp_build_select(ivec_bld, fall_off_ym_notxp, new_ycoords[2][1], y01);
1082 x10 = lp_build_select(ivec_bld, fall_off_yp_notxm, new_xcoords[3][0], x10);
1083 y10 = lp_build_select(ivec_bld, fall_off_yp_notxm, new_ycoords[3][0], y10);
1084 x11 = lp_build_select(ivec_bld, fall_off_yp_notxp, new_xcoords[3][1], x11);
1085 y11 = lp_build_select(ivec_bld, fall_off_yp_notxp, new_ycoords[3][1], y11);
1086
1087 z00 = lp_build_select(ivec_bld, fall_off_ym_notxm, new_faces[2], z00);
1088 z01 = lp_build_select(ivec_bld, fall_off_ym_notxp, new_faces[2], z01);
1089 z10 = lp_build_select(ivec_bld, fall_off_yp_notxm, new_faces[3], z10);
1090 z11 = lp_build_select(ivec_bld, fall_off_yp_notxp, new_faces[3], z11);
1091
1092 if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) {
1093 /* now can add cube layer to face (per sample) */
1094 z00 = lp_build_add(ivec_bld, z00, coords[3]);
1095 z01 = lp_build_add(ivec_bld, z01, coords[3]);
1096 z10 = lp_build_add(ivec_bld, z10, coords[3]);
1097 z11 = lp_build_add(ivec_bld, z11, coords[3]);
1098 }
1099
1100 LLVMBuildStore(builder, x00, xs[0]);
1101 LLVMBuildStore(builder, x01, xs[1]);
1102 LLVMBuildStore(builder, x10, xs[2]);
1103 LLVMBuildStore(builder, x11, xs[3]);
1104 LLVMBuildStore(builder, y00, ys[0]);
1105 LLVMBuildStore(builder, y01, ys[1]);
1106 LLVMBuildStore(builder, y10, ys[2]);
1107 LLVMBuildStore(builder, y11, ys[3]);
1108 LLVMBuildStore(builder, z00, zs[0]);
1109 LLVMBuildStore(builder, z01, zs[1]);
1110 LLVMBuildStore(builder, z10, zs[2]);
1111 LLVMBuildStore(builder, z11, zs[3]);
1112
1113 lp_build_else(&edge_if);
1114
1115 LLVMBuildStore(builder, x0, xs[0]);
1116 LLVMBuildStore(builder, x1, xs[1]);
1117 LLVMBuildStore(builder, x0, xs[2]);
1118 LLVMBuildStore(builder, x1, xs[3]);
1119 LLVMBuildStore(builder, y0, ys[0]);
1120 LLVMBuildStore(builder, y0, ys[1]);
1121 LLVMBuildStore(builder, y1, ys[2]);
1122 LLVMBuildStore(builder, y1, ys[3]);
1123 if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) {
1124 LLVMValueRef cube_layer = lp_build_add(ivec_bld, face, coords[3]);
1125 LLVMBuildStore(builder, cube_layer, zs[0]);
1126 LLVMBuildStore(builder, cube_layer, zs[1]);
1127 LLVMBuildStore(builder, cube_layer, zs[2]);
1128 LLVMBuildStore(builder, cube_layer, zs[3]);
1129 }
1130 else {
1131 LLVMBuildStore(builder, face, zs[0]);
1132 LLVMBuildStore(builder, face, zs[1]);
1133 LLVMBuildStore(builder, face, zs[2]);
1134 LLVMBuildStore(builder, face, zs[3]);
1135 }
1136
1137 lp_build_endif(&edge_if);
1138
1139 x00 = LLVMBuildLoad(builder, xs[0], "");
1140 x01 = LLVMBuildLoad(builder, xs[1], "");
1141 x10 = LLVMBuildLoad(builder, xs[2], "");
1142 x11 = LLVMBuildLoad(builder, xs[3], "");
1143 y00 = LLVMBuildLoad(builder, ys[0], "");
1144 y01 = LLVMBuildLoad(builder, ys[1], "");
1145 y10 = LLVMBuildLoad(builder, ys[2], "");
1146 y11 = LLVMBuildLoad(builder, ys[3], "");
1147 z00 = LLVMBuildLoad(builder, zs[0], "");
1148 z01 = LLVMBuildLoad(builder, zs[1], "");
1149 z10 = LLVMBuildLoad(builder, zs[2], "");
1150 z11 = LLVMBuildLoad(builder, zs[3], "");
1151 }
1152
1153 if (linear_mask) {
1154 /*
1155 * Whack filter weights into place. Whatever texel had more weight is
1156 * the one which should have been selected by nearest filtering hence
1157 * just use 100% weight for it.
1158 */
1159 struct lp_build_context *c_bld = &bld->coord_bld;
1160 LLVMValueRef w1_mask, w1_weight;
1161 LLVMValueRef half = lp_build_const_vec(bld->gallivm, c_bld->type, 0.5f);
1162
1163 w1_mask = lp_build_cmp(c_bld, PIPE_FUNC_GREATER, s_fpart, half);
1164 /* this select is really just a "and" */
1165 w1_weight = lp_build_select(c_bld, w1_mask, c_bld->one, c_bld->zero);
1166 s_fpart = lp_build_select(c_bld, linear_mask, s_fpart, w1_weight);
1167 if (dims >= 2) {
1168 w1_mask = lp_build_cmp(c_bld, PIPE_FUNC_GREATER, t_fpart, half);
1169 w1_weight = lp_build_select(c_bld, w1_mask, c_bld->one, c_bld->zero);
1170 t_fpart = lp_build_select(c_bld, linear_mask, t_fpart, w1_weight);
1171 if (dims == 3) {
1172 w1_mask = lp_build_cmp(c_bld, PIPE_FUNC_GREATER, r_fpart, half);
1173 w1_weight = lp_build_select(c_bld, w1_mask, c_bld->one, c_bld->zero);
1174 r_fpart = lp_build_select(c_bld, linear_mask, r_fpart, w1_weight);
1175 }
1176 }
1177 }
1178
1179 /*
1180 * Get texture colors.
1181 */
1182 /* get x0/x1 texels */
1183 lp_build_sample_texel_soa(bld,
1184 width_vec, height_vec, depth_vec,
1185 x00, y00, z00,
1186 row_stride_vec, img_stride_vec,
1187 data_ptr, mipoffsets, neighbors[0][0]);
1188 lp_build_sample_texel_soa(bld,
1189 width_vec, height_vec, depth_vec,
1190 x01, y01, z01,
1191 row_stride_vec, img_stride_vec,
1192 data_ptr, mipoffsets, neighbors[0][1]);
1193
1194 if (dims == 1) {
1195 assert(!is_gather);
1196 if (bld->static_sampler_state->compare_mode == PIPE_TEX_COMPARE_NONE) {
1197 /* Interpolate two samples from 1D image to produce one color */
1198 for (chan = 0; chan < 4; chan++) {
1199 colors_out[chan] = lp_build_lerp(texel_bld, s_fpart,
1200 neighbors[0][0][chan],
1201 neighbors[0][1][chan],
1202 0);
1203 }
1204 }
1205 else {
1206 LLVMValueRef cmpval0, cmpval1;
1207 cmpval0 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][0][0]);
1208 cmpval1 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][1][0]);
1209 /* simplified lerp, AND mask with weight and add */
1210 colors_out[0] = lp_build_masklerp(texel_bld, s_fpart,
1211 cmpval0, cmpval1);
1212 colors_out[1] = colors_out[2] = colors_out[3] = colors_out[0];
1213 }
1214 }
1215 else {
1216 /* 2D/3D texture */
1217 struct lp_build_if_state corner_if;
1218 LLVMValueRef colors0[4], colorss[4];
1219
1220 /* get x0/x1 texels at y1 */
1221 lp_build_sample_texel_soa(bld,
1222 width_vec, height_vec, depth_vec,
1223 x10, y10, z10,
1224 row_stride_vec, img_stride_vec,
1225 data_ptr, mipoffsets, neighbors[1][0]);
1226 lp_build_sample_texel_soa(bld,
1227 width_vec, height_vec, depth_vec,
1228 x11, y11, z11,
1229 row_stride_vec, img_stride_vec,
1230 data_ptr, mipoffsets, neighbors[1][1]);
1231
1232 /*
1233 * To avoid having to duplicate linear_mask / fetch code use
1234 * another branch (with corner condition though edge would work
1235 * as well) here.
1236 */
1237 if (accurate_cube_corners) {
1238 LLVMValueRef w00, w01, w10, w11, wx0, wy0;
1239 LLVMValueRef c_weight, c00, c01, c10, c11;
1240 LLVMValueRef have_corner, one_third, tmp;
1241
1242 colorss[0] = lp_build_alloca(bld->gallivm, coord_bld->vec_type, "cs");
1243 colorss[1] = lp_build_alloca(bld->gallivm, coord_bld->vec_type, "cs");
1244 colorss[2] = lp_build_alloca(bld->gallivm, coord_bld->vec_type, "cs");
1245 colorss[3] = lp_build_alloca(bld->gallivm, coord_bld->vec_type, "cs");
1246
1247 have_corner = LLVMBuildLoad(builder, have_corners, "");
1248
1249 lp_build_if(&corner_if, bld->gallivm, have_corner);
1250
1251 /*
1252 * we can't use standard 2d lerp as we need per-element weight
1253 * in case of corners, so just calculate bilinear result as
1254 * w00*s00 + w01*s01 + w10*s10 + w11*s11.
1255 * (This is actually less work than using 2d lerp, 7 vs. 9 instructions,
1256 * however calculating the weights needs another 6, so actually probably
1257 * not slower than 2d lerp only for 4 channels as weights only need
1258 * to be calculated once - of course fixing the weights has additional cost.)
1259 */
1260 wx0 = lp_build_sub(coord_bld, coord_bld->one, s_fpart);
1261 wy0 = lp_build_sub(coord_bld, coord_bld->one, t_fpart);
1262 w00 = lp_build_mul(coord_bld, wx0, wy0);
1263 w01 = lp_build_mul(coord_bld, s_fpart, wy0);
1264 w10 = lp_build_mul(coord_bld, wx0, t_fpart);
1265 w11 = lp_build_mul(coord_bld, s_fpart, t_fpart);
1266
1267 /* find corner weight */
1268 c00 = lp_build_and(ivec_bld, fall_off[0], fall_off[2]);
1269 c_weight = lp_build_select(coord_bld, c00, w00, coord_bld->zero);
1270 c01 = lp_build_and(ivec_bld, fall_off[1], fall_off[2]);
1271 c_weight = lp_build_select(coord_bld, c01, w01, c_weight);
1272 c10 = lp_build_and(ivec_bld, fall_off[0], fall_off[3]);
1273 c_weight = lp_build_select(coord_bld, c10, w10, c_weight);
1274 c11 = lp_build_and(ivec_bld, fall_off[1], fall_off[3]);
1275 c_weight = lp_build_select(coord_bld, c11, w11, c_weight);
1276
1277 /*
1278 * add 1/3 of the corner weight to each of the 3 other samples
1279 * and null out corner weight
1280 */
1281 one_third = lp_build_const_vec(bld->gallivm, coord_bld->type, 1.0f/3.0f);
1282 c_weight = lp_build_mul(coord_bld, c_weight, one_third);
1283 w00 = lp_build_add(coord_bld, w00, c_weight);
1284 c00 = LLVMBuildBitCast(builder, c00, coord_bld->vec_type, "");
1285 w00 = lp_build_andnot(coord_bld, w00, c00);
1286 w01 = lp_build_add(coord_bld, w01, c_weight);
1287 c01 = LLVMBuildBitCast(builder, c01, coord_bld->vec_type, "");
1288 w01 = lp_build_andnot(coord_bld, w01, c01);
1289 w10 = lp_build_add(coord_bld, w10, c_weight);
1290 c10 = LLVMBuildBitCast(builder, c10, coord_bld->vec_type, "");
1291 w10 = lp_build_andnot(coord_bld, w10, c10);
1292 w11 = lp_build_add(coord_bld, w11, c_weight);
1293 c11 = LLVMBuildBitCast(builder, c11, coord_bld->vec_type, "");
1294 w11 = lp_build_andnot(coord_bld, w11, c11);
1295
1296 if (bld->static_sampler_state->compare_mode == PIPE_TEX_COMPARE_NONE) {
1297 for (chan = 0; chan < 4; chan++) {
1298 colors0[chan] = lp_build_mul(coord_bld, w00, neighbors[0][0][chan]);
1299 tmp = lp_build_mul(coord_bld, w01, neighbors[0][1][chan]);
1300 colors0[chan] = lp_build_add(coord_bld, tmp, colors0[chan]);
1301 tmp = lp_build_mul(coord_bld, w10, neighbors[1][0][chan]);
1302 colors0[chan] = lp_build_add(coord_bld, tmp, colors0[chan]);
1303 tmp = lp_build_mul(coord_bld, w11, neighbors[1][1][chan]);
1304 colors0[chan] = lp_build_add(coord_bld, tmp, colors0[chan]);
1305 }
1306 }
1307 else {
1308 LLVMValueRef cmpval00, cmpval01, cmpval10, cmpval11;
1309 cmpval00 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][0][0]);
1310 cmpval01 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][1][0]);
1311 cmpval10 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][0][0]);
1312 cmpval11 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][1][0]);
1313 /* inputs to interpolation are just masks so just add masked weights together */
1314 cmpval00 = LLVMBuildBitCast(builder, cmpval00, coord_bld->vec_type, "");
1315 cmpval01 = LLVMBuildBitCast(builder, cmpval01, coord_bld->vec_type, "");
1316 cmpval10 = LLVMBuildBitCast(builder, cmpval10, coord_bld->vec_type, "");
1317 cmpval11 = LLVMBuildBitCast(builder, cmpval11, coord_bld->vec_type, "");
1318 colors0[0] = lp_build_and(coord_bld, w00, cmpval00);
1319 tmp = lp_build_and(coord_bld, w01, cmpval01);
1320 colors0[0] = lp_build_add(coord_bld, tmp, colors0[0]);
1321 tmp = lp_build_and(coord_bld, w10, cmpval10);
1322 colors0[0] = lp_build_add(coord_bld, tmp, colors0[0]);
1323 tmp = lp_build_and(coord_bld, w11, cmpval11);
1324 colors0[0] = lp_build_add(coord_bld, tmp, colors0[0]);
1325 colors0[1] = colors0[2] = colors0[3] = colors0[0];
1326 }
1327
1328 LLVMBuildStore(builder, colors0[0], colorss[0]);
1329 LLVMBuildStore(builder, colors0[1], colorss[1]);
1330 LLVMBuildStore(builder, colors0[2], colorss[2]);
1331 LLVMBuildStore(builder, colors0[3], colorss[3]);
1332
1333 lp_build_else(&corner_if);
1334 }
1335
1336 if (bld->static_sampler_state->compare_mode == PIPE_TEX_COMPARE_NONE) {
1337 if (is_gather) {
1338 /*
1339 * Just assign the red channel (no component selection yet).
1340 * This is a bit hackish, we usually do the swizzle at the
1341 * end of sampling (much less values to swizzle), but this
1342 * obviously cannot work when using gather.
1343 */
1344 unsigned chan_swiz = bld->static_texture_state->swizzle_r;
1345 colors0[0] = lp_build_swizzle_soa_channel(texel_bld,
1346 neighbors[1][0],
1347 chan_swiz);
1348 colors0[1] = lp_build_swizzle_soa_channel(texel_bld,
1349 neighbors[1][1],
1350 chan_swiz);
1351 colors0[2] = lp_build_swizzle_soa_channel(texel_bld,
1352 neighbors[0][1],
1353 chan_swiz);
1354 colors0[3] = lp_build_swizzle_soa_channel(texel_bld,
1355 neighbors[0][0],
1356 chan_swiz);
1357 }
1358 else {
1359 /* Bilinear interpolate the four samples from the 2D image / 3D slice */
1360 for (chan = 0; chan < 4; chan++) {
1361 colors0[chan] = lp_build_lerp_2d(texel_bld,
1362 s_fpart, t_fpart,
1363 neighbors[0][0][chan],
1364 neighbors[0][1][chan],
1365 neighbors[1][0][chan],
1366 neighbors[1][1][chan],
1367 0);
1368 }
1369 }
1370 }
1371 else {
1372 LLVMValueRef cmpval00, cmpval01, cmpval10, cmpval11;
1373 cmpval00 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][0][0]);
1374 cmpval01 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][1][0]);
1375 cmpval10 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][0][0]);
1376 cmpval11 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][1][0]);
1377
1378 if (is_gather) {
1379 /* more hacks for swizzling, should be X, ONE or ZERO... */
1380 unsigned chan_swiz = bld->static_texture_state->swizzle_r;
1381 if (chan_swiz <= PIPE_SWIZZLE_W) {
1382 colors0[0] = lp_build_select(texel_bld, cmpval10,
1383 texel_bld->one, texel_bld->zero);
1384 colors0[1] = lp_build_select(texel_bld, cmpval11,
1385 texel_bld->one, texel_bld->zero);
1386 colors0[2] = lp_build_select(texel_bld, cmpval01,
1387 texel_bld->one, texel_bld->zero);
1388 colors0[3] = lp_build_select(texel_bld, cmpval00,
1389 texel_bld->one, texel_bld->zero);
1390 }
1391 else if (chan_swiz == PIPE_SWIZZLE_0) {
1392 colors0[0] = colors0[1] = colors0[2] = colors0[3] =
1393 texel_bld->zero;
1394 }
1395 else {
1396 colors0[0] = colors0[1] = colors0[2] = colors0[3] =
1397 texel_bld->one;
1398 }
1399 }
1400 else {
1401 colors0[0] = lp_build_masklerp2d(texel_bld, s_fpart, t_fpart,
1402 cmpval00, cmpval01, cmpval10, cmpval11);
1403 colors0[1] = colors0[2] = colors0[3] = colors0[0];
1404 }
1405 }
1406
1407 if (accurate_cube_corners) {
1408 LLVMBuildStore(builder, colors0[0], colorss[0]);
1409 LLVMBuildStore(builder, colors0[1], colorss[1]);
1410 LLVMBuildStore(builder, colors0[2], colorss[2]);
1411 LLVMBuildStore(builder, colors0[3], colorss[3]);
1412
1413 lp_build_endif(&corner_if);
1414
1415 colors0[0] = LLVMBuildLoad(builder, colorss[0], "");
1416 colors0[1] = LLVMBuildLoad(builder, colorss[1], "");
1417 colors0[2] = LLVMBuildLoad(builder, colorss[2], "");
1418 colors0[3] = LLVMBuildLoad(builder, colorss[3], "");
1419 }
1420
1421 if (dims == 3) {
1422 LLVMValueRef neighbors1[2][2][4];
1423 LLVMValueRef colors1[4];
1424
1425 assert(!is_gather);
1426
1427 /* get x0/x1/y0/y1 texels at z1 */
1428 lp_build_sample_texel_soa(bld,
1429 width_vec, height_vec, depth_vec,
1430 x00, y00, z1,
1431 row_stride_vec, img_stride_vec,
1432 data_ptr, mipoffsets, neighbors1[0][0]);
1433 lp_build_sample_texel_soa(bld,
1434 width_vec, height_vec, depth_vec,
1435 x01, y01, z1,
1436 row_stride_vec, img_stride_vec,
1437 data_ptr, mipoffsets, neighbors1[0][1]);
1438 lp_build_sample_texel_soa(bld,
1439 width_vec, height_vec, depth_vec,
1440 x10, y10, z1,
1441 row_stride_vec, img_stride_vec,
1442 data_ptr, mipoffsets, neighbors1[1][0]);
1443 lp_build_sample_texel_soa(bld,
1444 width_vec, height_vec, depth_vec,
1445 x11, y11, z1,
1446 row_stride_vec, img_stride_vec,
1447 data_ptr, mipoffsets, neighbors1[1][1]);
1448
1449 if (bld->static_sampler_state->compare_mode == PIPE_TEX_COMPARE_NONE) {
1450 /* Bilinear interpolate the four samples from the second Z slice */
1451 for (chan = 0; chan < 4; chan++) {
1452 colors1[chan] = lp_build_lerp_2d(texel_bld,
1453 s_fpart, t_fpart,
1454 neighbors1[0][0][chan],
1455 neighbors1[0][1][chan],
1456 neighbors1[1][0][chan],
1457 neighbors1[1][1][chan],
1458 0);
1459 }
1460 /* Linearly interpolate the two samples from the two 3D slices */
1461 for (chan = 0; chan < 4; chan++) {
1462 colors_out[chan] = lp_build_lerp(texel_bld,
1463 r_fpart,
1464 colors0[chan], colors1[chan],
1465 0);
1466 }
1467 }
1468 else {
1469 LLVMValueRef cmpval00, cmpval01, cmpval10, cmpval11;
1470 cmpval00 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][0][0]);
1471 cmpval01 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][1][0]);
1472 cmpval10 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][0][0]);
1473 cmpval11 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][1][0]);
1474 colors1[0] = lp_build_masklerp2d(texel_bld, s_fpart, t_fpart,
1475 cmpval00, cmpval01, cmpval10, cmpval11);
1476 /* Linearly interpolate the two samples from the two 3D slices */
1477 colors_out[0] = lp_build_lerp(texel_bld,
1478 r_fpart,
1479 colors0[0], colors1[0],
1480 0);
1481 colors_out[1] = colors_out[2] = colors_out[3] = colors_out[0];
1482 }
1483 }
1484 else {
1485 /* 2D tex */
1486 for (chan = 0; chan < 4; chan++) {
1487 colors_out[chan] = colors0[chan];
1488 }
1489 }
1490 }
1491 }
1492
1493
1494 /**
1495 * Sample the texture/mipmap using given image filter and mip filter.
1496 * ilevel0 and ilevel1 indicate the two mipmap levels to sample
1497 * from (vectors or scalars).
1498 * If we're using nearest miplevel sampling the '1' values will be null/unused.
1499 */
static void
lp_build_sample_mipmap(struct lp_build_sample_context *bld,
                       unsigned img_filter,
                       unsigned mip_filter,
                       boolean is_gather,
                       LLVMValueRef *coords,
                       const LLVMValueRef *offsets,
                       LLVMValueRef ilevel0,
                       LLVMValueRef ilevel1,
                       LLVMValueRef lod_fpart,
                       LLVMValueRef *colors_out)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef size0 = NULL;
   LLVMValueRef size1 = NULL;
   LLVMValueRef row_stride0_vec = NULL;
   LLVMValueRef row_stride1_vec = NULL;
   LLVMValueRef img_stride0_vec = NULL;
   LLVMValueRef img_stride1_vec = NULL;
   LLVMValueRef data_ptr0 = NULL;
   LLVMValueRef data_ptr1 = NULL;
   LLVMValueRef mipoff0 = NULL;
   LLVMValueRef mipoff1 = NULL;
   LLVMValueRef colors0[4], colors1[4];
   unsigned chan;

   /*
    * Sample the first mipmap level (ilevel0).  Results are written through
    * colors_out[chan], which must be pointers to variables (allocas) since
    * the mip-lerp branch below conditionally overwrites them at runtime.
    */
   lp_build_mipmap_level_sizes(bld, ilevel0,
                               &size0,
                               &row_stride0_vec, &img_stride0_vec);
   if (bld->num_mips == 1) {
      /* single mip level per fetch: can use a direct pointer to the level */
      data_ptr0 = lp_build_get_mipmap_level(bld, ilevel0);
   }
   else {
      /* This path should work for num_lods 1 too but slightly less efficient */
      /* per-element mip levels: base pointer plus per-element byte offsets */
      data_ptr0 = bld->base_ptr;
      mipoff0 = lp_build_get_mip_offsets(bld, ilevel0);
   }
   if (img_filter == PIPE_TEX_FILTER_NEAREST) {
      lp_build_sample_image_nearest(bld, size0,
                                    row_stride0_vec, img_stride0_vec,
                                    data_ptr0, mipoff0, coords, offsets,
                                    colors0);
   }
   else {
      assert(img_filter == PIPE_TEX_FILTER_LINEAR);
      lp_build_sample_image_linear(bld, is_gather, size0, NULL,
                                   row_stride0_vec, img_stride0_vec,
                                   data_ptr0, mipoff0, coords, offsets,
                                   colors0);
   }

   /* Store the first level's colors in the output variables */
   for (chan = 0; chan < 4; chan++) {
      LLVMBuildStore(builder, colors0[chan], colors_out[chan]);
   }

   if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
      struct lp_build_if_state if_ctx;
      LLVMValueRef need_lerp;

      /* need_lerp = lod_fpart > 0 */
      if (bld->num_lods == 1) {
         /* scalar lod: unordered-greater-than so NaN also takes the branch */
         need_lerp = LLVMBuildFCmp(builder, LLVMRealUGT,
                                   lod_fpart, bld->lodf_bld.zero,
                                   "need_lerp");
      }
      else {
         /*
          * We'll do mip filtering if any of the quads (or individual
          * pixel in case of per-pixel lod) need it.
          * It might be better to split the vectors here and only fetch/filter
          * quads which need it (if there's one lod per quad).
          */
         need_lerp = lp_build_compare(bld->gallivm, bld->lodf_bld.type,
                                      PIPE_FUNC_GREATER,
                                      lod_fpart, bld->lodf_bld.zero);
         need_lerp = lp_build_any_true_range(&bld->lodi_bld, bld->num_lods, need_lerp);
      }

      /* emit a runtime branch: skip second-level fetch when no lerp needed */
      lp_build_if(&if_ctx, bld->gallivm, need_lerp);
      {
         /*
          * We unfortunately need to clamp lod_fpart here since we can get
          * negative values which would screw up filtering if not all
          * lod_fpart values have same sign.
          */
         lod_fpart = lp_build_max(&bld->lodf_bld, lod_fpart,
                                  bld->lodf_bld.zero);
         /* sample the second mipmap level */
         lp_build_mipmap_level_sizes(bld, ilevel1,
                                     &size1,
                                     &row_stride1_vec, &img_stride1_vec);
         if (bld->num_mips == 1) {
            data_ptr1 = lp_build_get_mipmap_level(bld, ilevel1);
         }
         else {
            data_ptr1 = bld->base_ptr;
            mipoff1 = lp_build_get_mip_offsets(bld, ilevel1);
         }
         if (img_filter == PIPE_TEX_FILTER_NEAREST) {
            lp_build_sample_image_nearest(bld, size1,
                                          row_stride1_vec, img_stride1_vec,
                                          data_ptr1, mipoff1, coords, offsets,
                                          colors1);
         }
         else {
            /* is_gather is FALSE here: gather never mip-lerps a second level */
            lp_build_sample_image_linear(bld, FALSE, size1, NULL,
                                         row_stride1_vec, img_stride1_vec,
                                         data_ptr1, mipoff1, coords, offsets,
                                         colors1);
         }

         /* interpolate samples from the two mipmap levels */

         /* widen per-quad lod weights to the full texel vector if necessary */
         if (bld->num_lods != bld->coord_type.length)
            lod_fpart = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
                                                              bld->lodf_bld.type,
                                                              bld->texel_bld.type,
                                                              lod_fpart);

         for (chan = 0; chan < 4; chan++) {
            colors0[chan] = lp_build_lerp(&bld->texel_bld, lod_fpart,
                                          colors0[chan], colors1[chan],
                                          0);
            /* overwrite the level-0 result stored above */
            LLVMBuildStore(builder, colors0[chan], colors_out[chan]);
         }
      }
      lp_build_endif(&if_ctx);
   }
}
1631
1632
1633 /**
1634 * Sample the texture/mipmap using given mip filter, and using
1635 * both nearest and linear filtering at the same time depending
1636 * on linear_mask.
1637 * lod can be per quad but linear_mask is always per pixel.
1638 * ilevel0 and ilevel1 indicate the two mipmap levels to sample
1639 * from (vectors or scalars).
1640 * If we're using nearest miplevel sampling the '1' values will be null/unused.
1641 */
static void
lp_build_sample_mipmap_both(struct lp_build_sample_context *bld,
                            LLVMValueRef linear_mask,
                            unsigned mip_filter,
                            LLVMValueRef *coords,
                            const LLVMValueRef *offsets,
                            LLVMValueRef ilevel0,
                            LLVMValueRef ilevel1,
                            LLVMValueRef lod_fpart,
                            LLVMValueRef lod_positive,
                            LLVMValueRef *colors_out)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef size0 = NULL;
   LLVMValueRef size1 = NULL;
   LLVMValueRef row_stride0_vec = NULL;
   LLVMValueRef row_stride1_vec = NULL;
   LLVMValueRef img_stride0_vec = NULL;
   LLVMValueRef img_stride1_vec = NULL;
   LLVMValueRef data_ptr0 = NULL;
   LLVMValueRef data_ptr1 = NULL;
   LLVMValueRef mipoff0 = NULL;
   LLVMValueRef mipoff1 = NULL;
   LLVMValueRef colors0[4], colors1[4];
   unsigned chan;

   /*
    * Sample the first mipmap level (ilevel0).  colors_out[chan] must be
    * pointers to variables (allocas): the mip-lerp branch below
    * conditionally overwrites the stored values at runtime.
    */
   lp_build_mipmap_level_sizes(bld, ilevel0,
                               &size0,
                               &row_stride0_vec, &img_stride0_vec);
   if (bld->num_mips == 1) {
      data_ptr0 = lp_build_get_mipmap_level(bld, ilevel0);
   }
   else {
      /* This path should work for num_lods 1 too but slightly less efficient */
      data_ptr0 = bld->base_ptr;
      mipoff0 = lp_build_get_mip_offsets(bld, ilevel0);
   }

   /*
    * Always take the linear path here; linear_mask (per pixel) lets the
    * image-linear code degrade selected pixels to nearest filtering.
    */
   lp_build_sample_image_linear(bld, FALSE, size0, linear_mask,
                                row_stride0_vec, img_stride0_vec,
                                data_ptr0, mipoff0, coords, offsets,
                                colors0);

   /* Store the first level's colors in the output variables */
   for (chan = 0; chan < 4; chan++) {
      LLVMBuildStore(builder, colors0[chan], colors_out[chan]);
   }

   if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
      struct lp_build_if_state if_ctx;
      LLVMValueRef need_lerp;

      /*
       * We'll do mip filtering if any of the quads (or individual
       * pixel in case of per-pixel lod) need it.
       * Note using lod_positive here not lod_fpart since it may be the same
       * condition as that used in the outer "if" in the caller hence llvm
       * should be able to merge the branches in this case.
       */
      need_lerp = lp_build_any_true_range(&bld->lodi_bld, bld->num_lods, lod_positive);

      /* runtime branch: skip second-level fetch when no lerp is needed */
      lp_build_if(&if_ctx, bld->gallivm, need_lerp);
      {
         /*
          * We unfortunately need to clamp lod_fpart here since we can get
          * negative values which would screw up filtering if not all
          * lod_fpart values have same sign.
          */
         lod_fpart = lp_build_max(&bld->lodf_bld, lod_fpart,
                                  bld->lodf_bld.zero);
         /* sample the second mipmap level */
         lp_build_mipmap_level_sizes(bld, ilevel1,
                                     &size1,
                                     &row_stride1_vec, &img_stride1_vec);
         if (bld->num_mips == 1) {
            data_ptr1 = lp_build_get_mipmap_level(bld, ilevel1);
         }
         else {
            data_ptr1 = bld->base_ptr;
            mipoff1 = lp_build_get_mip_offsets(bld, ilevel1);
         }

         lp_build_sample_image_linear(bld, FALSE, size1, linear_mask,
                                      row_stride1_vec, img_stride1_vec,
                                      data_ptr1, mipoff1, coords, offsets,
                                      colors1);

         /* interpolate samples from the two mipmap levels */

         /* widen per-quad lod weights to the full texel vector if necessary */
         if (bld->num_lods != bld->coord_type.length)
            lod_fpart = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
                                                              bld->lodf_bld.type,
                                                              bld->texel_bld.type,
                                                              lod_fpart);

         for (chan = 0; chan < 4; chan++) {
            colors0[chan] = lp_build_lerp(&bld->texel_bld, lod_fpart,
                                          colors0[chan], colors1[chan],
                                          0);
            /* overwrite the level-0 result stored above */
            LLVMBuildStore(builder, colors0[chan], colors_out[chan]);
         }
      }
      lp_build_endif(&if_ctx);
   }
}
1748
1749
1750 /**
1751 * Build (per-coord) layer value.
1752 * Either clamp layer to valid values or fill in optional out_of_bounds
1753 * value and just return value unclamped.
1754 */
1755 static LLVMValueRef
1756 lp_build_layer_coord(struct lp_build_sample_context *bld,
1757 unsigned texture_unit,
1758 boolean is_cube_array,
1759 LLVMValueRef layer,
1760 LLVMValueRef *out_of_bounds)
1761 {
1762 LLVMValueRef num_layers;
1763 struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
1764
1765 num_layers = bld->dynamic_state->depth(bld->dynamic_state, bld->gallivm,
1766 bld->context_ptr, texture_unit);
1767
1768 if (out_of_bounds) {
1769 LLVMValueRef out1, out;
1770 assert(!is_cube_array);
1771 num_layers = lp_build_broadcast_scalar(int_coord_bld, num_layers);
1772 out = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, layer, int_coord_bld->zero);
1773 out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, layer, num_layers);
1774 *out_of_bounds = lp_build_or(int_coord_bld, out, out1);
1775 return layer;
1776 }
1777 else {
1778 LLVMValueRef maxlayer;
1779 LLVMValueRef s = is_cube_array ? lp_build_const_int32(bld->gallivm, 6) :
1780 bld->int_bld.one;
1781 maxlayer = lp_build_sub(&bld->int_bld, num_layers, s);
1782 maxlayer = lp_build_broadcast_scalar(int_coord_bld, maxlayer);
1783 return lp_build_clamp(int_coord_bld, layer, int_coord_bld->zero, maxlayer);
1784 }
1785 }
1786
1787
1788 /**
1789 * Calculate cube face, lod, mip levels.
1790 */
1791 static void
1792 lp_build_sample_common(struct lp_build_sample_context *bld,
1793 unsigned texture_index,
1794 unsigned sampler_index,
1795 LLVMValueRef *coords,
1796 const struct lp_derivatives *derivs, /* optional */
1797 LLVMValueRef lod_bias, /* optional */
1798 LLVMValueRef explicit_lod, /* optional */
1799 LLVMValueRef *lod_pos_or_zero,
1800 LLVMValueRef *lod_fpart,
1801 LLVMValueRef *ilevel0,
1802 LLVMValueRef *ilevel1)
1803 {
1804 const unsigned mip_filter = bld->static_sampler_state->min_mip_filter;
1805 const unsigned min_filter = bld->static_sampler_state->min_img_filter;
1806 const unsigned mag_filter = bld->static_sampler_state->mag_img_filter;
1807 const unsigned target = bld->static_texture_state->target;
1808 LLVMValueRef first_level, cube_rho = NULL;
1809 LLVMValueRef lod_ipart = NULL;
1810 struct lp_derivatives cube_derivs;
1811
1812 /*
1813 printf("%s mip %d min %d mag %d\n", __FUNCTION__,
1814 mip_filter, min_filter, mag_filter);
1815 */
1816
1817 /*
1818 * Choose cube face, recompute texcoords for the chosen face and
1819 * compute rho here too (as it requires transform of derivatives).
1820 */
1821 if (target == PIPE_TEXTURE_CUBE || target == PIPE_TEXTURE_CUBE_ARRAY) {
1822 boolean need_derivs;
1823 need_derivs = ((min_filter != mag_filter ||
1824 mip_filter != PIPE_TEX_MIPFILTER_NONE) &&
1825 !bld->static_sampler_state->min_max_lod_equal &&
1826 !explicit_lod);
1827 lp_build_cube_lookup(bld, coords, derivs, &cube_rho, &cube_derivs, need_derivs);
1828 derivs = &cube_derivs;
1829 if (target == PIPE_TEXTURE_CUBE_ARRAY) {
1830 /* calculate cube layer coord now */
1831 LLVMValueRef layer = lp_build_iround(&bld->coord_bld, coords[3]);
1832 LLVMValueRef six = lp_build_const_int_vec(bld->gallivm, bld->int_coord_type, 6);
1833 layer = lp_build_mul(&bld->int_coord_bld, layer, six);
1834 coords[3] = lp_build_layer_coord(bld, texture_index, TRUE, layer, NULL);
1835 /* because of seamless filtering can't add it to face (coords[2]) here. */
1836 }
1837 }
1838 else if (target == PIPE_TEXTURE_1D_ARRAY ||
1839 target == PIPE_TEXTURE_2D_ARRAY) {
1840 coords[2] = lp_build_iround(&bld->coord_bld, coords[2]);
1841 coords[2] = lp_build_layer_coord(bld, texture_index, FALSE, coords[2], NULL);
1842 }
1843
1844 if (bld->static_sampler_state->compare_mode != PIPE_TEX_COMPARE_NONE) {
1845 /*
1846 * Clamp p coords to [0,1] for fixed function depth texture format here.
1847 * Technically this is not entirely correct for unorm depth as the ref value
1848 * should be converted to the depth format (quantization!) and comparison
1849 * then done in texture format. This would actually help performance (since
1850 * only need to do it once and could save the per-sample conversion of texels
1851 * to floats instead), but it would need more messy code (would need to push
1852 * at least some bits down to actual fetch so conversion could be skipped,
1853 * and would have ugly interaction with border color, would need to convert
1854 * border color to that format too or do some other tricks to make it work).
1855 */
1856 const struct util_format_description *format_desc = bld->format_desc;
1857 unsigned chan_type;
1858 /* not entirely sure we couldn't end up with non-valid swizzle here */
1859 chan_type = format_desc->swizzle[0] <= PIPE_SWIZZLE_W ?
1860 format_desc->channel[format_desc->swizzle[0]].type :
1861 UTIL_FORMAT_TYPE_FLOAT;
1862 if (chan_type != UTIL_FORMAT_TYPE_FLOAT) {
1863 coords[4] = lp_build_clamp(&bld->coord_bld, coords[4],
1864 bld->coord_bld.zero, bld->coord_bld.one);
1865 }
1866 }
1867
1868 /*
1869 * Compute the level of detail (float).
1870 */
1871 if (min_filter != mag_filter ||
1872 mip_filter != PIPE_TEX_MIPFILTER_NONE) {
1873 /* Need to compute lod either to choose mipmap levels or to
1874 * distinguish between minification/magnification with one mipmap level.
1875 */
1876 lp_build_lod_selector(bld, texture_index, sampler_index,
1877 coords[0], coords[1], coords[2], cube_rho,
1878 derivs, lod_bias, explicit_lod,
1879 mip_filter,
1880 &lod_ipart, lod_fpart, lod_pos_or_zero);
1881 } else {
1882 lod_ipart = bld->lodi_bld.zero;
1883 *lod_pos_or_zero = bld->lodi_bld.zero;
1884 }
1885
1886 if (bld->num_lods != bld->num_mips) {
1887 /* only makes sense if there's just a single mip level */
1888 assert(bld->num_mips == 1);
1889 lod_ipart = lp_build_extract_range(bld->gallivm, lod_ipart, 0, 1);
1890 }
1891
1892 /*
1893 * Compute integer mipmap level(s) to fetch texels from: ilevel0, ilevel1
1894 */
1895 switch (mip_filter) {
1896 default:
1897 assert(0 && "bad mip_filter value in lp_build_sample_soa()");
1898 /* fall-through */
1899 case PIPE_TEX_MIPFILTER_NONE:
1900 /* always use mip level 0 */
1901 first_level = bld->dynamic_state->first_level(bld->dynamic_state,
1902 bld->gallivm, bld->context_ptr,
1903 texture_index);
1904 first_level = lp_build_broadcast_scalar(&bld->leveli_bld, first_level);
1905 *ilevel0 = first_level;
1906 break;
1907 case PIPE_TEX_MIPFILTER_NEAREST:
1908 assert(lod_ipart);
1909 lp_build_nearest_mip_level(bld, texture_index, lod_ipart, ilevel0, NULL);
1910 break;
1911 case PIPE_TEX_MIPFILTER_LINEAR:
1912 assert(lod_ipart);
1913 assert(*lod_fpart);
1914 lp_build_linear_mip_levels(bld, texture_index,
1915 lod_ipart, lod_fpart,
1916 ilevel0, ilevel1);
1917 break;
1918 }
1919 }
1920
/**
 * Load the border color for the given sampler and clamp it to the
 * representable range of the texture's format, storing the result in
 * bld->border_color_clamped.
 *
 * The clamp range is derived from the first (non-void) channel of the
 * format; a handful of mixed signed/unsigned and packed formats get
 * explicit special cases below.
 */
static void
lp_build_clamp_border_color(struct lp_build_sample_context *bld,
                            unsigned sampler_unit)
{
   struct gallivm_state *gallivm = bld->gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef border_color_ptr =
      bld->dynamic_state->border_color(bld->dynamic_state, gallivm,
                                       bld->context_ptr, sampler_unit);
   LLVMValueRef border_color;
   const struct util_format_description *format_desc = bld->format_desc;
   struct lp_type vec4_type = bld->texel_type;
   struct lp_build_context vec4_bld;
   LLVMValueRef min_clamp = NULL;   /* NULL means no lower clamp needed */
   LLVMValueRef max_clamp = NULL;   /* NULL means no upper clamp needed */

   /*
    * For normalized format need to clamp border color (technically
    * probably should also quantize the data). Really sucks doing this
    * here but can't avoid at least for now since this is part of
    * sampler state and texture format is part of sampler_view state.
    * GL expects also expects clamping for uint/sint formats too so
    * do that as well (d3d10 can't end up here with uint/sint since it
    * only supports them with ld).
    */
   /* border color is a single rgba vec4, not one per texel-vector lane */
   vec4_type.length = 4;
   lp_build_context_init(&vec4_bld, gallivm, vec4_type);

   /*
    * Vectorized clamping of border color. Loading is a bit of a hack since
    * we just cast the pointer to float array to pointer to vec4
    * (int or float).
    */
   border_color_ptr = lp_build_array_get_ptr(gallivm, border_color_ptr,
                                             lp_build_const_int32(gallivm, 0));
   border_color_ptr = LLVMBuildBitCast(builder, border_color_ptr,
                                       LLVMPointerType(vec4_bld.vec_type, 0), "");
   border_color = LLVMBuildLoad(builder, border_color_ptr, "");
   /* we don't have aligned type in the dynamic state unfortunately */
   LLVMSetAlignment(border_color, 4);

   /*
    * Instead of having some incredibly complex logic which will try to figure out
    * clamping necessary for each channel, simply use the first channel, and treat
    * mixed signed/unsigned normalized formats specially.
    * (Mixed non-normalized, which wouldn't work at all here, do not exist for a
    * good reason.)
    */
   if (format_desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
      int chan;
      /* d/s needs special handling because both present means just sampling depth */
      if (util_format_is_depth_and_stencil(format_desc->format)) {
         chan = format_desc->swizzle[0];
      }
      else {
         chan = util_format_get_first_non_void_channel(format_desc->format);
      }
      if (chan >= 0 && chan <= PIPE_SWIZZLE_W) {
         unsigned chan_type = format_desc->channel[chan].type;
         unsigned chan_norm = format_desc->channel[chan].normalized;
         unsigned chan_pure = format_desc->channel[chan].pure_integer;
         if (chan_type == UTIL_FORMAT_TYPE_SIGNED) {
            if (chan_norm) {
               /* snorm: clamp to [-1, 1] */
               min_clamp = lp_build_const_vec(gallivm, vec4_type, -1.0F);
               max_clamp = vec4_bld.one;
            }
            else if (chan_pure) {
               /*
                * Border color was stored as int, hence need min/max clamp
                * only if chan has less than 32 bits..
                */
               unsigned chan_size = format_desc->channel[chan].size;
               if (chan_size < 32) {
                  min_clamp = lp_build_const_int_vec(gallivm, vec4_type,
                                                     0 - (1 << (chan_size - 1)));
                  max_clamp = lp_build_const_int_vec(gallivm, vec4_type,
                                                     (1 << (chan_size - 1)) - 1);
               }
            }
            /* TODO: no idea about non-pure, non-normalized! */
         }
         else if (chan_type == UTIL_FORMAT_TYPE_UNSIGNED) {
            if (chan_norm) {
               /* unorm: clamp to [0, 1] */
               min_clamp = vec4_bld.zero;
               max_clamp = vec4_bld.one;
            }
            /*
             * Need a ugly hack here, because we don't have Z32_FLOAT_X8X24
             * we use Z32_FLOAT_S8X24 to imply sampling depth component
             * and ignoring stencil, which will blow up here if we try to
             * do a uint clamp in a float texel build...
             * And even if we had that format, mesa st also thinks using z24s8
             * means depth sampling ignoring stencil.
             */
            else if (chan_pure) {
               /*
                * Border color was stored as uint, hence never need min
                * clamp, and only need max clamp if chan has less than 32 bits.
                */
               unsigned chan_size = format_desc->channel[chan].size;
               if (chan_size < 32) {
                  max_clamp = lp_build_const_int_vec(gallivm, vec4_type,
                                                     (1 << chan_size) - 1);
               }
               /* TODO: no idea about non-pure, non-normalized! */
            }
         }
         else if (chan_type == UTIL_FORMAT_TYPE_FIXED) {
            /* TODO: I have no idea what clamp this would need if any! */
         }
      }
      /* mixed plain formats (or different pure size) */
      /* note: this switch may override the per-channel result computed above */
      switch (format_desc->format) {
      case PIPE_FORMAT_B10G10R10A2_UINT:
      case PIPE_FORMAT_R10G10B10A2_UINT:
      {
         unsigned max10 = (1 << 10) - 1;
         max_clamp = lp_build_const_aos(gallivm, vec4_type, max10, max10,
                                        max10, (1 << 2) - 1, NULL);
      }
         break;
      case PIPE_FORMAT_R10SG10SB10SA2U_NORM:
         min_clamp = lp_build_const_aos(gallivm, vec4_type, -1.0F, -1.0F,
                                        -1.0F, 0.0F, NULL);
         max_clamp = vec4_bld.one;
         break;
      case PIPE_FORMAT_R8SG8SB8UX8U_NORM:
      case PIPE_FORMAT_R5SG5SB6U_NORM:
         min_clamp = lp_build_const_aos(gallivm, vec4_type, -1.0F, -1.0F,
                                        0.0F, 0.0F, NULL);
         max_clamp = vec4_bld.one;
         break;
      default:
         break;
      }
   }
   else {
      /* cannot figure this out from format description */
      if (format_desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
         /* s3tc formats are always unorm */
         min_clamp = vec4_bld.zero;
         max_clamp = vec4_bld.one;
      }
      else if (format_desc->layout == UTIL_FORMAT_LAYOUT_RGTC ||
               format_desc->layout == UTIL_FORMAT_LAYOUT_ETC) {
         switch (format_desc->format) {
         case PIPE_FORMAT_RGTC1_UNORM:
         case PIPE_FORMAT_RGTC2_UNORM:
         case PIPE_FORMAT_LATC1_UNORM:
         case PIPE_FORMAT_LATC2_UNORM:
         case PIPE_FORMAT_ETC1_RGB8:
            min_clamp = vec4_bld.zero;
            max_clamp = vec4_bld.one;
            break;
         case PIPE_FORMAT_RGTC1_SNORM:
         case PIPE_FORMAT_RGTC2_SNORM:
         case PIPE_FORMAT_LATC1_SNORM:
         case PIPE_FORMAT_LATC2_SNORM:
            min_clamp = lp_build_const_vec(gallivm, vec4_type, -1.0F);
            max_clamp = vec4_bld.one;
            break;
         default:
            /* unexpected compressed format here */
            assert(0);
            break;
         }
      }
      /*
       * all others from subsampled/other group, though we don't care
       * about yuv (and should not have any from zs here)
       */
      else if (format_desc->colorspace != UTIL_FORMAT_COLORSPACE_YUV){
         switch (format_desc->format) {
         case PIPE_FORMAT_R8G8_B8G8_UNORM:
         case PIPE_FORMAT_G8R8_G8B8_UNORM:
         case PIPE_FORMAT_G8R8_B8R8_UNORM:
         case PIPE_FORMAT_R8G8_R8B8_UNORM:
         case PIPE_FORMAT_R1_UNORM: /* doesn't make sense but ah well */
            min_clamp = vec4_bld.zero;
            max_clamp = vec4_bld.one;
            break;
         case PIPE_FORMAT_R8G8Bx_SNORM:
            min_clamp = lp_build_const_vec(gallivm, vec4_type, -1.0F);
            max_clamp = vec4_bld.one;
            break;
         /*
          * Note smallfloat formats usually don't need clamping
          * (they still have infinite range) however this is not
          * true for r11g11b10 and r9g9b9e5, which can't represent
          * negative numbers (and additionally r9g9b9e5 can't represent
          * very large numbers). d3d10 seems happy without clamping in
          * this case, but gl spec is pretty clear: "for floating
          * point and integer formats, border values are clamped to
          * the representable range of the format" so do that here.
          */
         case PIPE_FORMAT_R11G11B10_FLOAT:
            min_clamp = vec4_bld.zero;
            break;
         case PIPE_FORMAT_R9G9B9E5_FLOAT:
            min_clamp = vec4_bld.zero;
            max_clamp = lp_build_const_vec(gallivm, vec4_type, MAX_RGB9E5);
            break;
         default:
            /* unexpected format here */
            assert(0);
            break;
         }
      }
   }

   /* apply whichever clamps were selected above */
   if (min_clamp) {
      border_color = lp_build_max(&vec4_bld, border_color, min_clamp);
   }
   if (max_clamp) {
      border_color = lp_build_min(&vec4_bld, border_color, max_clamp);
   }

   bld->border_color_clamped = border_color;
}
2138
2139
/**
 * General texture sampling codegen.
 * This function handles texture sampling for all texture targets (1D,
 * 2D, 3D, cube) and all filtering modes.
 *
 * Dispatches to lp_build_sample_mipmap()/lp_build_sample_mipmap_both()
 * depending on whether min and mag filters differ, and whether there is
 * one lod for the whole vector or per-quad/per-element lods.
 *
 * \param lod_positive  mask/flag telling whether lod > 0 (i.e. minification)
 * \param lod_fpart     fractional lod part (for linear mip filtering)
 * \param ilevel0       first mip level to sample from
 * \param ilevel1       second mip level (only for linear mip filtering)
 * \param colors_out    returns the 4 texel channel vectors (r,g,b,a)
 */
static void
lp_build_sample_general(struct lp_build_sample_context *bld,
                        unsigned sampler_unit,
                        boolean is_gather,
                        LLVMValueRef *coords,
                        const LLVMValueRef *offsets,
                        LLVMValueRef lod_positive,
                        LLVMValueRef lod_fpart,
                        LLVMValueRef ilevel0,
                        LLVMValueRef ilevel1,
                        LLVMValueRef *colors_out)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   const struct lp_static_sampler_state *sampler_state = bld->static_sampler_state;
   const unsigned mip_filter = sampler_state->min_mip_filter;
   const unsigned min_filter = sampler_state->min_img_filter;
   const unsigned mag_filter = sampler_state->mag_img_filter;
   LLVMValueRef texels[4];
   unsigned chan;

   /* if we need border color, (potentially) clamp it now */
   if (lp_sampler_wrap_mode_uses_border_color(sampler_state->wrap_s,
                                              min_filter,
                                              mag_filter) ||
       (bld->dims > 1 &&
        lp_sampler_wrap_mode_uses_border_color(sampler_state->wrap_t,
                                               min_filter,
                                               mag_filter)) ||
       (bld->dims > 2 &&
        lp_sampler_wrap_mode_uses_border_color(sampler_state->wrap_r,
                                               min_filter,
                                               mag_filter))) {
      lp_build_clamp_border_color(bld, sampler_unit);
   }


   /*
    * Get/interpolate texture colors.
    */

   /* result variables; sampled through allocas so the filtered values can
    * be stored from within the conditionally-emitted branches below */
   for (chan = 0; chan < 4; ++chan) {
      texels[chan] = lp_build_alloca(bld->gallivm, bld->texel_bld.vec_type, "");
      lp_build_name(texels[chan], "sampler%u_texel_%c_var", sampler_unit, "xyzw"[chan]);
   }

   if (min_filter == mag_filter) {
      /* no need to distinguish between minification and magnification */
      lp_build_sample_mipmap(bld, min_filter, mip_filter,
                             is_gather,
                             coords, offsets,
                             ilevel0, ilevel1, lod_fpart,
                             texels);
   }
   else {
      /*
       * Could also get rid of the if-logic and always use mipmap_both, both
       * for the single lod and multi-lod case if nothing really uses this.
       */
      if (bld->num_lods == 1) {
         /* Emit conditional to choose min image filter or mag image filter
          * depending on the lod being > 0 or <= 0, respectively.
          */
         struct lp_build_if_state if_ctx;

         /* branch condition must be an i1 value */
         lod_positive = LLVMBuildTrunc(builder, lod_positive,
                                       LLVMInt1TypeInContext(bld->gallivm->context), "");

         lp_build_if(&if_ctx, bld->gallivm, lod_positive);
         {
            /* Use the minification filter */
            lp_build_sample_mipmap(bld, min_filter, mip_filter, FALSE,
                                   coords, offsets,
                                   ilevel0, ilevel1, lod_fpart,
                                   texels);
         }
         lp_build_else(&if_ctx);
         {
            /* Use the magnification filter */
            lp_build_sample_mipmap(bld, mag_filter, PIPE_TEX_MIPFILTER_NONE,
                                   FALSE,
                                   coords, offsets,
                                   ilevel0, NULL, NULL,
                                   texels);
         }
         lp_build_endif(&if_ctx);
      }
      else {
         LLVMValueRef need_linear, linear_mask;
         unsigned mip_filter_for_nearest;
         struct lp_build_if_state if_ctx;

         /* build a mask of which lanes need the linear filter */
         if (min_filter == PIPE_TEX_FILTER_LINEAR) {
            linear_mask = lod_positive;
            mip_filter_for_nearest = PIPE_TEX_MIPFILTER_NONE;
         }
         else {
            linear_mask = lp_build_not(&bld->lodi_bld, lod_positive);
            mip_filter_for_nearest = mip_filter;
         }
         need_linear = lp_build_any_true_range(&bld->lodi_bld, bld->num_lods,
                                               linear_mask);

         /* per-quad lods: broadcast the mask up to per-element width */
         if (bld->num_lods != bld->coord_type.length) {
            linear_mask = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
                                                                bld->lodi_type,
                                                                bld->int_coord_type,
                                                                linear_mask);
         }

         lp_build_if(&if_ctx, bld->gallivm, need_linear);
         {
            /*
             * Do sampling with both filters simultaneously. This means using
             * a linear filter and doing some tricks (with weights) for the pixels
             * which need nearest filter.
             * Note that it's probably rare some pixels need nearest and some
             * linear filter but the fixups required for the nearest pixels
             * aren't all that complicated so just always run a combined path
             * if at least some pixels require linear.
             */
            lp_build_sample_mipmap_both(bld, linear_mask, mip_filter,
                                        coords, offsets,
                                        ilevel0, ilevel1,
                                        lod_fpart, lod_positive,
                                        texels);
         }
         lp_build_else(&if_ctx);
         {
            /*
             * All pixels require just nearest filtering, which is way
             * cheaper than linear, hence do a separate path for that.
             */
            lp_build_sample_mipmap(bld, PIPE_TEX_FILTER_NEAREST,
                                   mip_filter_for_nearest, FALSE,
                                   coords, offsets,
                                   ilevel0, ilevel1, lod_fpart,
                                   texels);
         }
         lp_build_endif(&if_ctx);
      }
   }

   /* read back the final values from the result allocas */
   for (chan = 0; chan < 4; ++chan) {
      colors_out[chan] = LLVMBuildLoad(builder, texels[chan], "");
      lp_build_name(colors_out[chan], "sampler%u_texel_%c", sampler_unit, "xyzw"[chan]);
   }
}
2292
2293
/**
 * Texel fetch function.
 * In contrast to general sampling there is no filtering, no coord minification,
 * lod (if any) is always explicit uint, coords are uints (in terms of texel units)
 * directly to be applied to the selected mip level (after adding texel offsets).
 * This function handles texel fetch for all targets where texel fetch is supported
 * (no cube maps, but 1d, 2d, 3d are supported, arrays and buffers should be too).
 *
 * \param explicit_lod  optional explicit (integer) lod; ignored for buffers
 * \param offsets       optional texel offsets (NULL entries mean no offset)
 * \param colors_out    returns the 4 texel channel vectors; out-of-bounds
 *                      lanes return zero (d3d10 / robust access behavior)
 */
static void
lp_build_fetch_texel(struct lp_build_sample_context *bld,
                     unsigned texture_unit,
                     const LLVMValueRef *coords,
                     LLVMValueRef explicit_lod,
                     const LLVMValueRef *offsets,
                     LLVMValueRef *colors_out)
{
   struct lp_build_context *perquadi_bld = &bld->lodi_bld;
   struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
   unsigned dims = bld->dims, chan;
   unsigned target = bld->static_texture_state->target;
   boolean out_of_bound_ret_zero = TRUE;
   LLVMValueRef size, ilevel;
   LLVMValueRef row_stride_vec = NULL, img_stride_vec = NULL;
   LLVMValueRef x = coords[0], y = coords[1], z = coords[2];
   LLVMValueRef width, height, depth, i, j;
   LLVMValueRef offset, out_of_bounds, out1;

   /* accumulated mask of lanes with any out-of-range coord/level */
   out_of_bounds = int_coord_bld->zero;

   /* determine the mip level to fetch from */
   if (explicit_lod && bld->static_texture_state->target != PIPE_BUFFER) {
      if (bld->num_mips != int_coord_bld->type.length) {
         /* one lod per quad: pack per-element lods down to per-quad */
         ilevel = lp_build_pack_aos_scalars(bld->gallivm, int_coord_bld->type,
                                            perquadi_bld->type, explicit_lod, 0);
      }
      else {
         ilevel = explicit_lod;
      }
      lp_build_nearest_mip_level(bld, texture_unit, ilevel, &ilevel,
                                 out_of_bound_ret_zero ? &out_of_bounds : NULL);
   }
   else {
      assert(bld->num_mips == 1);
      if (bld->static_texture_state->target != PIPE_BUFFER) {
         ilevel = bld->dynamic_state->first_level(bld->dynamic_state, bld->gallivm,
                                                  bld->context_ptr, texture_unit);
      }
      else {
         /* buffers have no mip levels */
         ilevel = lp_build_const_int32(bld->gallivm, 0);
      }
   }
   lp_build_mipmap_level_sizes(bld, ilevel,
                               &size,
                               &row_stride_vec, &img_stride_vec);
   lp_build_extract_image_sizes(bld, &bld->int_size_bld, int_coord_bld->type,
                                size, &width, &height, &depth);

   /* array layer goes in z; range-check or clamp it */
   if (target == PIPE_TEXTURE_1D_ARRAY ||
       target == PIPE_TEXTURE_2D_ARRAY) {
      if (out_of_bound_ret_zero) {
         z = lp_build_layer_coord(bld, texture_unit, FALSE, z, &out1);
         out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
      }
      else {
         z = lp_build_layer_coord(bld, texture_unit, FALSE, z, NULL);
      }
   }

   /* This is a lot like border sampling */
   if (offsets[0]) {
      /*
       * coords are really unsigned, offsets are signed, but I don't think
       * exceeding 31 bits is possible
       */
      x = lp_build_add(int_coord_bld, x, offsets[0]);
   }
   out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, x, int_coord_bld->zero);
   out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
   out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, x, width);
   out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);

   if (dims >= 2) {
      if (offsets[1]) {
         y = lp_build_add(int_coord_bld, y, offsets[1]);
      }
      out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, y, int_coord_bld->zero);
      out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
      out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, y, height);
      out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);

      if (dims >= 3) {
         if (offsets[2]) {
            z = lp_build_add(int_coord_bld, z, offsets[2]);
         }
         out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, z, int_coord_bld->zero);
         out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
         out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, z, depth);
         out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
      }
   }

   /* compute the byte offset (and within-block coords i,j) of the texel */
   lp_build_sample_offset(int_coord_bld,
                          bld->format_desc,
                          x, y, z, row_stride_vec, img_stride_vec,
                          &offset, &i, &j);

   if (bld->static_texture_state->target != PIPE_BUFFER) {
      offset = lp_build_add(int_coord_bld, offset,
                            lp_build_get_mip_offsets(bld, ilevel));
   }

   /* zero the offset for out-of-bounds lanes so the fetch stays in-buffer */
   offset = lp_build_andnot(int_coord_bld, offset, out_of_bounds);

   lp_build_fetch_rgba_soa(bld->gallivm,
                           bld->format_desc,
                           bld->texel_type,
                           bld->base_ptr, offset,
                           i, j,
                           bld->cache,
                           colors_out);

   if (out_of_bound_ret_zero) {
      /*
       * Only needed for ARB_robust_buffer_access_behavior and d3d10.
       * Could use min/max above instead of out-of-bounds comparisons
       * if we don't care about the result returned for out-of-bounds.
       */
      for (chan = 0; chan < 4; chan++) {
         colors_out[chan] = lp_build_select(&bld->texel_bld, out_of_bounds,
                                            bld->texel_bld.zero, colors_out[chan]);
      }
   }
}
2426
2427
2428 /**
2429 * Just set texels to white instead of actually sampling the texture.
2430 * For debugging.
2431 */
2432 void
2433 lp_build_sample_nop(struct gallivm_state *gallivm,
2434 struct lp_type type,
2435 const LLVMValueRef *coords,
2436 LLVMValueRef texel_out[4])
2437 {
2438 LLVMValueRef one = lp_build_one(gallivm, type);
2439 unsigned chan;
2440
2441 for (chan = 0; chan < 4; chan++) {
2442 texel_out[chan] = one;
2443 }
2444 }
2445
2446
2447 /**
2448 * Build the actual texture sampling code.
2449 * 'texel' will return a vector of four LLVMValueRefs corresponding to
2450 * R, G, B, A.
2451 * \param type vector float type to use for coords, etc.
2452 * \param sample_key
2453 * \param derivs partial derivatives of (s,t,r,q) with respect to x and y
2454 */
2455 static void
2456 lp_build_sample_soa_code(struct gallivm_state *gallivm,
2457 const struct lp_static_texture_state *static_texture_state,
2458 const struct lp_static_sampler_state *static_sampler_state,
2459 struct lp_sampler_dynamic_state *dynamic_state,
2460 struct lp_type type,
2461 unsigned sample_key,
2462 unsigned texture_index,
2463 unsigned sampler_index,
2464 LLVMValueRef context_ptr,
2465 LLVMValueRef thread_data_ptr,
2466 const LLVMValueRef *coords,
2467 const LLVMValueRef *offsets,
2468 const struct lp_derivatives *derivs, /* optional */
2469 LLVMValueRef lod, /* optional */
2470 LLVMValueRef texel_out[4])
2471 {
2472 unsigned target = static_texture_state->target;
2473 unsigned dims = texture_dims(target);
2474 unsigned num_quads = type.length / 4;
2475 unsigned mip_filter, min_img_filter, mag_img_filter, i;
2476 struct lp_build_sample_context bld;
2477 struct lp_static_sampler_state derived_sampler_state = *static_sampler_state;
2478 LLVMTypeRef i32t = LLVMInt32TypeInContext(gallivm->context);
2479 LLVMBuilderRef builder = gallivm->builder;
2480 LLVMValueRef tex_width, newcoords[5];
2481 enum lp_sampler_lod_property lod_property;
2482 enum lp_sampler_lod_control lod_control;
2483 enum lp_sampler_op_type op_type;
2484 LLVMValueRef lod_bias = NULL;
2485 LLVMValueRef explicit_lod = NULL;
2486 boolean op_is_tex;
2487
2488 if (0) {
2489 enum pipe_format fmt = static_texture_state->format;
2490 debug_printf("Sample from %s\n", util_format_name(fmt));
2491 }
2492
2493 lod_property = (sample_key & LP_SAMPLER_LOD_PROPERTY_MASK) >>
2494 LP_SAMPLER_LOD_PROPERTY_SHIFT;
2495 lod_control = (sample_key & LP_SAMPLER_LOD_CONTROL_MASK) >>
2496 LP_SAMPLER_LOD_CONTROL_SHIFT;
2497 op_type = (sample_key & LP_SAMPLER_OP_TYPE_MASK) >>
2498 LP_SAMPLER_OP_TYPE_SHIFT;
2499
2500 op_is_tex = op_type == LP_SAMPLER_OP_TEXTURE;
2501
2502 if (lod_control == LP_SAMPLER_LOD_BIAS) {
2503 lod_bias = lod;
2504 assert(lod);
2505 assert(derivs == NULL);
2506 }
2507 else if (lod_control == LP_SAMPLER_LOD_EXPLICIT) {
2508 explicit_lod = lod;
2509 assert(lod);
2510 assert(derivs == NULL);
2511 }
2512 else if (lod_control == LP_SAMPLER_LOD_DERIVATIVES) {
2513 assert(derivs);
2514 assert(lod == NULL);
2515 }
2516 else {
2517 assert(derivs == NULL);
2518 assert(lod == NULL);
2519 }
2520
2521 if (static_texture_state->format == PIPE_FORMAT_NONE) {
2522 /*
2523 * If there's nothing bound, format is NONE, and we must return
2524 * all zero as mandated by d3d10 in this case.
2525 */
2526 unsigned chan;
2527 LLVMValueRef zero = lp_build_zero(gallivm, type);
2528 for (chan = 0; chan < 4; chan++) {
2529 texel_out[chan] = zero;
2530 }
2531 return;
2532 }
2533
2534 assert(type.floating);
2535
2536 /* Setup our build context */
2537 memset(&bld, 0, sizeof bld);
2538 bld.gallivm = gallivm;
2539 bld.context_ptr = context_ptr;
2540 bld.static_sampler_state = &derived_sampler_state;
2541 bld.static_texture_state = static_texture_state;
2542 bld.dynamic_state = dynamic_state;
2543 bld.format_desc = util_format_description(static_texture_state->format);
2544 bld.dims = dims;
2545
2546 bld.vector_width = lp_type_width(type);
2547
2548 bld.float_type = lp_type_float(32);
2549 bld.int_type = lp_type_int(32);
2550 bld.coord_type = type;
2551 bld.int_coord_type = lp_int_type(type);
2552 bld.float_size_in_type = lp_type_float(32);
2553 bld.float_size_in_type.length = dims > 1 ? 4 : 1;
2554 bld.int_size_in_type = lp_int_type(bld.float_size_in_type);
2555 bld.texel_type = type;
2556
2557 /* always using the first channel hopefully should be safe,
2558 * if not things WILL break in other places anyway.
2559 */
2560 if (bld.format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB &&
2561 bld.format_desc->channel[0].pure_integer) {
2562 if (bld.format_desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED) {
2563 bld.texel_type = lp_type_int_vec(type.width, type.width * type.length);
2564 }
2565 else if (bld.format_desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED) {
2566 bld.texel_type = lp_type_uint_vec(type.width, type.width * type.length);
2567 }
2568 }
2569 else if (util_format_has_stencil(bld.format_desc) &&
2570 !util_format_has_depth(bld.format_desc)) {
2571 /* for stencil only formats, sample stencil (uint) */
2572 bld.texel_type = lp_type_int_vec(type.width, type.width * type.length);
2573 }
2574
2575 if (!static_texture_state->level_zero_only) {
2576 derived_sampler_state.min_mip_filter = static_sampler_state->min_mip_filter;
2577 } else {
2578 derived_sampler_state.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
2579 }
2580 if (op_type == LP_SAMPLER_OP_GATHER) {
2581 /*
2582 * gather4 is exactly like GL_LINEAR filtering but in the end skipping
2583 * the actual filtering. Using mostly the same paths, so cube face
2584 * selection, coord wrapping etc. all naturally uses the same code.
2585 */
2586 derived_sampler_state.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
2587 derived_sampler_state.min_img_filter = PIPE_TEX_FILTER_LINEAR;
2588 derived_sampler_state.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
2589 }
2590 mip_filter = derived_sampler_state.min_mip_filter;
2591
2592 if (0) {
2593 debug_printf(" .min_mip_filter = %u\n", derived_sampler_state.min_mip_filter);
2594 }
2595
2596 if (static_texture_state->target == PIPE_TEXTURE_CUBE ||
2597 static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY)
2598 {
2599 /*
2600 * Seamless filtering ignores wrap modes.
2601 * Setting to CLAMP_TO_EDGE is correct for nearest filtering, for
2602 * bilinear it's not correct but way better than using for instance repeat.
2603 * Note we even set this for non-seamless. Technically GL allows any wrap
2604 * mode, which made sense when supporting true borders (can get seamless
2605 * effect with border and CLAMP_TO_BORDER), but gallium doesn't support
2606 * borders and d3d9 requires wrap modes to be ignored and it's a pain to fix
2607 * up the sampler state (as it makes it texture dependent).
2608 */
2609 derived_sampler_state.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
2610 derived_sampler_state.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
2611 }
2612 /*
2613 * We could force CLAMP to CLAMP_TO_EDGE here if min/mag filter is nearest,
2614 * so AoS path could be used. Not sure it's worth the trouble...
2615 */
2616
2617 min_img_filter = derived_sampler_state.min_img_filter;
2618 mag_img_filter = derived_sampler_state.mag_img_filter;
2619
2620
2621 /*
2622 * This is all a bit complicated different paths are chosen for performance
2623 * reasons.
2624 * Essentially, there can be 1 lod per element, 1 lod per quad or 1 lod for
2625 * everything (the last two options are equivalent for 4-wide case).
2626 * If there's per-quad lod but we split to 4-wide so we can use AoS, per-quad
2627 * lod is calculated then the lod value extracted afterwards so making this
2628 * case basically the same as far as lod handling is concerned for the
2629 * further sample/filter code as the 1 lod for everything case.
2630 * Different lod handling mostly shows up when building mipmap sizes
2631 * (lp_build_mipmap_level_sizes() and friends) and also in filtering
2632 * (getting the fractional part of the lod to the right texels).
2633 */
2634
2635 /*
2636 * There are other situations where at least the multiple int lods could be
2637 * avoided like min and max lod being equal.
2638 */
2639 bld.num_mips = bld.num_lods = 1;
2640
2641 if ((gallivm_debug & GALLIVM_DEBUG_NO_QUAD_LOD) &&
2642 (gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) &&
2643 (static_texture_state->target == PIPE_TEXTURE_CUBE ||
2644 static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) &&
2645 (op_is_tex && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
2646 /*
2647 * special case for using per-pixel lod even for implicit lod,
2648 * which is generally never required (ok by APIs) except to please
2649 * some (somewhat broken imho) tests (because per-pixel face selection
2650 * can cause derivatives to be different for pixels outside the primitive
2651 * due to the major axis division even if pre-project derivatives are
2652 * looking normal).
2653 */
2654 bld.num_mips = type.length;
2655 bld.num_lods = type.length;
2656 }
2657 else if (lod_property == LP_SAMPLER_LOD_PER_ELEMENT ||
2658 (explicit_lod || lod_bias || derivs)) {
2659 if ((!op_is_tex && target != PIPE_BUFFER) ||
2660 (op_is_tex && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
2661 bld.num_mips = type.length;
2662 bld.num_lods = type.length;
2663 }
2664 else if (op_is_tex && min_img_filter != mag_img_filter) {
2665 bld.num_mips = 1;
2666 bld.num_lods = type.length;
2667 }
2668 }
2669 /* TODO: for true scalar_lod should only use 1 lod value */
2670 else if ((!op_is_tex && explicit_lod && target != PIPE_BUFFER) ||
2671 (op_is_tex && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
2672 bld.num_mips = num_quads;
2673 bld.num_lods = num_quads;
2674 }
2675 else if (op_is_tex && min_img_filter != mag_img_filter) {
2676 bld.num_mips = 1;
2677 bld.num_lods = num_quads;
2678 }
2679
2680
2681 bld.lodf_type = type;
2682 /* we want native vector size to be able to use our intrinsics */
2683 if (bld.num_lods != type.length) {
2684 /* TODO: this currently always has to be per-quad or per-element */
2685 bld.lodf_type.length = type.length > 4 ? ((type.length + 15) / 16) * 4 : 1;
2686 }
2687 bld.lodi_type = lp_int_type(bld.lodf_type);
2688 bld.levelf_type = bld.lodf_type;
2689 if (bld.num_mips == 1) {
2690 bld.levelf_type.length = 1;
2691 }
2692 bld.leveli_type = lp_int_type(bld.levelf_type);
2693 bld.float_size_type = bld.float_size_in_type;
2694 /* Note: size vectors may not be native. They contain minified w/h/d/_ values,
2695 * with per-element lod that is w0/h0/d0/_/w1/h1/d1_/... so up to 8x4f32 */
2696 if (bld.num_mips > 1) {
2697 bld.float_size_type.length = bld.num_mips == type.length ?
2698 bld.num_mips * bld.float_size_in_type.length :
2699 type.length;
2700 }
2701 bld.int_size_type = lp_int_type(bld.float_size_type);
2702
2703 lp_build_context_init(&bld.float_bld, gallivm, bld.float_type);
2704 lp_build_context_init(&bld.float_vec_bld, gallivm, type);
2705 lp_build_context_init(&bld.int_bld, gallivm, bld.int_type);
2706 lp_build_context_init(&bld.coord_bld, gallivm, bld.coord_type);
2707 lp_build_context_init(&bld.int_coord_bld, gallivm, bld.int_coord_type);
2708 lp_build_context_init(&bld.int_size_in_bld, gallivm, bld.int_size_in_type);
2709 lp_build_context_init(&bld.float_size_in_bld, gallivm, bld.float_size_in_type);
2710 lp_build_context_init(&bld.int_size_bld, gallivm, bld.int_size_type);
2711 lp_build_context_init(&bld.float_size_bld, gallivm, bld.float_size_type);
2712 lp_build_context_init(&bld.texel_bld, gallivm, bld.texel_type);
2713 lp_build_context_init(&bld.levelf_bld, gallivm, bld.levelf_type);
2714 lp_build_context_init(&bld.leveli_bld, gallivm, bld.leveli_type);
2715 lp_build_context_init(&bld.lodf_bld, gallivm, bld.lodf_type);
2716 lp_build_context_init(&bld.lodi_bld, gallivm, bld.lodi_type);
2717
2718 /* Get the dynamic state */
2719 tex_width = dynamic_state->width(dynamic_state, gallivm,
2720 context_ptr, texture_index);
2721 bld.row_stride_array = dynamic_state->row_stride(dynamic_state, gallivm,
2722 context_ptr, texture_index);
2723 bld.img_stride_array = dynamic_state->img_stride(dynamic_state, gallivm,
2724 context_ptr, texture_index);
2725 bld.base_ptr = dynamic_state->base_ptr(dynamic_state, gallivm,
2726 context_ptr, texture_index);
2727 bld.mip_offsets = dynamic_state->mip_offsets(dynamic_state, gallivm,
2728 context_ptr, texture_index);
2729 /* Note that mip_offsets is an array[level] of offsets to texture images */
2730
2731 if (dynamic_state->cache_ptr && thread_data_ptr) {
2732 bld.cache = dynamic_state->cache_ptr(dynamic_state, gallivm,
2733 thread_data_ptr, texture_index);
2734 }
2735
2736 /* width, height, depth as single int vector */
2737 if (dims <= 1) {
2738 bld.int_size = tex_width;
2739 }
2740 else {
2741 bld.int_size = LLVMBuildInsertElement(builder, bld.int_size_in_bld.undef,
2742 tex_width,
2743 LLVMConstInt(i32t, 0, 0), "");
2744 if (dims >= 2) {
2745 LLVMValueRef tex_height =
2746 dynamic_state->height(dynamic_state, gallivm,
2747 context_ptr, texture_index);
2748 bld.int_size = LLVMBuildInsertElement(builder, bld.int_size,
2749 tex_height,
2750 LLVMConstInt(i32t, 1, 0), "");
2751 if (dims >= 3) {
2752 LLVMValueRef tex_depth =
2753 dynamic_state->depth(dynamic_state, gallivm, context_ptr,
2754 texture_index);
2755 bld.int_size = LLVMBuildInsertElement(builder, bld.int_size,
2756 tex_depth,
2757 LLVMConstInt(i32t, 2, 0), "");
2758 }
2759 }
2760 }
2761
2762 for (i = 0; i < 5; i++) {
2763 newcoords[i] = coords[i];
2764 }
2765
2766 if (0) {
2767 /* For debug: no-op texture sampling */
2768 lp_build_sample_nop(gallivm,
2769 bld.texel_type,
2770 newcoords,
2771 texel_out);
2772 }
2773
2774 else if (op_type == LP_SAMPLER_OP_FETCH) {
2775 lp_build_fetch_texel(&bld, texture_index, newcoords,
2776 lod, offsets,
2777 texel_out);
2778 }
2779
2780 else {
2781 LLVMValueRef lod_fpart = NULL, lod_positive = NULL;
2782 LLVMValueRef ilevel0 = NULL, ilevel1 = NULL;
2783 boolean use_aos;
2784
2785 if (util_format_is_pure_integer(static_texture_state->format) &&
2786 !util_format_has_depth(bld.format_desc) &&
2787 (static_sampler_state->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR ||
2788 static_sampler_state->min_img_filter == PIPE_TEX_FILTER_LINEAR ||
2789 static_sampler_state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)) {
2790 /*
2791 * Bail if impossible filtering is specified (the awkard additional
2792 * depth check is because it is legal in gallium to have things like S8Z24
2793 * here which would say it's pure int despite such formats should sample
2794 * the depth component).
2795 * In GL such filters make the texture incomplete, this makes it robust
2796 * against state trackers which set this up regardless (we'd crash in the
2797 * lerp later (except for gather)).
2798 * Must do this after fetch_texel code since with GL state tracker we'll
2799 * get some junk sampler for buffer textures.
2800 */
2801 unsigned chan;
2802 LLVMValueRef zero = lp_build_zero(gallivm, type);
2803 for (chan = 0; chan < 4; chan++) {
2804 texel_out[chan] = zero;
2805 }
2806 return;
2807 }
2808
2809 use_aos = util_format_fits_8unorm(bld.format_desc) &&
2810 op_is_tex &&
2811 /* not sure this is strictly needed or simply impossible */
2812 derived_sampler_state.compare_mode == PIPE_TEX_COMPARE_NONE &&
2813 lp_is_simple_wrap_mode(derived_sampler_state.wrap_s);
2814
2815 use_aos &= bld.num_lods <= num_quads ||
2816 derived_sampler_state.min_img_filter ==
2817 derived_sampler_state.mag_img_filter;
2818 if (dims > 1) {
2819 use_aos &= lp_is_simple_wrap_mode(derived_sampler_state.wrap_t);
2820 if (dims > 2) {
2821 use_aos &= lp_is_simple_wrap_mode(derived_sampler_state.wrap_r);
2822 }
2823 }
2824 if ((static_texture_state->target == PIPE_TEXTURE_CUBE ||
2825 static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) &&
2826 derived_sampler_state.seamless_cube_map &&
2827 (derived_sampler_state.min_img_filter == PIPE_TEX_FILTER_LINEAR ||
2828 derived_sampler_state.mag_img_filter == PIPE_TEX_FILTER_LINEAR)) {
2829 /* theoretically possible with AoS filtering but not implemented (complex!) */
2830 use_aos = 0;
2831 }
2832
2833 if ((gallivm_debug & GALLIVM_DEBUG_PERF) &&
2834 !use_aos && util_format_fits_8unorm(bld.format_desc)) {
2835 debug_printf("%s: using floating point linear filtering for %s\n",
2836 __FUNCTION__, bld.format_desc->short_name);
2837 debug_printf(" min_img %d mag_img %d mip %d target %d seamless %d"
2838 " wraps %d wrapt %d wrapr %d\n",
2839 derived_sampler_state.min_img_filter,
2840 derived_sampler_state.mag_img_filter,
2841 derived_sampler_state.min_mip_filter,
2842 static_texture_state->target,
2843 derived_sampler_state.seamless_cube_map,
2844 derived_sampler_state.wrap_s,
2845 derived_sampler_state.wrap_t,
2846 derived_sampler_state.wrap_r);
2847 }
2848
2849 lp_build_sample_common(&bld, texture_index, sampler_index,
2850 newcoords,
2851 derivs, lod_bias, explicit_lod,
2852 &lod_positive, &lod_fpart,
2853 &ilevel0, &ilevel1);
2854
2855 if (use_aos && static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) {
2856 /* The aos path doesn't do seamless filtering so simply add cube layer
2857 * to face now.
2858 */
2859 newcoords[2] = lp_build_add(&bld.int_coord_bld, newcoords[2], newcoords[3]);
2860 }
2861
2862 /*
2863 * we only try 8-wide sampling with soa or if we have AVX2
2864 * as it appears to be a loss with just AVX)
2865 */
2866 if (num_quads == 1 || !use_aos ||
2867 (util_cpu_caps.has_avx2 &&
2868 (bld.num_lods == 1 ||
2869 derived_sampler_state.min_img_filter == derived_sampler_state.mag_img_filter))) {
2870 if (use_aos) {
2871 /* do sampling/filtering with fixed pt arithmetic */
2872 lp_build_sample_aos(&bld, sampler_index,
2873 newcoords[0], newcoords[1],
2874 newcoords[2],
2875 offsets, lod_positive, lod_fpart,
2876 ilevel0, ilevel1,
2877 texel_out);
2878 }
2879
2880 else {
2881 lp_build_sample_general(&bld, sampler_index,
2882 op_type == LP_SAMPLER_OP_GATHER,
2883 newcoords, offsets,
2884 lod_positive, lod_fpart,
2885 ilevel0, ilevel1,
2886 texel_out);
2887 }
2888 }
2889 else {
2890 unsigned j;
2891 struct lp_build_sample_context bld4;
2892 struct lp_type type4 = type;
2893 unsigned i;
2894 LLVMValueRef texelout4[4];
2895 LLVMValueRef texelouttmp[4][LP_MAX_VECTOR_LENGTH/16];
2896
2897 type4.length = 4;
2898
2899 /* Setup our build context */
2900 memset(&bld4, 0, sizeof bld4);
2901 bld4.gallivm = bld.gallivm;
2902 bld4.context_ptr = bld.context_ptr;
2903 bld4.static_texture_state = bld.static_texture_state;
2904 bld4.static_sampler_state = bld.static_sampler_state;
2905 bld4.dynamic_state = bld.dynamic_state;
2906 bld4.format_desc = bld.format_desc;
2907 bld4.dims = bld.dims;
2908 bld4.row_stride_array = bld.row_stride_array;
2909 bld4.img_stride_array = bld.img_stride_array;
2910 bld4.base_ptr = bld.base_ptr;
2911 bld4.mip_offsets = bld.mip_offsets;
2912 bld4.int_size = bld.int_size;
2913 bld4.cache = bld.cache;
2914
2915 bld4.vector_width = lp_type_width(type4);
2916
2917 bld4.float_type = lp_type_float(32);
2918 bld4.int_type = lp_type_int(32);
2919 bld4.coord_type = type4;
2920 bld4.int_coord_type = lp_int_type(type4);
2921 bld4.float_size_in_type = lp_type_float(32);
2922 bld4.float_size_in_type.length = dims > 1 ? 4 : 1;
2923 bld4.int_size_in_type = lp_int_type(bld4.float_size_in_type);
2924 bld4.texel_type = bld.texel_type;
2925 bld4.texel_type.length = 4;
2926
2927 bld4.num_mips = bld4.num_lods = 1;
2928 if ((gallivm_debug & GALLIVM_DEBUG_NO_QUAD_LOD) &&
2929 (gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) &&
2930 (static_texture_state->target == PIPE_TEXTURE_CUBE ||
2931 static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) &&
2932 (op_is_tex && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
2933 bld4.num_mips = type4.length;
2934 bld4.num_lods = type4.length;
2935 }
2936 if (lod_property == LP_SAMPLER_LOD_PER_ELEMENT &&
2937 (explicit_lod || lod_bias || derivs)) {
2938 if ((!op_is_tex && target != PIPE_BUFFER) ||
2939 (op_is_tex && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
2940 bld4.num_mips = type4.length;
2941 bld4.num_lods = type4.length;
2942 }
2943 else if (op_is_tex && min_img_filter != mag_img_filter) {
2944 bld4.num_mips = 1;
2945 bld4.num_lods = type4.length;
2946 }
2947 }
2948
2949 /* we want native vector size to be able to use our intrinsics */
2950 bld4.lodf_type = type4;
2951 if (bld4.num_lods != type4.length) {
2952 bld4.lodf_type.length = 1;
2953 }
2954 bld4.lodi_type = lp_int_type(bld4.lodf_type);
2955 bld4.levelf_type = type4;
2956 if (bld4.num_mips != type4.length) {
2957 bld4.levelf_type.length = 1;
2958 }
2959 bld4.leveli_type = lp_int_type(bld4.levelf_type);
2960 bld4.float_size_type = bld4.float_size_in_type;
2961 if (bld4.num_mips > 1) {
2962 bld4.float_size_type.length = bld4.num_mips == type4.length ?
2963 bld4.num_mips * bld4.float_size_in_type.length :
2964 type4.length;
2965 }
2966 bld4.int_size_type = lp_int_type(bld4.float_size_type);
2967
2968 lp_build_context_init(&bld4.float_bld, gallivm, bld4.float_type);
2969 lp_build_context_init(&bld4.float_vec_bld, gallivm, type4);
2970 lp_build_context_init(&bld4.int_bld, gallivm, bld4.int_type);
2971 lp_build_context_init(&bld4.coord_bld, gallivm, bld4.coord_type);
2972 lp_build_context_init(&bld4.int_coord_bld, gallivm, bld4.int_coord_type);
2973 lp_build_context_init(&bld4.int_size_in_bld, gallivm, bld4.int_size_in_type);
2974 lp_build_context_init(&bld4.float_size_in_bld, gallivm, bld4.float_size_in_type);
2975 lp_build_context_init(&bld4.int_size_bld, gallivm, bld4.int_size_type);
2976 lp_build_context_init(&bld4.float_size_bld, gallivm, bld4.float_size_type);
2977 lp_build_context_init(&bld4.texel_bld, gallivm, bld4.texel_type);
2978 lp_build_context_init(&bld4.levelf_bld, gallivm, bld4.levelf_type);
2979 lp_build_context_init(&bld4.leveli_bld, gallivm, bld4.leveli_type);
2980 lp_build_context_init(&bld4.lodf_bld, gallivm, bld4.lodf_type);
2981 lp_build_context_init(&bld4.lodi_bld, gallivm, bld4.lodi_type);
2982
2983 for (i = 0; i < num_quads; i++) {
2984 LLVMValueRef s4, t4, r4;
2985 LLVMValueRef lod_positive4, lod_fpart4 = NULL;
2986 LLVMValueRef ilevel04, ilevel14 = NULL;
2987 LLVMValueRef offsets4[4] = { NULL };
2988 unsigned num_lods = bld4.num_lods;
2989
2990 s4 = lp_build_extract_range(gallivm, newcoords[0], 4*i, 4);
2991 t4 = lp_build_extract_range(gallivm, newcoords[1], 4*i, 4);
2992 r4 = lp_build_extract_range(gallivm, newcoords[2], 4*i, 4);
2993
2994 if (offsets[0]) {
2995 offsets4[0] = lp_build_extract_range(gallivm, offsets[0], 4*i, 4);
2996 if (dims > 1) {
2997 offsets4[1] = lp_build_extract_range(gallivm, offsets[1], 4*i, 4);
2998 if (dims > 2) {
2999 offsets4[2] = lp_build_extract_range(gallivm, offsets[2], 4*i, 4);
3000 }
3001 }
3002 }
3003 lod_positive4 = lp_build_extract_range(gallivm, lod_positive, num_lods * i, num_lods);
3004 ilevel04 = bld.num_mips == 1 ? ilevel0 :
3005 lp_build_extract_range(gallivm, ilevel0, num_lods * i, num_lods);
3006 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
3007 ilevel14 = lp_build_extract_range(gallivm, ilevel1, num_lods * i, num_lods);
3008 lod_fpart4 = lp_build_extract_range(gallivm, lod_fpart, num_lods * i, num_lods);
3009 }
3010
3011 if (use_aos) {
3012 /* do sampling/filtering with fixed pt arithmetic */
3013 lp_build_sample_aos(&bld4, sampler_index,
3014 s4, t4, r4, offsets4,
3015 lod_positive4, lod_fpart4,
3016 ilevel04, ilevel14,
3017 texelout4);
3018 }
3019
3020 else {
3021 /* this path is currently unreachable and hence might break easily... */
3022 LLVMValueRef newcoords4[5];
3023 newcoords4[0] = s4;
3024 newcoords4[1] = t4;
3025 newcoords4[2] = r4;
3026 newcoords4[3] = lp_build_extract_range(gallivm, newcoords[3], 4*i, 4);
3027 newcoords4[4] = lp_build_extract_range(gallivm, newcoords[4], 4*i, 4);
3028
3029 lp_build_sample_general(&bld4, sampler_index,
3030 op_type == LP_SAMPLER_OP_GATHER,
3031 newcoords4, offsets4,
3032 lod_positive4, lod_fpart4,
3033 ilevel04, ilevel14,
3034 texelout4);
3035 }
3036 for (j = 0; j < 4; j++) {
3037 texelouttmp[j][i] = texelout4[j];
3038 }
3039 }
3040
3041 for (j = 0; j < 4; j++) {
3042 texel_out[j] = lp_build_concat(gallivm, texelouttmp[j], type4, num_quads);
3043 }
3044 }
3045 }
3046
3047 if (target != PIPE_BUFFER && op_type != LP_SAMPLER_OP_GATHER) {
3048 apply_sampler_swizzle(&bld, texel_out);
3049 }
3050
3051 /*
3052 * texel type can be a (32bit) int/uint (for pure int formats only),
3053 * however we are expected to always return floats (storage is untyped).
3054 */
3055 if (!bld.texel_type.floating) {
3056 unsigned chan;
3057 for (chan = 0; chan < 4; chan++) {
3058 texel_out[chan] = LLVMBuildBitCast(builder, texel_out[chan],
3059 lp_build_vec_type(gallivm, type), "");
3060 }
3061 }
3062 }
3063
3064
/* If non-zero, complex sampling cases are emitted as separate per-
 * (texture, sampler, sample_key) LLVM functions and called, instead of
 * being inlined at every use (see lp_build_sample_soa() for the policy). */
#define USE_TEX_FUNC_CALL 1

/* Upper bound on the number of arguments passed to a generated texture
 * sampling function (sized for worst case: cache ptr, coords, layer,
 * shadow coord, offsets, derivatives). */
#define LP_MAX_TEX_FUNC_ARGS 32
3068
3069 static inline void
3070 get_target_info(enum pipe_texture_target target,
3071 unsigned *num_coords, unsigned *num_derivs,
3072 unsigned *num_offsets, unsigned *layer)
3073 {
3074 unsigned dims = texture_dims(target);
3075 *num_coords = dims;
3076 *num_offsets = dims;
3077 *num_derivs = (target == PIPE_TEXTURE_CUBE ||
3078 target == PIPE_TEXTURE_CUBE_ARRAY) ? 3 : dims;
3079 *layer = has_layer_coord(target) ? 2: 0;
3080 if (target == PIPE_TEXTURE_CUBE_ARRAY) {
3081 /*
3082 * dims doesn't include r coord for cubes - this is handled
3083 * by layer instead, but need to fix up for cube arrays...
3084 */
3085 *layer = 3;
3086 *num_coords = 3;
3087 }
3088 }
3089
3090
/**
 * Generate the function body for a texture sampling function.
 *
 * The incoming LLVM \p function was declared by lp_build_sample_soa_func();
 * the parameter unpacking below must mirror the argument packing there
 * exactly (same kinds, same order), which is why both sides gate each
 * group on the same sample_key / lod_control / need_cache conditions.
 * The generated function returns the four texel channels as an aggregate.
 */
static void
lp_build_sample_gen_func(struct gallivm_state *gallivm,
                         const struct lp_static_texture_state *static_texture_state,
                         const struct lp_static_sampler_state *static_sampler_state,
                         struct lp_sampler_dynamic_state *dynamic_state,
                         struct lp_type type,
                         unsigned texture_index,
                         unsigned sampler_index,
                         LLVMValueRef function,
                         unsigned num_args,
                         unsigned sample_key)
{
   LLVMBuilderRef old_builder;
   LLVMBasicBlockRef block;
   LLVMValueRef coords[5];
   LLVMValueRef offsets[3] = { NULL };
   LLVMValueRef lod = NULL;
   LLVMValueRef context_ptr;
   LLVMValueRef thread_data_ptr = NULL;
   LLVMValueRef texel_out[4];
   struct lp_derivatives derivs;
   struct lp_derivatives *deriv_ptr = NULL;
   unsigned num_param = 0;
   unsigned i, num_coords, num_derivs, num_offsets, layer;
   enum lp_sampler_lod_control lod_control;
   boolean need_cache = FALSE;

   lod_control = (sample_key & LP_SAMPLER_LOD_CONTROL_MASK) >>
                    LP_SAMPLER_LOD_CONTROL_SHIFT;

   get_target_info(static_texture_state->target,
                   &num_coords, &num_derivs, &num_offsets, &layer);

   /* A thread-data (cache) parameter is only present for S3TC formats
    * when the dynamic state provides a cache - must match the check in
    * lp_build_sample_soa_func(). */
   if (dynamic_state->cache_ptr) {
      const struct util_format_description *format_desc;
      format_desc = util_format_description(static_texture_state->format);
      if (format_desc && format_desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
         need_cache = TRUE;
      }
   }

   /* "unpack" arguments */
   context_ptr = LLVMGetParam(function, num_param++);
   if (need_cache) {
      thread_data_ptr = LLVMGetParam(function, num_param++);
   }
   for (i = 0; i < num_coords; i++) {
      coords[i] = LLVMGetParam(function, num_param++);
   }
   for (i = num_coords; i < 5; i++) {
      /* This is rather unfortunate... */
      /* fill unused coord slots with undef so downstream code can read
       * all 5 entries unconditionally */
      coords[i] = lp_build_undef(gallivm, type);
   }
   if (layer) {
      /* array layer travels in the slot reported by get_target_info() */
      coords[layer] = LLVMGetParam(function, num_param++);
   }
   if (sample_key & LP_SAMPLER_SHADOW) {
      /* shadow comparison value always occupies coord slot 4 */
      coords[4] = LLVMGetParam(function, num_param++);
   }
   if (sample_key & LP_SAMPLER_OFFSETS) {
      for (i = 0; i < num_offsets; i++) {
         offsets[i] = LLVMGetParam(function, num_param++);
      }
   }
   if (lod_control == LP_SAMPLER_LOD_BIAS ||
       lod_control == LP_SAMPLER_LOD_EXPLICIT) {
      lod = LLVMGetParam(function, num_param++);
   }
   else if (lod_control == LP_SAMPLER_LOD_DERIVATIVES) {
      /* ddx/ddy are interleaved per coordinate, matching the packing side */
      for (i = 0; i < num_derivs; i++) {
         derivs.ddx[i] = LLVMGetParam(function, num_param++);
         derivs.ddy[i] = LLVMGetParam(function, num_param++);
      }
      deriv_ptr = &derivs;
   }

   /* caller-declared prototype and our unpacking must agree */
   assert(num_args == num_param);

   /*
    * Function body
    */

   /* Build into a fresh builder/block so the caller's insertion point
    * is untouched; restored below. */
   old_builder = gallivm->builder;
   block = LLVMAppendBasicBlockInContext(gallivm->context, function, "entry");
   gallivm->builder = LLVMCreateBuilderInContext(gallivm->context);
   LLVMPositionBuilderAtEnd(gallivm->builder, block);

   lp_build_sample_soa_code(gallivm,
                            static_texture_state,
                            static_sampler_state,
                            dynamic_state,
                            type,
                            sample_key,
                            texture_index,
                            sampler_index,
                            context_ptr,
                            thread_data_ptr,
                            coords,
                            offsets,
                            deriv_ptr,
                            lod,
                            texel_out);

   /* return all four channels as one aggregate value */
   LLVMBuildAggregateRet(gallivm->builder, texel_out, 4);

   LLVMDisposeBuilder(gallivm->builder);
   gallivm->builder = old_builder;

   gallivm_verify_function(gallivm, function);
}
3204
3205
/**
 * Call the matching function for texture sampling.
 * If there's no match, generate a new one.
 *
 * Functions are cached in the LLVM module by name (see func_name below);
 * the name encodes texture unit, sampler unit and sample_key, which
 * together cover all state the generated code depends on. The argument
 * packing here must mirror the parameter unpacking in
 * lp_build_sample_gen_func(). Results are written to params->texel[0..3].
 */
static void
lp_build_sample_soa_func(struct gallivm_state *gallivm,
                         const struct lp_static_texture_state *static_texture_state,
                         const struct lp_static_sampler_state *static_sampler_state,
                         struct lp_sampler_dynamic_state *dynamic_state,
                         const struct lp_sampler_params *params)
{
   LLVMBuilderRef builder = gallivm->builder;
   /* the module owning the current insertion point - where the texture
    * function is looked up / added */
   LLVMModuleRef module = LLVMGetGlobalParent(LLVMGetBasicBlockParent(
                             LLVMGetInsertBlock(builder)));
   LLVMValueRef function, inst;
   LLVMValueRef args[LP_MAX_TEX_FUNC_ARGS];
   LLVMBasicBlockRef bb;
   LLVMValueRef tex_ret;
   unsigned num_args = 0;
   char func_name[64];
   unsigned i, num_coords, num_derivs, num_offsets, layer;
   unsigned texture_index = params->texture_index;
   unsigned sampler_index = params->sampler_index;
   unsigned sample_key = params->sample_key;
   const LLVMValueRef *coords = params->coords;
   const LLVMValueRef *offsets = params->offsets;
   const struct lp_derivatives *derivs = params->derivs;
   enum lp_sampler_lod_control lod_control;
   boolean need_cache = FALSE;

   lod_control = (sample_key & LP_SAMPLER_LOD_CONTROL_MASK) >>
                    LP_SAMPLER_LOD_CONTROL_SHIFT;

   get_target_info(static_texture_state->target,
                   &num_coords, &num_derivs, &num_offsets, &layer);

   if (dynamic_state->cache_ptr) {
      const struct util_format_description *format_desc;
      format_desc = util_format_description(static_texture_state->format);
      if (format_desc && format_desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
         /*
          * This is not 100% correct, if we have cache but the
          * util_format_s3tc_prefer is true the cache won't get used
          * regardless (could hook up the block decode there...) */
         need_cache = TRUE;
      }
   }
   /*
    * texture function matches are found by name.
    * Thus the name has to include both the texture and sampler unit
    * (which covers all static state) plus the actual texture function
    * (including things like offsets, shadow coord, lod control).
    * Additionally lod_property has to be included too.
    */

   util_snprintf(func_name, sizeof(func_name), "texfunc_res_%d_sam_%d_%x",
                 texture_index, sampler_index, sample_key);

   function = LLVMGetNamedFunction(module, func_name);

   if(!function) {
      LLVMTypeRef arg_types[LP_MAX_TEX_FUNC_ARGS];
      LLVMTypeRef ret_type;
      LLVMTypeRef function_type;
      LLVMTypeRef val_type[4];
      unsigned num_param = 0;

      /*
       * Generate the function prototype.
       */
      /* Argument types are taken from the actual values being passed;
       * group order below must stay in sync with the args[] packing
       * further down and the unpacking in lp_build_sample_gen_func(). */

      arg_types[num_param++] = LLVMTypeOf(params->context_ptr);
      if (need_cache) {
         arg_types[num_param++] = LLVMTypeOf(params->thread_data_ptr);
      }
      for (i = 0; i < num_coords; i++) {
         arg_types[num_param++] = LLVMTypeOf(coords[0]);
         assert(LLVMTypeOf(coords[0]) == LLVMTypeOf(coords[i]));
      }
      if (layer) {
         arg_types[num_param++] = LLVMTypeOf(coords[layer]);
         assert(LLVMTypeOf(coords[0]) == LLVMTypeOf(coords[layer]));
      }
      if (sample_key & LP_SAMPLER_SHADOW) {
         arg_types[num_param++] = LLVMTypeOf(coords[0]);
      }
      if (sample_key & LP_SAMPLER_OFFSETS) {
         for (i = 0; i < num_offsets; i++) {
            arg_types[num_param++] = LLVMTypeOf(offsets[0]);
            assert(LLVMTypeOf(offsets[0]) == LLVMTypeOf(offsets[i]));
         }
      }
      if (lod_control == LP_SAMPLER_LOD_BIAS ||
          lod_control == LP_SAMPLER_LOD_EXPLICIT) {
         arg_types[num_param++] = LLVMTypeOf(params->lod);
      }
      else if (lod_control == LP_SAMPLER_LOD_DERIVATIVES) {
         for (i = 0; i < num_derivs; i++) {
            arg_types[num_param++] = LLVMTypeOf(derivs->ddx[i]);
            arg_types[num_param++] = LLVMTypeOf(derivs->ddy[i]);
            assert(LLVMTypeOf(derivs->ddx[0]) == LLVMTypeOf(derivs->ddx[i]));
            assert(LLVMTypeOf(derivs->ddy[0]) == LLVMTypeOf(derivs->ddy[i]));
         }
      }

      /* return type: struct of the four texel channel vectors */
      val_type[0] = val_type[1] = val_type[2] = val_type[3] =
         lp_build_vec_type(gallivm, params->type);
      ret_type = LLVMStructTypeInContext(gallivm->context, val_type, 4, 0);
      function_type = LLVMFunctionType(ret_type, arg_types, num_param, 0);
      function = LLVMAddFunction(module, func_name, function_type);

      /* pointer args (context, cache) never alias anything else */
      for (i = 0; i < num_param; ++i) {
         if(LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind) {
            LLVMAddAttribute(LLVMGetParam(function, i), LLVMNoAliasAttribute);
         }
      }

      LLVMSetFunctionCallConv(function, LLVMFastCallConv);
      LLVMSetLinkage(function, LLVMInternalLinkage);

      lp_build_sample_gen_func(gallivm,
                               static_texture_state,
                               static_sampler_state,
                               dynamic_state,
                               params->type,
                               texture_index,
                               sampler_index,
                               function,
                               num_param,
                               sample_key);
   }

   /* pack the actual arguments - must mirror the prototype built above */
   num_args = 0;
   args[num_args++] = params->context_ptr;
   if (need_cache) {
      args[num_args++] = params->thread_data_ptr;
   }
   for (i = 0; i < num_coords; i++) {
      args[num_args++] = coords[i];
   }
   if (layer) {
      args[num_args++] = coords[layer];
   }
   if (sample_key & LP_SAMPLER_SHADOW) {
      args[num_args++] = coords[4];
   }
   if (sample_key & LP_SAMPLER_OFFSETS) {
      for (i = 0; i < num_offsets; i++) {
         args[num_args++] = offsets[i];
      }
   }
   if (lod_control == LP_SAMPLER_LOD_BIAS ||
       lod_control == LP_SAMPLER_LOD_EXPLICIT) {
      args[num_args++] = params->lod;
   }
   else if (lod_control == LP_SAMPLER_LOD_DERIVATIVES) {
      for (i = 0; i < num_derivs; i++) {
         args[num_args++] = derivs->ddx[i];
         args[num_args++] = derivs->ddy[i];
      }
   }

   assert(num_args <= LP_MAX_TEX_FUNC_ARGS);

   tex_ret = LLVMBuildCall(builder, function, args, num_args, "");
   bb = LLVMGetInsertBlock(builder);
   inst = LLVMGetLastInstruction(bb);
   /* call site convention must match the fastcc set on the function */
   LLVMSetInstructionCallConv(inst, LLVMFastCallConv);

   /* split the aggregate return back into the four channel outputs */
   for (i = 0; i < 4; i++) {
      params->texel[i] = LLVMBuildExtractValue(gallivm->builder, tex_ret, i, "");
   }
}
3379
3380
3381 /**
3382 * Build texture sampling code.
3383 * Either via a function call or inline it directly.
3384 */
3385 void
3386 lp_build_sample_soa(const struct lp_static_texture_state *static_texture_state,
3387 const struct lp_static_sampler_state *static_sampler_state,
3388 struct lp_sampler_dynamic_state *dynamic_state,
3389 struct gallivm_state *gallivm,
3390 const struct lp_sampler_params *params)
3391 {
3392 boolean use_tex_func = FALSE;
3393
3394 /*
3395 * Do not use a function call if the sampling is "simple enough".
3396 * We define this by
3397 * a) format
3398 * b) no mips (either one level only or no mip filter)
3399 * No mips will definitely make the code smaller, though
3400 * the format requirement is a bit iffy - there's some (SoA) formats
3401 * which definitely generate less code. This does happen to catch
3402 * some important cases though which are hurt quite a bit by using
3403 * a call (though not really because of the call overhead but because
3404 * they are reusing the same texture unit with some of the same
3405 * parameters).
3406 * Ideally we'd let llvm recognize this stuff by doing IPO passes.
3407 */
3408
3409 if (USE_TEX_FUNC_CALL) {
3410 const struct util_format_description *format_desc;
3411 boolean simple_format;
3412 boolean simple_tex;
3413 enum lp_sampler_op_type op_type;
3414 format_desc = util_format_description(static_texture_state->format);
3415 simple_format = !format_desc ||
3416 (util_format_is_rgba8_variant(format_desc) &&
3417 format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB);
3418
3419 op_type = (params->sample_key & LP_SAMPLER_OP_TYPE_MASK) >>
3420 LP_SAMPLER_OP_TYPE_SHIFT;
3421 simple_tex =
3422 op_type != LP_SAMPLER_OP_TEXTURE ||
3423 ((static_sampler_state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE ||
3424 static_texture_state->level_zero_only == TRUE) &&
3425 static_sampler_state->min_img_filter == static_sampler_state->mag_img_filter);
3426
3427 use_tex_func = format_desc && !(simple_format && simple_tex);
3428 }
3429
3430 if (use_tex_func) {
3431 lp_build_sample_soa_func(gallivm,
3432 static_texture_state,
3433 static_sampler_state,
3434 dynamic_state,
3435 params);
3436 }
3437 else {
3438 lp_build_sample_soa_code(gallivm,
3439 static_texture_state,
3440 static_sampler_state,
3441 dynamic_state,
3442 params->type,
3443 params->sample_key,
3444 params->texture_index,
3445 params->sampler_index,
3446 params->context_ptr,
3447 params->thread_data_ptr,
3448 params->coords,
3449 params->offsets,
3450 params->derivs,
3451 params->lod,
3452 params->texel);
3453 }
3454 }
3455
3456
3457 void
3458 lp_build_size_query_soa(struct gallivm_state *gallivm,
3459 const struct lp_static_texture_state *static_state,
3460 struct lp_sampler_dynamic_state *dynamic_state,
3461 const struct lp_sampler_size_query_params *params)
3462 {
3463 LLVMValueRef lod, level, size;
3464 LLVMValueRef first_level = NULL;
3465 int dims, i;
3466 boolean has_array;
3467 unsigned num_lods = 1;
3468 struct lp_build_context bld_int_vec4;
3469 LLVMValueRef context_ptr = params->context_ptr;
3470 unsigned texture_unit = params->texture_unit;
3471 unsigned target = params->target;
3472
3473 if (static_state->format == PIPE_FORMAT_NONE) {
3474 /*
3475 * If there's nothing bound, format is NONE, and we must return
3476 * all zero as mandated by d3d10 in this case.
3477 */
3478 unsigned chan;
3479 LLVMValueRef zero = lp_build_const_vec(gallivm, params->int_type, 0.0F);
3480 for (chan = 0; chan < 4; chan++) {
3481 params->sizes_out[chan] = zero;
3482 }
3483 return;
3484 }
3485
3486 /*
3487 * Do some sanity verification about bound texture and shader dcl target.
3488 * Not entirely sure what's possible but assume array/non-array
3489 * always compatible (probably not ok for OpenGL but d3d10 has no
3490 * distinction of arrays at the resource level).
3491 * Everything else looks bogus (though not entirely sure about rect/2d).
3492 * Currently disabled because it causes assertion failures if there's
3493 * nothing bound (or rather a dummy texture, not that this case would
3494 * return the right values).
3495 */
3496 if (0 && static_state->target != target) {
3497 if (static_state->target == PIPE_TEXTURE_1D)
3498 assert(target == PIPE_TEXTURE_1D_ARRAY);
3499 else if (static_state->target == PIPE_TEXTURE_1D_ARRAY)
3500 assert(target == PIPE_TEXTURE_1D);
3501 else if (static_state->target == PIPE_TEXTURE_2D)
3502 assert(target == PIPE_TEXTURE_2D_ARRAY);
3503 else if (static_state->target == PIPE_TEXTURE_2D_ARRAY)
3504 assert(target == PIPE_TEXTURE_2D);
3505 else if (static_state->target == PIPE_TEXTURE_CUBE)
3506 assert(target == PIPE_TEXTURE_CUBE_ARRAY);
3507 else if (static_state->target == PIPE_TEXTURE_CUBE_ARRAY)
3508 assert(target == PIPE_TEXTURE_CUBE);
3509 else
3510 assert(0);
3511 }
3512
3513 dims = texture_dims(target);
3514
3515 switch (target) {
3516 case PIPE_TEXTURE_1D_ARRAY:
3517 case PIPE_TEXTURE_2D_ARRAY:
3518 case PIPE_TEXTURE_CUBE_ARRAY:
3519 has_array = TRUE;
3520 break;
3521 default:
3522 has_array = FALSE;
3523 break;
3524 }
3525
3526 assert(!params->int_type.floating);
3527
3528 lp_build_context_init(&bld_int_vec4, gallivm, lp_type_int_vec(32, 128));
3529
3530 if (params->explicit_lod) {
3531 /* FIXME: this needs to honor per-element lod */
3532 lod = LLVMBuildExtractElement(gallivm->builder, params->explicit_lod,
3533 lp_build_const_int32(gallivm, 0), "");
3534 first_level = dynamic_state->first_level(dynamic_state, gallivm,
3535 context_ptr, texture_unit);
3536 level = LLVMBuildAdd(gallivm->builder, lod, first_level, "level");
3537 lod = lp_build_broadcast_scalar(&bld_int_vec4, level);
3538 } else {
3539 lod = bld_int_vec4.zero;
3540 }
3541
3542 size = bld_int_vec4.undef;
3543
3544 size = LLVMBuildInsertElement(gallivm->builder, size,
3545 dynamic_state->width(dynamic_state, gallivm,
3546 context_ptr, texture_unit),
3547 lp_build_const_int32(gallivm, 0), "");
3548
3549 if (dims >= 2) {
3550 size = LLVMBuildInsertElement(gallivm->builder, size,
3551 dynamic_state->height(dynamic_state, gallivm,
3552 context_ptr, texture_unit),
3553 lp_build_const_int32(gallivm, 1), "");
3554 }
3555
3556 if (dims >= 3) {
3557 size = LLVMBuildInsertElement(gallivm->builder, size,
3558 dynamic_state->depth(dynamic_state, gallivm,
3559 context_ptr, texture_unit),
3560 lp_build_const_int32(gallivm, 2), "");
3561 }
3562
3563 size = lp_build_minify(&bld_int_vec4, size, lod, TRUE);
3564
3565 if (has_array) {
3566 LLVMValueRef layers = dynamic_state->depth(dynamic_state, gallivm,
3567 context_ptr, texture_unit);
3568 if (target == PIPE_TEXTURE_CUBE_ARRAY) {
3569 /*
3570 * It looks like GL wants number of cubes, d3d10.1 has it undefined?
3571 * Could avoid this by passing in number of cubes instead of total
3572 * number of layers (might make things easier elsewhere too).
3573 */
3574 LLVMValueRef six = lp_build_const_int32(gallivm, 6);
3575 layers = LLVMBuildSDiv(gallivm->builder, layers, six, "");
3576 }
3577 size = LLVMBuildInsertElement(gallivm->builder, size, layers,
3578 lp_build_const_int32(gallivm, dims), "");
3579 }
3580
3581 /*
3582 * d3d10 requires zero for x/y/z values (but not w, i.e. mip levels)
3583 * if level is out of bounds (note this can't cover unbound texture
3584 * here, which also requires returning zero).
3585 */
3586 if (params->explicit_lod && params->is_sviewinfo) {
3587 LLVMValueRef last_level, out, out1;
3588 struct lp_build_context leveli_bld;
3589
3590 /* everything is scalar for now */
3591 lp_build_context_init(&leveli_bld, gallivm, lp_type_int_vec(32, 32));
3592 last_level = dynamic_state->last_level(dynamic_state, gallivm,
3593 context_ptr, texture_unit);
3594
3595 out = lp_build_cmp(&leveli_bld, PIPE_FUNC_LESS, level, first_level);
3596 out1 = lp_build_cmp(&leveli_bld, PIPE_FUNC_GREATER, level, last_level);
3597 out = lp_build_or(&leveli_bld, out, out1);
3598 if (num_lods == 1) {
3599 out = lp_build_broadcast_scalar(&bld_int_vec4, out);
3600 }
3601 else {
3602 /* TODO */
3603 assert(0);
3604 }
3605 size = lp_build_andnot(&bld_int_vec4, size, out);
3606 }
3607 for (i = 0; i < dims + (has_array ? 1 : 0); i++) {
3608 params->sizes_out[i] = lp_build_extract_broadcast(gallivm, bld_int_vec4.type, params->int_type,
3609 size,
3610 lp_build_const_int32(gallivm, i));
3611 }
3612 if (params->is_sviewinfo) {
3613 for (; i < 4; i++) {
3614 params->sizes_out[i] = lp_build_const_vec(gallivm, params->int_type, 0.0);
3615 }
3616 }
3617
3618 /*
3619 * if there's no explicit_lod (buffers, rects) queries requiring nr of
3620 * mips would be illegal.
3621 */
3622 if (params->is_sviewinfo && params->explicit_lod) {
3623 struct lp_build_context bld_int_scalar;
3624 LLVMValueRef num_levels;
3625 lp_build_context_init(&bld_int_scalar, gallivm, lp_type_int(32));
3626
3627 if (static_state->level_zero_only) {
3628 num_levels = bld_int_scalar.one;
3629 }
3630 else {
3631 LLVMValueRef last_level;
3632
3633 last_level = dynamic_state->last_level(dynamic_state, gallivm,
3634 context_ptr, texture_unit);
3635 num_levels = lp_build_sub(&bld_int_scalar, last_level, first_level);
3636 num_levels = lp_build_add(&bld_int_scalar, num_levels, bld_int_scalar.one);
3637 }
3638 params->sizes_out[3] = lp_build_broadcast(gallivm, lp_build_vec_type(gallivm, params->int_type),
3639 num_levels);
3640 }
3641 }