gallivm: use common code to select texel type
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_sample_soa.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 * Texture sampling -- SoA.
31 *
32 * @author Jose Fonseca <jfonseca@vmware.com>
33 * @author Brian Paul <brianp@vmware.com>
34 */
35
36 #include "pipe/p_defines.h"
37 #include "pipe/p_state.h"
38 #include "pipe/p_shader_tokens.h"
39 #include "util/u_debug.h"
40 #include "util/u_dump.h"
41 #include "util/u_memory.h"
42 #include "util/u_math.h"
43 #include "util/format/u_format.h"
44 #include "util/u_cpu_detect.h"
45 #include "util/format_rgb9e5.h"
46 #include "lp_bld_debug.h"
47 #include "lp_bld_type.h"
48 #include "lp_bld_const.h"
49 #include "lp_bld_conv.h"
50 #include "lp_bld_arit.h"
51 #include "lp_bld_bitarit.h"
52 #include "lp_bld_logic.h"
53 #include "lp_bld_printf.h"
54 #include "lp_bld_swizzle.h"
55 #include "lp_bld_flow.h"
56 #include "lp_bld_gather.h"
57 #include "lp_bld_format.h"
58 #include "lp_bld_sample.h"
59 #include "lp_bld_sample_aos.h"
60 #include "lp_bld_struct.h"
61 #include "lp_bld_quad.h"
62 #include "lp_bld_pack.h"
63 #include "lp_bld_intr.h"
64 #include "lp_bld_misc.h"
65
66
67 /**
68 * Generate code to fetch a texel from a texture at int coords (x, y, z).
69 * The computation depends on whether the texture is 1D, 2D or 3D.
70 * The result, texel, will be float vectors:
71 * texel[0] = red values
72 * texel[1] = green values
73 * texel[2] = blue values
74 * texel[3] = alpha values
75 */
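/*
 * Illustrative note: in SoA layout each returned channel is one vector
 * across all lanes; e.g. with a 4-wide vector type,
 * texel[0] = <r0, r1, r2, r3> for the four pixels being sampled.
 */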
76 static void
77 lp_build_sample_texel_soa(struct lp_build_sample_context *bld,
78 LLVMValueRef width,
79 LLVMValueRef height,
80 LLVMValueRef depth,
81 LLVMValueRef x,
82 LLVMValueRef y,
83 LLVMValueRef z,
84 LLVMValueRef y_stride,
85 LLVMValueRef z_stride,
86 LLVMValueRef data_ptr,
87 LLVMValueRef mipoffsets,
88 LLVMValueRef texel_out[4])
89 {
90 const struct lp_static_sampler_state *static_state = bld->static_sampler_state;
91 const unsigned dims = bld->dims;
92 struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
93 LLVMBuilderRef builder = bld->gallivm->builder;
94 LLVMValueRef offset;
95 LLVMValueRef i, j;
96 LLVMValueRef use_border = NULL;
97
98 /* use_border = x < 0 || x >= width || y < 0 || y >= height */
99 if (lp_sampler_wrap_mode_uses_border_color(static_state->wrap_s,
100 static_state->min_img_filter,
101 static_state->mag_img_filter)) {
102 LLVMValueRef b1, b2;
103 b1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, x, int_coord_bld->zero);
104 b2 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, x, width);
105 use_border = LLVMBuildOr(builder, b1, b2, "b1_or_b2");
106 }
107
108 if (dims >= 2 &&
109 lp_sampler_wrap_mode_uses_border_color(static_state->wrap_t,
110 static_state->min_img_filter,
111 static_state->mag_img_filter)) {
112 LLVMValueRef b1, b2;
113 b1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, y, int_coord_bld->zero);
114 b2 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, y, height);
115 if (use_border) {
116 use_border = LLVMBuildOr(builder, use_border, b1, "ub_or_b1");
117 use_border = LLVMBuildOr(builder, use_border, b2, "ub_or_b2");
118 }
119 else {
120 use_border = LLVMBuildOr(builder, b1, b2, "b1_or_b2");
121 }
122 }
123
124 if (dims == 3 &&
125 lp_sampler_wrap_mode_uses_border_color(static_state->wrap_r,
126 static_state->min_img_filter,
127 static_state->mag_img_filter)) {
128 LLVMValueRef b1, b2;
129 b1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, z, int_coord_bld->zero);
130 b2 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, z, depth);
131 if (use_border) {
132 use_border = LLVMBuildOr(builder, use_border, b1, "ub_or_b1");
133 use_border = LLVMBuildOr(builder, use_border, b2, "ub_or_b2");
134 }
135 else {
136 use_border = LLVMBuildOr(builder, b1, b2, "b1_or_b2");
137 }
138 }
139
140 /* convert x,y,z coords to linear offset from start of texture, in bytes */
141 lp_build_sample_offset(&bld->int_coord_bld,
142 bld->format_desc,
143 x, y, z, y_stride, z_stride,
144 &offset, &i, &j);
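   /*
    * Sketch of what this computes (the exact handling lives in
    * lp_build_sample_offset): for simple uncompressed formats, per lane,
    *    offset = x * bytes_per_texel + y * y_stride + z * z_stride
    * while i/j are the within-block texel coords used by block-compressed
    * formats (zero for non-block formats).
    */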
145 if (mipoffsets) {
146 offset = lp_build_add(&bld->int_coord_bld, offset, mipoffsets);
147 }
148
149 if (use_border) {
150 /* If we can sample the border color, it means that texcoords may
151 * lie outside the bounds of the texture image. We need to do
152 * something to prevent reading out of bounds and causing a segfault.
153 *
154       * Simply AND the computed texel offset with !use_border. This zeroes
155       * the offset for any lane whose coords are out of bounds; offset zero
156       * is guaranteed to be inside the texture image.
157 */
158 offset = lp_build_andnot(&bld->int_coord_bld, offset, use_border);
159 }
160
161 lp_build_fetch_rgba_soa(bld->gallivm,
162 bld->format_desc,
163 bld->texel_type, TRUE,
164 data_ptr, offset,
165 i, j,
166 bld->cache,
167 texel_out);
168
169 /*
170 * Note: if we find an app which frequently samples the texture border
171 * we might want to implement a true conditional here to avoid sampling
172 * the texture whenever possible (since that's quite a bit of code).
173 * Ex:
174 * if (use_border) {
175 * texel = border_color;
176 * }
177 * else {
178 * texel = sample_texture(coord);
179 * }
180 * As it is now, we always sample the texture, then selectively replace
181 * the texel color results with the border color.
182 */
183
184 if (use_border) {
185 /* select texel color or border color depending on use_border. */
186 const struct util_format_description *format_desc = bld->format_desc;
187 int chan;
188 struct lp_type border_type = bld->texel_type;
189 border_type.length = 4;
190 /*
191       * Only replace channels which are actually present. The others would
192       * get optimized away eventually by the sampler_view swizzle anyway,
193       * and this is easier too.
194 */
195 for (chan = 0; chan < 4; chan++) {
196 unsigned chan_s;
197 /* reverse-map channel... */
198 if (util_format_has_stencil(format_desc)) {
199 if (chan == 0)
200 chan_s = 0;
201 else
202 break;
203 }
204 else {
205 for (chan_s = 0; chan_s < 4; chan_s++) {
206 if (chan_s == format_desc->swizzle[chan]) {
207 break;
208 }
209 }
210 }
211 if (chan_s <= 3) {
212 /* use the already clamped color */
213 LLVMValueRef idx = lp_build_const_int32(bld->gallivm, chan);
214 LLVMValueRef border_chan;
215
216 border_chan = lp_build_extract_broadcast(bld->gallivm,
217 border_type,
218 bld->texel_type,
219 bld->border_color_clamped,
220 idx);
221 texel_out[chan] = lp_build_select(&bld->texel_bld, use_border,
222 border_chan, texel_out[chan]);
223 }
224 }
225 }
226 }
227
228
229 /**
230  * Helper to compute the mirror function for the PIPE_TEX_WRAP_MIRROR_REPEAT mode.
231  * (Note that with pot sizes this could be done much more easily post-scale
232  * with some bit arithmetic.)
233 */
234 static LLVMValueRef
235 lp_build_coord_mirror(struct lp_build_sample_context *bld,
236 LLVMValueRef coord, boolean posOnly)
237 {
238 struct lp_build_context *coord_bld = &bld->coord_bld;
239 LLVMValueRef fract;
240 LLVMValueRef half = lp_build_const_vec(bld->gallivm, coord_bld->type, 0.5);
241
242 /*
243     * We can just use 2*(x - round(0.5*x)) to do all the mirroring;
244     * it all works out. (The result is in range [-1.0, 1.0], negative if
245     * the coord is in the "odd" section, otherwise positive.)
246 */
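   /*
    * Worked example: coord 1.3 -> 0.5*1.3 = 0.65, round = 1.0,
    * 2*(0.65 - 1.0) = -0.7 (odd section); coord 2.3 -> 0.5*2.3 = 1.15,
    * round = 1.0, 2*(1.15 - 1.0) = 0.3 (even section). With posOnly,
    * the abs below yields the expected mirrored coords 0.7 and 0.3.
    */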
247
248 coord = lp_build_mul(coord_bld, coord, half);
249 fract = lp_build_round(coord_bld, coord);
250 fract = lp_build_sub(coord_bld, coord, fract);
251 coord = lp_build_add(coord_bld, fract, fract);
252
253 if (posOnly) {
254 /*
255 * Theoretically it's not quite 100% accurate because the spec says
256 * that ultimately a scaled coord of -x.0 should map to int coord
257 * -x + 1 with mirroring, not -x (this does not matter for bilinear
258 * filtering).
259 */
260 coord = lp_build_abs(coord_bld, coord);
261 /* kill off NaNs */
262 /* XXX: not safe without arch rounding, fract can be anything. */
263 coord = lp_build_max_ext(coord_bld, coord, coord_bld->zero,
264 GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN);
265 }
266
267 return coord;
268 }
269
270
271 /**
272 * Helper to compute the first coord and the weight for
273 * linear wrap repeat npot textures
274 */
275 void
276 lp_build_coord_repeat_npot_linear(struct lp_build_sample_context *bld,
277 LLVMValueRef coord_f,
278 LLVMValueRef length_i,
279 LLVMValueRef length_f,
280 LLVMValueRef *coord0_i,
281 LLVMValueRef *weight_f)
282 {
283 struct lp_build_context *coord_bld = &bld->coord_bld;
284 struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
285 LLVMValueRef half = lp_build_const_vec(bld->gallivm, coord_bld->type, 0.5);
286 LLVMValueRef length_minus_one = lp_build_sub(int_coord_bld, length_i,
287 int_coord_bld->one);
288 LLVMValueRef mask;
289 /* wrap with normalized floats is just fract */
290 coord_f = lp_build_fract(coord_bld, coord_f);
291 /* mul by size and subtract 0.5 */
292 coord_f = lp_build_mul(coord_bld, coord_f, length_f);
293 coord_f = lp_build_sub(coord_bld, coord_f, half);
294 /*
295     * We avoided the 0.5/length division before the repeat wrap, so we
296     * now need to fix up the edge cases with selects.
297 */
298 /*
299 * Note we do a float (unordered) compare so we can eliminate NaNs.
300 * (Otherwise would need fract_safe above).
301 */
302 mask = lp_build_compare(coord_bld->gallivm, coord_bld->type,
303 PIPE_FUNC_LESS, coord_f, coord_bld->zero);
304
305 /* convert to int, compute lerp weight */
306 lp_build_ifloor_fract(coord_bld, coord_f, coord0_i, weight_f);
307 *coord0_i = lp_build_select(int_coord_bld, mask, length_minus_one, *coord0_i);
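   /*
    * Worked example (length 3, coord 0.1): fract = 0.1, * 3 = 0.3,
    * - 0.5 = -0.2, so ifloor gives coord0 = -1 with weight 0.8; the
    * (coord < 0) mask then selects coord0 = length - 1 = 2, so we end up
    * blending texel 2 with texel 0 (the caller derives coord1 = 0).
    */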
308 }
309
310
311 /**
312 * Build LLVM code for texture wrap mode for linear filtering.
313 * \param x0_out returns first integer texcoord
314 * \param x1_out returns second integer texcoord
315 * \param weight_out returns linear interpolation weight
316 */
317 static void
318 lp_build_sample_wrap_linear(struct lp_build_sample_context *bld,
319 boolean is_gather,
320 LLVMValueRef coord,
321 LLVMValueRef length,
322 LLVMValueRef length_f,
323 LLVMValueRef offset,
324 boolean is_pot,
325 unsigned wrap_mode,
326 LLVMValueRef *x0_out,
327 LLVMValueRef *x1_out,
328 LLVMValueRef *weight_out)
329 {
330 struct lp_build_context *coord_bld = &bld->coord_bld;
331 struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
332 LLVMBuilderRef builder = bld->gallivm->builder;
333 LLVMValueRef half = lp_build_const_vec(bld->gallivm, coord_bld->type, 0.5);
334 LLVMValueRef length_minus_one = lp_build_sub(int_coord_bld, length, int_coord_bld->one);
335 LLVMValueRef coord0, coord1, weight;
336
337 switch(wrap_mode) {
338 case PIPE_TEX_WRAP_REPEAT:
339 if (is_pot) {
340 /* mul by size and subtract 0.5 */
341 coord = lp_build_mul(coord_bld, coord, length_f);
342 coord = lp_build_sub(coord_bld, coord, half);
343 if (offset) {
344 offset = lp_build_int_to_float(coord_bld, offset);
345 coord = lp_build_add(coord_bld, coord, offset);
346 }
347 /* convert to int, compute lerp weight */
348 lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
349 coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
350 /* repeat wrap */
351 coord0 = LLVMBuildAnd(builder, coord0, length_minus_one, "");
352 coord1 = LLVMBuildAnd(builder, coord1, length_minus_one, "");
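         /*
          * The AND with length - 1 is a cheap POT repeat wrap which also
          * handles negatives via two's complement, e.g. with length 8:
          * -1 & 7 = 7 and 9 & 7 = 1.
          */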
353 }
354 else {
355 LLVMValueRef mask;
356 if (offset) {
357 offset = lp_build_int_to_float(coord_bld, offset);
358 offset = lp_build_div(coord_bld, offset, length_f);
359 coord = lp_build_add(coord_bld, coord, offset);
360 }
361 lp_build_coord_repeat_npot_linear(bld, coord,
362 length, length_f,
363 &coord0, &weight);
364 mask = lp_build_compare(int_coord_bld->gallivm, int_coord_bld->type,
365 PIPE_FUNC_NOTEQUAL, coord0, length_minus_one);
366 coord1 = LLVMBuildAnd(builder,
367 lp_build_add(int_coord_bld, coord0, int_coord_bld->one),
368 mask, "");
369 }
370 break;
371
372 case PIPE_TEX_WRAP_CLAMP:
373 if (bld->static_sampler_state->normalized_coords) {
374 /* scale coord to length */
375 coord = lp_build_mul(coord_bld, coord, length_f);
376 }
377 if (offset) {
378 offset = lp_build_int_to_float(coord_bld, offset);
379 coord = lp_build_add(coord_bld, coord, offset);
380 }
381
382 /*
383 * clamp to [0, length]
384 *
385 * Unlike some other wrap modes, this should be correct for gather
386 * too. GL_CLAMP explicitly does this clamp on the coord prior to
387 * actual wrapping (which is per sample).
388 */
389 coord = lp_build_clamp(coord_bld, coord, coord_bld->zero, length_f);
390
391 coord = lp_build_sub(coord_bld, coord, half);
392
393 /* convert to int, compute lerp weight */
394 lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
395 coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
396 break;
397
398 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
399 {
400 struct lp_build_context abs_coord_bld = bld->coord_bld;
401 abs_coord_bld.type.sign = FALSE;
402
403 if (bld->static_sampler_state->normalized_coords) {
404 /* mul by tex size */
405 coord = lp_build_mul(coord_bld, coord, length_f);
406 }
407 if (offset) {
408 offset = lp_build_int_to_float(coord_bld, offset);
409 coord = lp_build_add(coord_bld, coord, offset);
410 }
411
412 /* clamp to length max */
413 coord = lp_build_min_ext(coord_bld, coord, length_f,
414 GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN);
415 if (!is_gather) {
416 /* subtract 0.5 */
417 coord = lp_build_sub(coord_bld, coord, half);
418 /* clamp to [0, length - 0.5] */
419 coord = lp_build_max(coord_bld, coord, coord_bld->zero);
420 /* convert to int, compute lerp weight */
421 lp_build_ifloor_fract(&abs_coord_bld, coord, &coord0, &weight);
422 coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
423 } else {
424 /*
425 * The non-gather path will end up with coords 0, 1 if coord was
426 * smaller than 0.5 (with corresponding weight 0.0 so it doesn't
427 * really matter what the second coord is). But for gather, we
428 * really need to end up with coords 0, 0.
429 */
430 coord = lp_build_max(coord_bld, coord, coord_bld->zero);
431 coord0 = lp_build_sub(coord_bld, coord, half);
432 coord1 = lp_build_add(coord_bld, coord, half);
433           /* Values range over [-0.5, length_f - 0.5] and [0.5, length_f + 0.5] */
434 coord0 = lp_build_itrunc(coord_bld, coord0);
435 coord1 = lp_build_itrunc(coord_bld, coord1);
436 weight = coord_bld->undef;
437 }
438 /* coord1 = min(coord1, length-1) */
439 coord1 = lp_build_min(int_coord_bld, coord1, length_minus_one);
440 break;
441 }
442
443 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
444 if (bld->static_sampler_state->normalized_coords) {
445 /* scale coord to length */
446 coord = lp_build_mul(coord_bld, coord, length_f);
447 }
448 if (offset) {
449 offset = lp_build_int_to_float(coord_bld, offset);
450 coord = lp_build_add(coord_bld, coord, offset);
451 }
452 /*
453        * We don't need any clamp. Technically, for very large (pos or neg)
454        * or infinite values, clamp against [-length, length] would be
455 * correct, but we don't need to guarantee any specific
456 * result for such coords (the ifloor will be undefined, but for modes
457 * requiring border all resulting coords are safe).
458 */
459 coord = lp_build_sub(coord_bld, coord, half);
460 /* convert to int, compute lerp weight */
461 lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
462 coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
463 break;
464
465 case PIPE_TEX_WRAP_MIRROR_REPEAT:
466 if (offset) {
467 offset = lp_build_int_to_float(coord_bld, offset);
468 offset = lp_build_div(coord_bld, offset, length_f);
469 coord = lp_build_add(coord_bld, coord, offset);
470 }
471 if (!is_gather) {
472 /* compute mirror function */
473 coord = lp_build_coord_mirror(bld, coord, TRUE);
474
475 /* scale coord to length */
476 coord = lp_build_mul(coord_bld, coord, length_f);
477 coord = lp_build_sub(coord_bld, coord, half);
478
479 /* convert to int, compute lerp weight */
480 lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
481 coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
482
483 /* coord0 = max(coord0, 0) */
484 coord0 = lp_build_max(int_coord_bld, coord0, int_coord_bld->zero);
485 /* coord1 = min(coord1, length-1) */
486 coord1 = lp_build_min(int_coord_bld, coord1, length_minus_one);
487 } else {
488 /*
489           * This is pretty reasonable in the end; all the tests care
490           * about is nasty edge cases (scaled coords of x.5, where the individual
491           * coords are actually integers, which is REALLY tricky to get right
492           * since it works differently both for negative numbers and for
493           * even/odd cases). But with enough magic it's not too complex
494           * after all.
495           * Maybe we should try a bit-arithmetic version for POT textures though...
496 */
497 LLVMValueRef isNeg;
498 /*
499 * Wrapping just once still works, even though it means we can
500 * get "wrong" sign due to performing mirror in the middle of the
501 * two coords (because this can only happen very near the odd/even
502 * edges, so both coords will actually end up as 0 or length - 1
503 * in the end).
504           * For GL4 gather with per-sample offsets we'd need to do the mirroring
505 * per coord too.
506 */
507 coord = lp_build_coord_mirror(bld, coord, FALSE);
508 coord = lp_build_mul(coord_bld, coord, length_f);
509
510 /*
511 * NaNs should be safe here, we'll do away with them with
512 * the ones' complement plus min.
513 */
514 coord0 = lp_build_sub(coord_bld, coord, half);
515 coord0 = lp_build_ifloor(coord_bld, coord0);
516 coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
517           /* ones' complement for neg numbers (mirror(negX) = X - 1) */
518 isNeg = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS,
519 coord0, int_coord_bld->zero);
520 coord0 = lp_build_xor(int_coord_bld, coord0, isNeg);
521 isNeg = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS,
522 coord1, int_coord_bld->zero);
523 coord1 = lp_build_xor(int_coord_bld, coord1, isNeg);
524 coord0 = lp_build_min(int_coord_bld, coord0, length_minus_one);
525 coord1 = lp_build_min(int_coord_bld, coord1, length_minus_one);
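         /*
          * In two's complement ~(-x) == x - 1, so the xor with the all-ones
          * isNeg mask maps e.g. coord = -1 to 0 and coord = -2 to 1, which
          * are exactly the mirrored texel indices.
          */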
526
527 weight = coord_bld->undef;
528 }
529 break;
530
531 case PIPE_TEX_WRAP_MIRROR_CLAMP:
532 if (bld->static_sampler_state->normalized_coords) {
533 /* scale coord to length */
534 coord = lp_build_mul(coord_bld, coord, length_f);
535 }
536 if (offset) {
537 offset = lp_build_int_to_float(coord_bld, offset);
538 coord = lp_build_add(coord_bld, coord, offset);
539 }
540 /*
541        * XXX: probably not correct for gather, though I'm not
542        * entirely sure as it's poorly specified. The wrapping looks
543        * correct according to the spec (which was written against GL 1.2.1),
544        * however negative values will be swapped - GL re-specified
545        * wrapping in newer versions (no more pre-clamp except with
546        * GL_CLAMP).
547 */
548 coord = lp_build_abs(coord_bld, coord);
549
550 /* clamp to [0, length] */
551 coord = lp_build_min_ext(coord_bld, coord, length_f,
552 GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN);
553
554 coord = lp_build_sub(coord_bld, coord, half);
555
556 /* convert to int, compute lerp weight */
557 lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
558 coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
559 break;
560
561 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
562 {
563 struct lp_build_context abs_coord_bld = bld->coord_bld;
564 abs_coord_bld.type.sign = FALSE;
565
566 if (bld->static_sampler_state->normalized_coords) {
567 /* scale coord to length */
568 coord = lp_build_mul(coord_bld, coord, length_f);
569 }
570 if (offset) {
571 offset = lp_build_int_to_float(coord_bld, offset);
572 coord = lp_build_add(coord_bld, coord, offset);
573 }
574 if (!is_gather) {
575 coord = lp_build_abs(coord_bld, coord);
576
577 /* clamp to length max */
578 coord = lp_build_min_ext(coord_bld, coord, length_f,
579 GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN);
580 /* subtract 0.5 */
581 coord = lp_build_sub(coord_bld, coord, half);
582 /* clamp to [0, length - 0.5] */
583 coord = lp_build_max(coord_bld, coord, coord_bld->zero);
584
585 /* convert to int, compute lerp weight */
586 lp_build_ifloor_fract(&abs_coord_bld, coord, &coord0, &weight);
587 coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
588 /* coord1 = min(coord1, length-1) */
589 coord1 = lp_build_min(int_coord_bld, coord1, length_minus_one);
590 } else {
591 /*
592 * The non-gather path will swap coord0/1 if coord was negative,
593 * which is ok for filtering since the filter weight matches
594 * accordingly. Also, if coord is close to zero, coord0/1 will
595 * be 0 and 1, instead of 0 and 0 (again ok due to filter
596 * weight being 0.0). Both issues need to be fixed for gather.
597 */
598 LLVMValueRef isNeg;
599
600 /*
601 * Actually wanted to cheat here and use:
602 * coord1 = lp_build_iround(coord_bld, coord);
603           * but it's not good enough for some tests (even piglit
604           * textureGather is set up in a way so the coords are always
605           * .5, that is right at the crossover points).
606           * So do an ordinary sub/floor, then ones' complement
607           * for negative numbers.
608           * (Note we can't just do sub|add/abs/itrunc per coord either -
609 * because the spec demands that mirror(3.0) = 3 but
610 * mirror(-3.0) = 2.)
611 */
612 coord = lp_build_sub(coord_bld, coord, half);
613 coord0 = lp_build_ifloor(coord_bld, coord);
614 coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
615 isNeg = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, coord0,
616 int_coord_bld->zero);
617 coord0 = lp_build_xor(int_coord_bld, isNeg, coord0);
618 coord0 = lp_build_min(int_coord_bld, coord0, length_minus_one);
619
620 isNeg = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, coord1,
621 int_coord_bld->zero);
622 coord1 = lp_build_xor(int_coord_bld, isNeg, coord1);
623 coord1 = lp_build_min(int_coord_bld, coord1, length_minus_one);
624
625 weight = coord_bld->undef;
626 }
627 }
628 break;
629
630 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
631 {
632 if (bld->static_sampler_state->normalized_coords) {
633 /* scale coord to length */
634 coord = lp_build_mul(coord_bld, coord, length_f);
635 }
636 if (offset) {
637 offset = lp_build_int_to_float(coord_bld, offset);
638 coord = lp_build_add(coord_bld, coord, offset);
639 }
640 /*
641 * XXX: probably not correct for gather due to swapped
642 * order if coord is negative (same rationale as for
643 * MIRROR_CLAMP).
644 */
645 coord = lp_build_abs(coord_bld, coord);
646
647 /*
648 * We don't need any clamp. Technically, for very large
649 * (or infinite) values, clamp against length would be
650 * correct, but we don't need to guarantee any specific
651 * result for such coords (the ifloor will be undefined, but
652 * for modes requiring border all resulting coords are safe).
653 */
654 coord = lp_build_sub(coord_bld, coord, half);
655
656 /* convert to int, compute lerp weight */
657 lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
658 coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
659 }
660 break;
661
662 default:
663 assert(0);
664 coord0 = NULL;
665 coord1 = NULL;
666 weight = NULL;
667 }
668
669 *x0_out = coord0;
670 *x1_out = coord1;
671 *weight_out = weight;
672 }
673
674
675 /**
676 * Build LLVM code for texture wrap mode for nearest filtering.
677 * \param coord the incoming texcoord (nominally in [0,1])
678 * \param length the texture size along one dimension, as int vector
679 * \param length_f the texture size along one dimension, as float vector
680 * \param offset texel offset along one dimension (as int vector)
681 * \param is_pot if TRUE, length is a power of two
682 * \param wrap_mode one of PIPE_TEX_WRAP_x
683 */
684 static LLVMValueRef
685 lp_build_sample_wrap_nearest(struct lp_build_sample_context *bld,
686 LLVMValueRef coord,
687 LLVMValueRef length,
688 LLVMValueRef length_f,
689 LLVMValueRef offset,
690 boolean is_pot,
691 unsigned wrap_mode)
692 {
693 struct lp_build_context *coord_bld = &bld->coord_bld;
694 struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
695 LLVMBuilderRef builder = bld->gallivm->builder;
696 LLVMValueRef length_minus_one = lp_build_sub(int_coord_bld, length, int_coord_bld->one);
697 LLVMValueRef icoord;
698
699 switch(wrap_mode) {
700 case PIPE_TEX_WRAP_REPEAT:
701 if (is_pot) {
702 coord = lp_build_mul(coord_bld, coord, length_f);
703 icoord = lp_build_ifloor(coord_bld, coord);
704 if (offset) {
705 icoord = lp_build_add(int_coord_bld, icoord, offset);
706 }
707 icoord = LLVMBuildAnd(builder, icoord, length_minus_one, "");
708 }
709 else {
710 if (offset) {
711 offset = lp_build_int_to_float(coord_bld, offset);
712 offset = lp_build_div(coord_bld, offset, length_f);
713 coord = lp_build_add(coord_bld, coord, offset);
714 }
715 /* take fraction, unnormalize */
716 coord = lp_build_fract_safe(coord_bld, coord);
717 coord = lp_build_mul(coord_bld, coord, length_f);
718 icoord = lp_build_itrunc(coord_bld, coord);
719 }
720 break;
721
722 case PIPE_TEX_WRAP_CLAMP:
723 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
724 if (bld->static_sampler_state->normalized_coords) {
725 /* scale coord to length */
726 coord = lp_build_mul(coord_bld, coord, length_f);
727 }
728
729 if (offset) {
730 offset = lp_build_int_to_float(coord_bld, offset);
731 coord = lp_build_add(coord_bld, coord, offset);
732 }
733 /* floor */
734 /* use itrunc instead since we clamp to 0 anyway */
735 icoord = lp_build_itrunc(coord_bld, coord);
736
737 /* clamp to [0, length - 1]. */
738 icoord = lp_build_clamp(int_coord_bld, icoord, int_coord_bld->zero,
739 length_minus_one);
740 break;
741
742 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
743 if (bld->static_sampler_state->normalized_coords) {
744 /* scale coord to length */
745 coord = lp_build_mul(coord_bld, coord, length_f);
746 }
747 /* no clamp necessary, border masking will handle this */
748 icoord = lp_build_ifloor(coord_bld, coord);
749 if (offset) {
750 icoord = lp_build_add(int_coord_bld, icoord, offset);
751 }
752 break;
753
754 case PIPE_TEX_WRAP_MIRROR_REPEAT:
755 if (offset) {
756 offset = lp_build_int_to_float(coord_bld, offset);
757 offset = lp_build_div(coord_bld, offset, length_f);
758 coord = lp_build_add(coord_bld, coord, offset);
759 }
760 /* compute mirror function */
761 coord = lp_build_coord_mirror(bld, coord, TRUE);
762
763 /* scale coord to length */
764 assert(bld->static_sampler_state->normalized_coords);
765 coord = lp_build_mul(coord_bld, coord, length_f);
766
767 /* itrunc == ifloor here */
768 icoord = lp_build_itrunc(coord_bld, coord);
769
770 /* clamp to [0, length - 1] */
771 icoord = lp_build_min(int_coord_bld, icoord, length_minus_one);
772 break;
773
774 case PIPE_TEX_WRAP_MIRROR_CLAMP:
775 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
776 if (bld->static_sampler_state->normalized_coords) {
777 /* scale coord to length */
778 coord = lp_build_mul(coord_bld, coord, length_f);
779 }
780 if (offset) {
781 offset = lp_build_int_to_float(coord_bld, offset);
782 coord = lp_build_add(coord_bld, coord, offset);
783 }
784 coord = lp_build_abs(coord_bld, coord);
785
786 /* itrunc == ifloor here */
787 icoord = lp_build_itrunc(coord_bld, coord);
788 /*
789 * Use unsigned min due to possible undef values (NaNs, overflow)
790 */
791 {
792 struct lp_build_context abs_coord_bld = *int_coord_bld;
793 abs_coord_bld.type.sign = FALSE;
794 /* clamp to [0, length - 1] */
795 icoord = lp_build_min(&abs_coord_bld, icoord, length_minus_one);
796 }
797 break;
798
799 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
800 if (bld->static_sampler_state->normalized_coords) {
801 /* scale coord to length */
802 coord = lp_build_mul(coord_bld, coord, length_f);
803 }
804 if (offset) {
805 offset = lp_build_int_to_float(coord_bld, offset);
806 coord = lp_build_add(coord_bld, coord, offset);
807 }
808 coord = lp_build_abs(coord_bld, coord);
809
810 /* itrunc == ifloor here */
811 icoord = lp_build_itrunc(coord_bld, coord);
812 break;
813
814 default:
815 assert(0);
816 icoord = NULL;
817 }
818
819 return icoord;
820 }
821
822
823 /**
824 * Do shadow test/comparison.
825 * \param p shadow ref value
826 * \param texel the texel to compare against
827 */
828 static LLVMValueRef
829 lp_build_sample_comparefunc(struct lp_build_sample_context *bld,
830 LLVMValueRef p,
831 LLVMValueRef texel)
832 {
833 struct lp_build_context *texel_bld = &bld->texel_bld;
834 LLVMValueRef res;
835
836 if (0) {
837 //lp_build_print_value(bld->gallivm, "shadow cmp coord", p);
838 lp_build_print_value(bld->gallivm, "shadow cmp texel", texel);
839 }
840
841 /* result = (p FUNC texel) ? 1 : 0 */
842 /*
843 * honor d3d10 floating point rules here, which state that comparisons
844 * are ordered except NOT_EQUAL which is unordered.
845 */
846 if (bld->static_sampler_state->compare_func != PIPE_FUNC_NOTEQUAL) {
847 res = lp_build_cmp_ordered(texel_bld, bld->static_sampler_state->compare_func,
848 p, texel);
849 }
850 else {
851 res = lp_build_cmp(texel_bld, bld->static_sampler_state->compare_func,
852 p, texel);
853 }
854 return res;
855 }
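/*
 * Example: with compare_func = PIPE_FUNC_LESS, ref p = 0.5 and a fetched
 * depth texel of 0.7, the comparison passes and that lane's result is ~0;
 * callers turn the mask into 1.0/0.0 via select, or feed it to the
 * masklerp helpers below for a filtered (PCF-style) result.
 */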
856
857
858 /**
859 * Generate code to sample a mipmap level with nearest filtering.
860 * If sampling a cube texture, r = cube face in [0,5].
861 */
862 static void
863 lp_build_sample_image_nearest(struct lp_build_sample_context *bld,
864 LLVMValueRef size,
865 LLVMValueRef row_stride_vec,
866 LLVMValueRef img_stride_vec,
867 LLVMValueRef data_ptr,
868 LLVMValueRef mipoffsets,
869 const LLVMValueRef *coords,
870 const LLVMValueRef *offsets,
871 LLVMValueRef colors_out[4])
872 {
873 const unsigned dims = bld->dims;
874 LLVMValueRef width_vec;
875 LLVMValueRef height_vec;
876 LLVMValueRef depth_vec;
877 LLVMValueRef flt_size;
878 LLVMValueRef flt_width_vec;
879 LLVMValueRef flt_height_vec;
880 LLVMValueRef flt_depth_vec;
881 LLVMValueRef x, y = NULL, z = NULL;
882
883 lp_build_extract_image_sizes(bld,
884 &bld->int_size_bld,
885 bld->int_coord_type,
886 size,
887 &width_vec, &height_vec, &depth_vec);
888
889 flt_size = lp_build_int_to_float(&bld->float_size_bld, size);
890
891 lp_build_extract_image_sizes(bld,
892 &bld->float_size_bld,
893 bld->coord_type,
894 flt_size,
895 &flt_width_vec, &flt_height_vec, &flt_depth_vec);
896
897 /*
898 * Compute integer texcoords.
899 */
900 x = lp_build_sample_wrap_nearest(bld, coords[0], width_vec,
901 flt_width_vec, offsets[0],
902 bld->static_texture_state->pot_width,
903 bld->static_sampler_state->wrap_s);
904 lp_build_name(x, "tex.x.wrapped");
905
906 if (dims >= 2) {
907 y = lp_build_sample_wrap_nearest(bld, coords[1], height_vec,
908 flt_height_vec, offsets[1],
909 bld->static_texture_state->pot_height,
910 bld->static_sampler_state->wrap_t);
911 lp_build_name(y, "tex.y.wrapped");
912
913 if (dims == 3) {
914 z = lp_build_sample_wrap_nearest(bld, coords[2], depth_vec,
915 flt_depth_vec, offsets[2],
916 bld->static_texture_state->pot_depth,
917 bld->static_sampler_state->wrap_r);
918 lp_build_name(z, "tex.z.wrapped");
919 }
920 }
921 if (has_layer_coord(bld->static_texture_state->target)) {
922 if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) {
923 /* add cube layer to face */
924 z = lp_build_add(&bld->int_coord_bld, coords[2], coords[3]);
925 }
926 else {
927 z = coords[2];
928 }
929 lp_build_name(z, "tex.z.layer");
930 }
931
932 /*
933 * Get texture colors.
934 */
935 lp_build_sample_texel_soa(bld,
936 width_vec, height_vec, depth_vec,
937 x, y, z,
938 row_stride_vec, img_stride_vec,
939 data_ptr, mipoffsets, colors_out);
940
941 if (bld->static_sampler_state->compare_mode != PIPE_TEX_COMPARE_NONE) {
942 LLVMValueRef cmpval;
943 cmpval = lp_build_sample_comparefunc(bld, coords[4], colors_out[0]);
944       /* this is really just an AND of 1.0 with cmpval, but llvm is clever enough */
945 colors_out[0] = lp_build_select(&bld->texel_bld, cmpval,
946 bld->texel_bld.one, bld->texel_bld.zero);
947 colors_out[1] = colors_out[2] = colors_out[3] = colors_out[0];
948 }
949
950 }
951
952
953 /**
954  * Like a lerp, but the inputs are 0/~0 masks, so we can simplify slightly.
955 */
956 static LLVMValueRef
957 lp_build_masklerp(struct lp_build_context *bld,
958 LLVMValueRef weight,
959 LLVMValueRef mask0,
960 LLVMValueRef mask1)
961 {
962 struct gallivm_state *gallivm = bld->gallivm;
963 LLVMBuilderRef builder = gallivm->builder;
964 LLVMValueRef weight2;
965
966 weight2 = lp_build_sub(bld, bld->one, weight);
967 weight = LLVMBuildBitCast(builder, weight,
968 lp_build_int_vec_type(gallivm, bld->type), "");
969 weight2 = LLVMBuildBitCast(builder, weight2,
970 lp_build_int_vec_type(gallivm, bld->type), "");
971 weight = LLVMBuildAnd(builder, weight, mask1, "");
972 weight2 = LLVMBuildAnd(builder, weight2, mask0, "");
973 weight = LLVMBuildBitCast(builder, weight, bld->vec_type, "");
974 weight2 = LLVMBuildBitCast(builder, weight2, bld->vec_type, "");
975 return lp_build_add(bld, weight, weight2);
976 }
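/*
 * Since mask0/mask1 are all-zeros or all-ones bit patterns, ANDing them
 * with the weights' float bits yields either 0.0 or the weight itself,
 * so the final add computes (1-w)*[mask0] + w*[mask1] without multiplies.
 * E.g. w = 0.25, mask0 = ~0, mask1 = 0 gives 0.75.
 */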
977
978 /**
979  * Like a 2d lerp, but the inputs are 0/~0 masks, so we can simplify slightly.
980 */
981 static LLVMValueRef
982 lp_build_masklerp2d(struct lp_build_context *bld,
983 LLVMValueRef weight0,
984 LLVMValueRef weight1,
985 LLVMValueRef mask00,
986 LLVMValueRef mask01,
987 LLVMValueRef mask10,
988 LLVMValueRef mask11)
989 {
990 LLVMValueRef val0 = lp_build_masklerp(bld, weight0, mask00, mask01);
991 LLVMValueRef val1 = lp_build_masklerp(bld, weight0, mask10, mask11);
992 return lp_build_lerp(bld, weight1, val0, val1, 0);
993 }
994
995 /*
996  * This is a fair amount of code for something OpenGL merely recommends
997  * but does not require.
998 */
999 #define ACCURATE_CUBE_CORNERS 1
1000
1001 /**
1002 * Generate code to sample a mipmap level with linear filtering.
1003 * If sampling a cube texture, r = cube face in [0,5].
1004  * If linear_mask is present, only pixels having their mask set
1005  * will receive linear filtering; the rest will use nearest.
1006 */
1007 static void
1008 lp_build_sample_image_linear(struct lp_build_sample_context *bld,
1009 boolean is_gather,
1010 LLVMValueRef size,
1011 LLVMValueRef linear_mask,
1012 LLVMValueRef row_stride_vec,
1013 LLVMValueRef img_stride_vec,
1014 LLVMValueRef data_ptr,
1015 LLVMValueRef mipoffsets,
1016 const LLVMValueRef *coords,
1017 const LLVMValueRef *offsets,
1018 LLVMValueRef colors_out[4])
1019 {
1020 LLVMBuilderRef builder = bld->gallivm->builder;
1021 struct lp_build_context *ivec_bld = &bld->int_coord_bld;
1022 struct lp_build_context *coord_bld = &bld->coord_bld;
1023 struct lp_build_context *texel_bld = &bld->texel_bld;
1024 const unsigned dims = bld->dims;
1025 LLVMValueRef width_vec;
1026 LLVMValueRef height_vec;
1027 LLVMValueRef depth_vec;
1028 LLVMValueRef flt_size;
1029 LLVMValueRef flt_width_vec;
1030 LLVMValueRef flt_height_vec;
1031 LLVMValueRef flt_depth_vec;
1032 LLVMValueRef fall_off[4], have_corners;
1033 LLVMValueRef z1 = NULL;
1034 LLVMValueRef z00 = NULL, z01 = NULL, z10 = NULL, z11 = NULL;
1035 LLVMValueRef x00 = NULL, x01 = NULL, x10 = NULL, x11 = NULL;
1036 LLVMValueRef y00 = NULL, y01 = NULL, y10 = NULL, y11 = NULL;
1037 LLVMValueRef s_fpart, t_fpart = NULL, r_fpart = NULL;
1038 LLVMValueRef xs[4], ys[4], zs[4];
1039 LLVMValueRef neighbors[2][2][4];
1040 int chan, texel_index;
1041 boolean seamless_cube_filter, accurate_cube_corners;
1042 unsigned chan_swiz = bld->static_texture_state->swizzle_r;
1043
1044 if (is_gather) {
1045 switch (bld->gather_comp) {
1046 case 0: chan_swiz = bld->static_texture_state->swizzle_r; break;
1047 case 1: chan_swiz = bld->static_texture_state->swizzle_g; break;
1048 case 2: chan_swiz = bld->static_texture_state->swizzle_b; break;
1049 case 3: chan_swiz = bld->static_texture_state->swizzle_a; break;
1050 default:
1051 break;
1052 }
1053 }
1054
1055 seamless_cube_filter = (bld->static_texture_state->target == PIPE_TEXTURE_CUBE ||
1056 bld->static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) &&
1057 bld->static_sampler_state->seamless_cube_map;
1058
1059 /*
1060 * Disable accurate cube corners for integer textures, which should only
1061 * get here in the gather path.
1062 */
1063 accurate_cube_corners = ACCURATE_CUBE_CORNERS && seamless_cube_filter &&
1064 !util_format_is_pure_integer(bld->static_texture_state->format);
1065
1066 lp_build_extract_image_sizes(bld,
1067 &bld->int_size_bld,
1068 bld->int_coord_type,
1069 size,
1070 &width_vec, &height_vec, &depth_vec);
1071
1072 flt_size = lp_build_int_to_float(&bld->float_size_bld, size);
1073
1074 lp_build_extract_image_sizes(bld,
1075 &bld->float_size_bld,
1076 bld->coord_type,
1077 flt_size,
1078 &flt_width_vec, &flt_height_vec, &flt_depth_vec);
1079
1080 /*
1081 * Compute integer texcoords.
1082 */
1083
1084 if (!seamless_cube_filter) {
1085 lp_build_sample_wrap_linear(bld, is_gather, coords[0], width_vec,
1086 flt_width_vec, offsets[0],
1087 bld->static_texture_state->pot_width,
1088 bld->static_sampler_state->wrap_s,
1089 &x00, &x01, &s_fpart);
1090 lp_build_name(x00, "tex.x0.wrapped");
1091 lp_build_name(x01, "tex.x1.wrapped");
1092 x10 = x00;
1093 x11 = x01;
1094
1095 if (dims >= 2) {
1096 lp_build_sample_wrap_linear(bld, is_gather, coords[1], height_vec,
1097 flt_height_vec, offsets[1],
1098 bld->static_texture_state->pot_height,
1099 bld->static_sampler_state->wrap_t,
1100 &y00, &y10, &t_fpart);
1101 lp_build_name(y00, "tex.y0.wrapped");
1102 lp_build_name(y10, "tex.y1.wrapped");
1103 y01 = y00;
1104 y11 = y10;
1105
1106 if (dims == 3) {
1107 lp_build_sample_wrap_linear(bld, is_gather, coords[2], depth_vec,
1108 flt_depth_vec, offsets[2],
1109 bld->static_texture_state->pot_depth,
1110 bld->static_sampler_state->wrap_r,
1111 &z00, &z1, &r_fpart);
1112 z01 = z10 = z11 = z00;
1113 lp_build_name(z00, "tex.z0.wrapped");
1114 lp_build_name(z1, "tex.z1.wrapped");
1115 }
1116 }
1117 if (has_layer_coord(bld->static_texture_state->target)) {
1118 if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) {
1119 /* add cube layer to face */
1120 z00 = z01 = z10 = z11 = z1 =
1121 lp_build_add(&bld->int_coord_bld, coords[2], coords[3]);
1122 }
1123 else {
1124 z00 = z01 = z10 = z11 = z1 = coords[2]; /* cube face or layer */
1125 }
1126 lp_build_name(z00, "tex.z0.layer");
1127 lp_build_name(z1, "tex.z1.layer");
1128 }
1129 }
1130 else {
1131 struct lp_build_if_state edge_if;
1132 LLVMTypeRef int1t;
1133 LLVMValueRef new_faces[4], new_xcoords[4][2], new_ycoords[4][2];
1134 LLVMValueRef coord0, coord1, have_edge, have_corner;
1135 LLVMValueRef fall_off_ym_notxm, fall_off_ym_notxp, fall_off_x, fall_off_y;
1136 LLVMValueRef fall_off_yp_notxm, fall_off_yp_notxp;
1137 LLVMValueRef x0, x1, y0, y1, y0_clamped, y1_clamped;
1138 LLVMValueRef face = coords[2];
1139 LLVMValueRef half = lp_build_const_vec(bld->gallivm, coord_bld->type, 0.5f);
1140 LLVMValueRef length_minus_one = lp_build_sub(ivec_bld, width_vec, ivec_bld->one);
1141 /* XXX drop height calcs. Could (should) do this without seamless filtering too */
1142 height_vec = width_vec;
1143 flt_height_vec = flt_width_vec;
1144
1145 /* XXX the overflow logic is actually sort of duplicated with trilinear,
1146 * since an overflow in one mip should also have a corresponding overflow
1147 * in another.
1148 */
1149 /* should always have normalized coords, and offsets are undefined */
1150 assert(bld->static_sampler_state->normalized_coords);
1151 /*
1152        * The coords should all be in [0,1]; however, we can have NaNs,
1153 * which will wreak havoc. In particular the y1_clamped value below
1154 * can be -INT_MAX (on x86) and be propagated right through (probably
1155 * other values might be bogus in the end too).
1156 * So kill off the NaNs here.
1157 */
1158 coord0 = lp_build_max_ext(coord_bld, coords[0], coord_bld->zero,
1159 GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN);
1160 coord0 = lp_build_mul(coord_bld, coord0, flt_width_vec);
1161 /* instead of clamp, build mask if overflowed */
1162 coord0 = lp_build_sub(coord_bld, coord0, half);
1163 /* convert to int, compute lerp weight */
1164 /* not ideal with AVX (and no AVX2) */
1165 lp_build_ifloor_fract(coord_bld, coord0, &x0, &s_fpart);
1166 x1 = lp_build_add(ivec_bld, x0, ivec_bld->one);
1167 coord1 = lp_build_max_ext(coord_bld, coords[1], coord_bld->zero,
1168 GALLIVM_NAN_RETURN_OTHER_SECOND_NONNAN);
1169 coord1 = lp_build_mul(coord_bld, coord1, flt_height_vec);
1170 coord1 = lp_build_sub(coord_bld, coord1, half);
1171 lp_build_ifloor_fract(coord_bld, coord1, &y0, &t_fpart);
1172 y1 = lp_build_add(ivec_bld, y0, ivec_bld->one);
1173
1174 fall_off[0] = lp_build_cmp(ivec_bld, PIPE_FUNC_LESS, x0, ivec_bld->zero);
1175 fall_off[1] = lp_build_cmp(ivec_bld, PIPE_FUNC_GREATER, x1, length_minus_one);
1176 fall_off[2] = lp_build_cmp(ivec_bld, PIPE_FUNC_LESS, y0, ivec_bld->zero);
1177 fall_off[3] = lp_build_cmp(ivec_bld, PIPE_FUNC_GREATER, y1, length_minus_one);
1178
1179 fall_off_x = lp_build_or(ivec_bld, fall_off[0], fall_off[1]);
1180 fall_off_y = lp_build_or(ivec_bld, fall_off[2], fall_off[3]);
1181 have_edge = lp_build_or(ivec_bld, fall_off_x, fall_off_y);
1182 have_edge = lp_build_any_true_range(ivec_bld, ivec_bld->type.length, have_edge);
1183
1184 /* needed for accurate corner filtering branch later, rely on 0 init */
1185 int1t = LLVMInt1TypeInContext(bld->gallivm->context);
1186 have_corners = lp_build_alloca(bld->gallivm, int1t, "have_corner");
1187
1188 for (texel_index = 0; texel_index < 4; texel_index++) {
1189 xs[texel_index] = lp_build_alloca(bld->gallivm, ivec_bld->vec_type, "xs");
1190 ys[texel_index] = lp_build_alloca(bld->gallivm, ivec_bld->vec_type, "ys");
1191 zs[texel_index] = lp_build_alloca(bld->gallivm, ivec_bld->vec_type, "zs");
1192 }
1193
1194 lp_build_if(&edge_if, bld->gallivm, have_edge);
1195
1196 have_corner = lp_build_and(ivec_bld, fall_off_x, fall_off_y);
1197 have_corner = lp_build_any_true_range(ivec_bld, ivec_bld->type.length, have_corner);
1198 LLVMBuildStore(builder, have_corner, have_corners);
1199
1200 /*
1201 * Need to feed clamped values here for cheap corner handling,
1202 * but only for y coord (as when falling off both edges we only
1203 * fall off the x one) - this should be sufficient.
1204 */
1205 y0_clamped = lp_build_max(ivec_bld, y0, ivec_bld->zero);
1206 y1_clamped = lp_build_min(ivec_bld, y1, length_minus_one);
1207
1208 /*
1209 * Get all possible new coords.
1210 */
1211 lp_build_cube_new_coords(ivec_bld, face,
1212 x0, x1, y0_clamped, y1_clamped,
1213 length_minus_one,
1214 new_faces, new_xcoords, new_ycoords);
1215
1216 /* handle fall off x-, x+ direction */
1217       /* determine new coords, face (the two fall_off vars can't both be true at the same time) */
1218 x00 = lp_build_select(ivec_bld, fall_off[0], new_xcoords[0][0], x0);
1219 y00 = lp_build_select(ivec_bld, fall_off[0], new_ycoords[0][0], y0_clamped);
1220 x10 = lp_build_select(ivec_bld, fall_off[0], new_xcoords[0][1], x0);
1221 y10 = lp_build_select(ivec_bld, fall_off[0], new_ycoords[0][1], y1_clamped);
1222 x01 = lp_build_select(ivec_bld, fall_off[1], new_xcoords[1][0], x1);
1223 y01 = lp_build_select(ivec_bld, fall_off[1], new_ycoords[1][0], y0_clamped);
1224 x11 = lp_build_select(ivec_bld, fall_off[1], new_xcoords[1][1], x1);
1225 y11 = lp_build_select(ivec_bld, fall_off[1], new_ycoords[1][1], y1_clamped);
1226
1227 z00 = z10 = lp_build_select(ivec_bld, fall_off[0], new_faces[0], face);
1228 z01 = z11 = lp_build_select(ivec_bld, fall_off[1], new_faces[1], face);
1229
1230 /* handle fall off y-, y+ direction */
1231 /*
1232 * Cheap corner logic: just hack up things so a texel doesn't fall
1233 * off both sides (which means filter weights will be wrong but we'll only
1234 * use valid texels in the filter).
1235 * This means however (y) coords must additionally be clamped (see above).
1236 * This corner handling should be fully OpenGL (but not d3d10) compliant.
1237 */
1238 fall_off_ym_notxm = lp_build_andnot(ivec_bld, fall_off[2], fall_off[0]);
1239 fall_off_ym_notxp = lp_build_andnot(ivec_bld, fall_off[2], fall_off[1]);
1240 fall_off_yp_notxm = lp_build_andnot(ivec_bld, fall_off[3], fall_off[0]);
1241 fall_off_yp_notxp = lp_build_andnot(ivec_bld, fall_off[3], fall_off[1]);
1242
1243 x00 = lp_build_select(ivec_bld, fall_off_ym_notxm, new_xcoords[2][0], x00);
1244 y00 = lp_build_select(ivec_bld, fall_off_ym_notxm, new_ycoords[2][0], y00);
1245 x01 = lp_build_select(ivec_bld, fall_off_ym_notxp, new_xcoords[2][1], x01);
1246 y01 = lp_build_select(ivec_bld, fall_off_ym_notxp, new_ycoords[2][1], y01);
1247 x10 = lp_build_select(ivec_bld, fall_off_yp_notxm, new_xcoords[3][0], x10);
1248 y10 = lp_build_select(ivec_bld, fall_off_yp_notxm, new_ycoords[3][0], y10);
1249 x11 = lp_build_select(ivec_bld, fall_off_yp_notxp, new_xcoords[3][1], x11);
1250 y11 = lp_build_select(ivec_bld, fall_off_yp_notxp, new_ycoords[3][1], y11);
1251
1252 z00 = lp_build_select(ivec_bld, fall_off_ym_notxm, new_faces[2], z00);
1253 z01 = lp_build_select(ivec_bld, fall_off_ym_notxp, new_faces[2], z01);
1254 z10 = lp_build_select(ivec_bld, fall_off_yp_notxm, new_faces[3], z10);
1255 z11 = lp_build_select(ivec_bld, fall_off_yp_notxp, new_faces[3], z11);
1256
1257 if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) {
1258 /* now can add cube layer to face (per sample) */
1259 z00 = lp_build_add(ivec_bld, z00, coords[3]);
1260 z01 = lp_build_add(ivec_bld, z01, coords[3]);
1261 z10 = lp_build_add(ivec_bld, z10, coords[3]);
1262 z11 = lp_build_add(ivec_bld, z11, coords[3]);
1263 }
1264
1265 LLVMBuildStore(builder, x00, xs[0]);
1266 LLVMBuildStore(builder, x01, xs[1]);
1267 LLVMBuildStore(builder, x10, xs[2]);
1268 LLVMBuildStore(builder, x11, xs[3]);
1269 LLVMBuildStore(builder, y00, ys[0]);
1270 LLVMBuildStore(builder, y01, ys[1]);
1271 LLVMBuildStore(builder, y10, ys[2]);
1272 LLVMBuildStore(builder, y11, ys[3]);
1273 LLVMBuildStore(builder, z00, zs[0]);
1274 LLVMBuildStore(builder, z01, zs[1]);
1275 LLVMBuildStore(builder, z10, zs[2]);
1276 LLVMBuildStore(builder, z11, zs[3]);
1277
1278 lp_build_else(&edge_if);
1279
1280 LLVMBuildStore(builder, x0, xs[0]);
1281 LLVMBuildStore(builder, x1, xs[1]);
1282 LLVMBuildStore(builder, x0, xs[2]);
1283 LLVMBuildStore(builder, x1, xs[3]);
1284 LLVMBuildStore(builder, y0, ys[0]);
1285 LLVMBuildStore(builder, y0, ys[1]);
1286 LLVMBuildStore(builder, y1, ys[2]);
1287 LLVMBuildStore(builder, y1, ys[3]);
1288 if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) {
1289 LLVMValueRef cube_layer = lp_build_add(ivec_bld, face, coords[3]);
1290 LLVMBuildStore(builder, cube_layer, zs[0]);
1291 LLVMBuildStore(builder, cube_layer, zs[1]);
1292 LLVMBuildStore(builder, cube_layer, zs[2]);
1293 LLVMBuildStore(builder, cube_layer, zs[3]);
1294 }
1295 else {
1296 LLVMBuildStore(builder, face, zs[0]);
1297 LLVMBuildStore(builder, face, zs[1]);
1298 LLVMBuildStore(builder, face, zs[2]);
1299 LLVMBuildStore(builder, face, zs[3]);
1300 }
1301
1302 lp_build_endif(&edge_if);
1303
1304 x00 = LLVMBuildLoad(builder, xs[0], "");
1305 x01 = LLVMBuildLoad(builder, xs[1], "");
1306 x10 = LLVMBuildLoad(builder, xs[2], "");
1307 x11 = LLVMBuildLoad(builder, xs[3], "");
1308 y00 = LLVMBuildLoad(builder, ys[0], "");
1309 y01 = LLVMBuildLoad(builder, ys[1], "");
1310 y10 = LLVMBuildLoad(builder, ys[2], "");
1311 y11 = LLVMBuildLoad(builder, ys[3], "");
1312 z00 = LLVMBuildLoad(builder, zs[0], "");
1313 z01 = LLVMBuildLoad(builder, zs[1], "");
1314 z10 = LLVMBuildLoad(builder, zs[2], "");
1315 z11 = LLVMBuildLoad(builder, zs[3], "");
1316 }
1317
1318 if (linear_mask) {
1319 /*
1320 * Whack filter weights into place. Whatever texel had more weight is
1321        * the one which would have been selected by nearest filtering, hence
1322 * just use 100% weight for it.
1323 */
1324 struct lp_build_context *c_bld = &bld->coord_bld;
1325 LLVMValueRef w1_mask, w1_weight;
1326 LLVMValueRef half = lp_build_const_vec(bld->gallivm, c_bld->type, 0.5f);
1327
1328 w1_mask = lp_build_cmp(c_bld, PIPE_FUNC_GREATER, s_fpart, half);
1329 /* this select is really just a "and" */
1330 w1_weight = lp_build_select(c_bld, w1_mask, c_bld->one, c_bld->zero);
1331 s_fpart = lp_build_select(c_bld, linear_mask, s_fpart, w1_weight);
1332 if (dims >= 2) {
1333 w1_mask = lp_build_cmp(c_bld, PIPE_FUNC_GREATER, t_fpart, half);
1334 w1_weight = lp_build_select(c_bld, w1_mask, c_bld->one, c_bld->zero);
1335 t_fpart = lp_build_select(c_bld, linear_mask, t_fpart, w1_weight);
1336 if (dims == 3) {
1337 w1_mask = lp_build_cmp(c_bld, PIPE_FUNC_GREATER, r_fpart, half);
1338 w1_weight = lp_build_select(c_bld, w1_mask, c_bld->one, c_bld->zero);
1339 r_fpart = lp_build_select(c_bld, linear_mask, r_fpart, w1_weight);
1340 }
1341 }
1342 }
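   /*
    * Example: a lane with linear_mask clear and s_fpart = 0.7 gets weight
    * 1.0 (texel 1 is what nearest filtering would have picked), while
    * s_fpart = 0.3 gets weight 0.0, collapsing the lerp to a single texel.
    */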
1343
1344 /*
1345 * Get texture colors.
1346 */
1347 /* get x0/x1 texels */
1348 lp_build_sample_texel_soa(bld,
1349 width_vec, height_vec, depth_vec,
1350 x00, y00, z00,
1351 row_stride_vec, img_stride_vec,
1352 data_ptr, mipoffsets, neighbors[0][0]);
1353 lp_build_sample_texel_soa(bld,
1354 width_vec, height_vec, depth_vec,
1355 x01, y01, z01,
1356 row_stride_vec, img_stride_vec,
1357 data_ptr, mipoffsets, neighbors[0][1]);
1358
1359 if (dims == 1) {
1360 assert(!is_gather);
1361 if (bld->static_sampler_state->compare_mode == PIPE_TEX_COMPARE_NONE) {
1362 /* Interpolate two samples from 1D image to produce one color */
1363 for (chan = 0; chan < 4; chan++) {
1364 colors_out[chan] = lp_build_lerp(texel_bld, s_fpart,
1365 neighbors[0][0][chan],
1366 neighbors[0][1][chan],
1367 0);
1368 }
1369 }
1370 else {
1371 LLVMValueRef cmpval0, cmpval1;
1372 cmpval0 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][0][0]);
1373 cmpval1 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][1][0]);
1374 /* simplified lerp, AND mask with weight and add */
1375 colors_out[0] = lp_build_masklerp(texel_bld, s_fpart,
1376 cmpval0, cmpval1);
1377 colors_out[1] = colors_out[2] = colors_out[3] = colors_out[0];
1378 }
1379 }
1380 else {
1381 /* 2D/3D texture */
1382 struct lp_build_if_state corner_if;
1383 LLVMValueRef colors0[4], colorss[4];
1384
1385 /* get x0/x1 texels at y1 */
1386 lp_build_sample_texel_soa(bld,
1387 width_vec, height_vec, depth_vec,
1388 x10, y10, z10,
1389 row_stride_vec, img_stride_vec,
1390 data_ptr, mipoffsets, neighbors[1][0]);
1391 lp_build_sample_texel_soa(bld,
1392 width_vec, height_vec, depth_vec,
1393 x11, y11, z11,
1394 row_stride_vec, img_stride_vec,
1395 data_ptr, mipoffsets, neighbors[1][1]);
1396
1397 /*
1398        * To avoid having to duplicate the linear_mask / fetch code, use
1399        * another branch here (with the corner condition, though edge would
1400        * work as well).
1401 */
1402 if (accurate_cube_corners) {
1403 LLVMValueRef c00, c01, c10, c11, c00f, c01f, c10f, c11f;
1404 LLVMValueRef have_corner, one_third;
1405
1406 colorss[0] = lp_build_alloca(bld->gallivm, coord_bld->vec_type, "cs0");
1407 colorss[1] = lp_build_alloca(bld->gallivm, coord_bld->vec_type, "cs1");
1408 colorss[2] = lp_build_alloca(bld->gallivm, coord_bld->vec_type, "cs2");
1409 colorss[3] = lp_build_alloca(bld->gallivm, coord_bld->vec_type, "cs3");
1410
1411 have_corner = LLVMBuildLoad(builder, have_corners, "");
1412
1413 lp_build_if(&corner_if, bld->gallivm, have_corner);
1414
1415 one_third = lp_build_const_vec(bld->gallivm, coord_bld->type,
1416 1.0f/3.0f);
1417
1418 /* find corner */
1419 c00 = lp_build_and(ivec_bld, fall_off[0], fall_off[2]);
1420 c00f = LLVMBuildBitCast(builder, c00, coord_bld->vec_type, "");
1421 c01 = lp_build_and(ivec_bld, fall_off[1], fall_off[2]);
1422 c01f = LLVMBuildBitCast(builder, c01, coord_bld->vec_type, "");
1423 c10 = lp_build_and(ivec_bld, fall_off[0], fall_off[3]);
1424 c10f = LLVMBuildBitCast(builder, c10, coord_bld->vec_type, "");
1425 c11 = lp_build_and(ivec_bld, fall_off[1], fall_off[3]);
1426 c11f = LLVMBuildBitCast(builder, c11, coord_bld->vec_type, "");
1427
1428 if (!is_gather) {
1429 /*
1430              * We can't use the standard 2d lerp as we need per-element weights
1431              * in case of corners, so just calculate the bilinear result as
1432              * w00*s00 + w01*s01 + w10*s10 + w11*s11.
1433              * (This is actually less work than using 2d lerp, 7 vs. 9
1434              * instructions, however calculating the weights needs another 6,
1435              * so it's probably still no slower than 2d lerp for 4 channels,
1436              * since the weights only need to be calculated once - of course
1437              * fixing up the weights has additional cost.)
1438 */
1439 LLVMValueRef w00, w01, w10, w11, wx0, wy0, c_weight, tmp;
1440 wx0 = lp_build_sub(coord_bld, coord_bld->one, s_fpart);
1441 wy0 = lp_build_sub(coord_bld, coord_bld->one, t_fpart);
1442 w00 = lp_build_mul(coord_bld, wx0, wy0);
1443 w01 = lp_build_mul(coord_bld, s_fpart, wy0);
1444 w10 = lp_build_mul(coord_bld, wx0, t_fpart);
1445 w11 = lp_build_mul(coord_bld, s_fpart, t_fpart);
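            /*
             * The four weights always sum to 1; e.g. s = 0.25, t = 0.5
             * gives w00 = 0.375, w01 = 0.125, w10 = 0.375, w11 = 0.125.
             */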
1446
1447 /* find corner weight */
1448 c_weight = lp_build_select(coord_bld, c00, w00, coord_bld->zero);
1449 c_weight = lp_build_select(coord_bld, c01, w01, c_weight);
1450 c_weight = lp_build_select(coord_bld, c10, w10, c_weight);
1451 c_weight = lp_build_select(coord_bld, c11, w11, c_weight);
1452
1453 /*
1454 * add 1/3 of the corner weight to the weight of the 3 other
1455 * samples and null out corner weight.
1456 */
1457 c_weight = lp_build_mul(coord_bld, c_weight, one_third);
1458 w00 = lp_build_add(coord_bld, w00, c_weight);
1459 w00 = lp_build_andnot(coord_bld, w00, c00f);
1460 w01 = lp_build_add(coord_bld, w01, c_weight);
1461 w01 = lp_build_andnot(coord_bld, w01, c01f);
1462 w10 = lp_build_add(coord_bld, w10, c_weight);
1463 w10 = lp_build_andnot(coord_bld, w10, c10f);
1464 w11 = lp_build_add(coord_bld, w11, c_weight);
1465 w11 = lp_build_andnot(coord_bld, w11, c11f);
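            /*
             * The weights still sum to 1: if e.g. c00 is the corner, the
             * other three weights each gain w00/3 while w00 is zeroed, so
             * the redistributed total equals w00 + w01 + w10 + w11.
             */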
1466
1467 if (bld->static_sampler_state->compare_mode ==
1468 PIPE_TEX_COMPARE_NONE) {
1469 for (chan = 0; chan < 4; chan++) {
1470 colors0[chan] = lp_build_mul(coord_bld, w00,
1471 neighbors[0][0][chan]);
1472 tmp = lp_build_mul(coord_bld, w01, neighbors[0][1][chan]);
1473 colors0[chan] = lp_build_add(coord_bld, tmp, colors0[chan]);
1474 tmp = lp_build_mul(coord_bld, w10, neighbors[1][0][chan]);
1475 colors0[chan] = lp_build_add(coord_bld, tmp, colors0[chan]);
1476 tmp = lp_build_mul(coord_bld, w11, neighbors[1][1][chan]);
1477 colors0[chan] = lp_build_add(coord_bld, tmp, colors0[chan]);
1478 }
1479 }
1480 else {
1481 LLVMValueRef cmpval00, cmpval01, cmpval10, cmpval11;
1482 cmpval00 = lp_build_sample_comparefunc(bld, coords[4],
1483 neighbors[0][0][0]);
1484 cmpval01 = lp_build_sample_comparefunc(bld, coords[4],
1485 neighbors[0][1][0]);
1486 cmpval10 = lp_build_sample_comparefunc(bld, coords[4],
1487 neighbors[1][0][0]);
1488 cmpval11 = lp_build_sample_comparefunc(bld, coords[4],
1489 neighbors[1][1][0]);
1490 /*
1491 * inputs to interpolation are just masks so just add
1492 * masked weights together
1493 */
1494 cmpval00 = LLVMBuildBitCast(builder, cmpval00,
1495 coord_bld->vec_type, "");
1496 cmpval01 = LLVMBuildBitCast(builder, cmpval01,
1497 coord_bld->vec_type, "");
1498 cmpval10 = LLVMBuildBitCast(builder, cmpval10,
1499 coord_bld->vec_type, "");
1500 cmpval11 = LLVMBuildBitCast(builder, cmpval11,
1501 coord_bld->vec_type, "");
1502 colors0[0] = lp_build_and(coord_bld, w00, cmpval00);
1503 tmp = lp_build_and(coord_bld, w01, cmpval01);
1504 colors0[0] = lp_build_add(coord_bld, tmp, colors0[0]);
1505 tmp = lp_build_and(coord_bld, w10, cmpval10);
1506 colors0[0] = lp_build_add(coord_bld, tmp, colors0[0]);
1507 tmp = lp_build_and(coord_bld, w11, cmpval11);
1508 colors0[0] = lp_build_add(coord_bld, tmp, colors0[0]);
1509 colors0[1] = colors0[2] = colors0[3] = colors0[0];
1510 }
1511 }
1512 else {
1513 /*
1514 * We don't have any weights to adjust, so instead calculate
1515 * the fourth texel as simply the average of the other 3.
1516 * (This would work for non-gather too, however we'd have
1517 * a boatload more of the select stuff due to there being
1518 * 4 times as many colors as weights.)
1519 */
1520 LLVMValueRef col00, col01, col10, col11;
1521 LLVMValueRef colc, colc0, colc1;
1522 col10 = lp_build_swizzle_soa_channel(texel_bld,
1523 neighbors[1][0], chan_swiz);
1524 col11 = lp_build_swizzle_soa_channel(texel_bld,
1525 neighbors[1][1], chan_swiz);
1526 col01 = lp_build_swizzle_soa_channel(texel_bld,
1527 neighbors[0][1], chan_swiz);
1528 col00 = lp_build_swizzle_soa_channel(texel_bld,
1529 neighbors[0][0], chan_swiz);
1530
1531 /*
1532 * The spec says for comparison filtering, the comparison
1533 * must happen before synthesizing the new value.
1534 * This means all gathered values are always 0 or 1,
1535 * except for the non-existing texel, which can be 0, 1/3, 2/3 or 1.
1536 * It seems we'd be allowed to just return 0 or 1 too, so we
1537 * could simplify and pass the compare mask values down to the
1538 * end (using int arithmetic/compares on the mask values to
1539 * construct the fourth texel) and only convert to floats there,
1540 * but it's probably not worth it (it might be easier on the cpu
1541 * but not on the code)...
1542 */
1543 if (bld->static_sampler_state->compare_mode !=
1544 PIPE_TEX_COMPARE_NONE) {
1545 LLVMValueRef cmpval00, cmpval01, cmpval10, cmpval11;
1546 cmpval00 = lp_build_sample_comparefunc(bld, coords[4], col00);
1547 cmpval01 = lp_build_sample_comparefunc(bld, coords[4], col01);
1548 cmpval10 = lp_build_sample_comparefunc(bld, coords[4], col10);
1549 cmpval11 = lp_build_sample_comparefunc(bld, coords[4], col11);
1550 col00 = lp_build_select(texel_bld, cmpval00,
1551 texel_bld->one, texel_bld->zero);
1552 col01 = lp_build_select(texel_bld, cmpval01,
1553 texel_bld->one, texel_bld->zero);
1554 col10 = lp_build_select(texel_bld, cmpval10,
1555 texel_bld->one, texel_bld->zero);
1556 col11 = lp_build_select(texel_bld, cmpval11,
1557 texel_bld->one, texel_bld->zero);
1558 }
1559
1560 /*
1561 * Null out corner color.
1562 */
1563 col00 = lp_build_andnot(coord_bld, col00, c00f);
1564 col01 = lp_build_andnot(coord_bld, col01, c01f);
1565 col10 = lp_build_andnot(coord_bld, col10, c10f);
1566 col11 = lp_build_andnot(coord_bld, col11, c11f);
1567
1568 /*
1569 * New corner texel color is all colors added / 3.
1570 */
1571 colc0 = lp_build_add(coord_bld, col00, col01);
1572 colc1 = lp_build_add(coord_bld, col10, col11);
1573 colc = lp_build_add(coord_bld, colc0, colc1);
1574 colc = lp_build_mul(coord_bld, one_third, colc);
1575
1576 /*
1577 * Replace the corner texel color with the new value.
1578 */
1579 col00 = lp_build_select(coord_bld, c00, colc, col00);
1580 col01 = lp_build_select(coord_bld, c01, colc, col01);
1581 col10 = lp_build_select(coord_bld, c10, colc, col10);
1582 col11 = lp_build_select(coord_bld, c11, colc, col11);
1583
1584 colors0[0] = col10;
1585 colors0[1] = col11;
1586 colors0[2] = col01;
1587 colors0[3] = col00;
1588 }
1589
1590 LLVMBuildStore(builder, colors0[0], colorss[0]);
1591 LLVMBuildStore(builder, colors0[1], colorss[1]);
1592 LLVMBuildStore(builder, colors0[2], colorss[2]);
1593 LLVMBuildStore(builder, colors0[3], colorss[3]);
1594
1595 lp_build_else(&corner_if);
1596 }
1597
1598 if (bld->static_sampler_state->compare_mode == PIPE_TEX_COMPARE_NONE) {
1599 if (is_gather) {
1600 /*
1601 * Just assign the red channel (no component selection yet).
1602 * This is a bit hackish; we usually do the swizzle at the
1603 * end of sampling (far fewer values to swizzle), but that
1604 * obviously cannot work when using gather.
1605 */
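/*
 * The resulting component order of (x0,y1), (x1,y1), (x1,y0), (x0,y0)
 * matches the ordering specified for gather4 / textureGather results.
 */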
1606 colors0[0] = lp_build_swizzle_soa_channel(texel_bld,
1607 neighbors[1][0],
1608 chan_swiz);
1609 colors0[1] = lp_build_swizzle_soa_channel(texel_bld,
1610 neighbors[1][1],
1611 chan_swiz);
1612 colors0[2] = lp_build_swizzle_soa_channel(texel_bld,
1613 neighbors[0][1],
1614 chan_swiz);
1615 colors0[3] = lp_build_swizzle_soa_channel(texel_bld,
1616 neighbors[0][0],
1617 chan_swiz);
1618 }
1619 else {
1620 /* Bilinear interpolate the four samples from the 2D image / 3D slice */
1621 for (chan = 0; chan < 4; chan++) {
1622 colors0[chan] = lp_build_lerp_2d(texel_bld,
1623 s_fpart, t_fpart,
1624 neighbors[0][0][chan],
1625 neighbors[0][1][chan],
1626 neighbors[1][0][chan],
1627 neighbors[1][1][chan],
1628 0);
1629 }
1630 }
1631 }
1632 else {
1633 LLVMValueRef cmpval00, cmpval01, cmpval10, cmpval11;
1634 cmpval00 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][0][0]);
1635 cmpval01 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][1][0]);
1636 cmpval10 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][0][0]);
1637 cmpval11 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][1][0]);
1638
1639 if (is_gather) {
1640 /* more hacks for swizzling, should be X, ONE or ZERO... */
1641 colors0[0] = lp_build_select(texel_bld, cmpval10,
1642 texel_bld->one, texel_bld->zero);
1643 colors0[1] = lp_build_select(texel_bld, cmpval11,
1644 texel_bld->one, texel_bld->zero);
1645 colors0[2] = lp_build_select(texel_bld, cmpval01,
1646 texel_bld->one, texel_bld->zero);
1647 colors0[3] = lp_build_select(texel_bld, cmpval00,
1648 texel_bld->one, texel_bld->zero);
1649 }
1650 else {
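/*
 * Lerping the 0/1 compare results with the bilinear weights yields
 * percentage-closer filtering. E.g. if only the two y0 texels pass
 * the comparison (masks 1,1,0,0) and t_fpart is 0.5, the result is
 * 0.5 regardless of s_fpart.
 */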
1651 colors0[0] = lp_build_masklerp2d(texel_bld, s_fpart, t_fpart,
1652 cmpval00, cmpval01, cmpval10, cmpval11);
1653 colors0[1] = colors0[2] = colors0[3] = colors0[0];
1654 }
1655 }
1656
1657 if (accurate_cube_corners) {
1658 LLVMBuildStore(builder, colors0[0], colorss[0]);
1659 LLVMBuildStore(builder, colors0[1], colorss[1]);
1660 LLVMBuildStore(builder, colors0[2], colorss[2]);
1661 LLVMBuildStore(builder, colors0[3], colorss[3]);
1662
1663 lp_build_endif(&corner_if);
1664
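/*
 * Both branches stored their results through the colorss allocas, so
 * the loads below act as the phi joining the corner and regular paths.
 */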
1665 colors0[0] = LLVMBuildLoad(builder, colorss[0], "");
1666 colors0[1] = LLVMBuildLoad(builder, colorss[1], "");
1667 colors0[2] = LLVMBuildLoad(builder, colorss[2], "");
1668 colors0[3] = LLVMBuildLoad(builder, colorss[3], "");
1669 }
1670
1671 if (dims == 3) {
1672 LLVMValueRef neighbors1[2][2][4];
1673 LLVMValueRef colors1[4];
1674
1675 assert(!is_gather);
1676
1677 /* get x0/x1/y0/y1 texels at z1 */
1678 lp_build_sample_texel_soa(bld,
1679 width_vec, height_vec, depth_vec,
1680 x00, y00, z1,
1681 row_stride_vec, img_stride_vec,
1682 data_ptr, mipoffsets, neighbors1[0][0]);
1683 lp_build_sample_texel_soa(bld,
1684 width_vec, height_vec, depth_vec,
1685 x01, y01, z1,
1686 row_stride_vec, img_stride_vec,
1687 data_ptr, mipoffsets, neighbors1[0][1]);
1688 lp_build_sample_texel_soa(bld,
1689 width_vec, height_vec, depth_vec,
1690 x10, y10, z1,
1691 row_stride_vec, img_stride_vec,
1692 data_ptr, mipoffsets, neighbors1[1][0]);
1693 lp_build_sample_texel_soa(bld,
1694 width_vec, height_vec, depth_vec,
1695 x11, y11, z1,
1696 row_stride_vec, img_stride_vec,
1697 data_ptr, mipoffsets, neighbors1[1][1]);
1698
1699 if (bld->static_sampler_state->compare_mode == PIPE_TEX_COMPARE_NONE) {
1700 /* Bilinear interpolate the four samples from the second Z slice */
1701 for (chan = 0; chan < 4; chan++) {
1702 colors1[chan] = lp_build_lerp_2d(texel_bld,
1703 s_fpart, t_fpart,
1704 neighbors1[0][0][chan],
1705 neighbors1[0][1][chan],
1706 neighbors1[1][0][chan],
1707 neighbors1[1][1][chan],
1708 0);
1709 }
1710 /* Linearly interpolate the two samples from the two 3D slices */
1711 for (chan = 0; chan < 4; chan++) {
1712 colors_out[chan] = lp_build_lerp(texel_bld,
1713 r_fpart,
1714 colors0[chan], colors1[chan],
1715 0);
1716 }
1717 }
1718 else {
1719 LLVMValueRef cmpval00, cmpval01, cmpval10, cmpval11;
1720 cmpval00 = lp_build_sample_comparefunc(bld, coords[4], neighbors1[0][0][0]);
1721 cmpval01 = lp_build_sample_comparefunc(bld, coords[4], neighbors1[0][1][0]);
1722 cmpval10 = lp_build_sample_comparefunc(bld, coords[4], neighbors1[1][0][0]);
1723 cmpval11 = lp_build_sample_comparefunc(bld, coords[4], neighbors1[1][1][0]);
1724 colors1[0] = lp_build_masklerp2d(texel_bld, s_fpart, t_fpart,
1725 cmpval00, cmpval01, cmpval10, cmpval11);
1726 /* Linearly interpolate the two samples from the two 3D slices */
1727 colors_out[0] = lp_build_lerp(texel_bld,
1728 r_fpart,
1729 colors0[0], colors1[0],
1730 0);
1731 colors_out[1] = colors_out[2] = colors_out[3] = colors_out[0];
1732 }
1733 }
1734 else {
1735 /* 2D tex */
1736 for (chan = 0; chan < 4; chan++) {
1737 colors_out[chan] = colors0[chan];
1738 }
1739 }
1740 }
1741 if (is_gather) {
1742 /*
1743 * For gather, we can't do our usual channel swizzling (done later),
1744 * so do it here. It only really matters for 0/1 swizzles in case
1745 * of comparison filtering, since there the results would otherwise
1746 * be wrong; without comparison it should all work out alright anyway.
1747 * It can't hurt to do it here regardless, since it instantly drops
1748 * all the calculations above (though it's a rather silly idea to
1749 * gather a channel which will always return 0 or 1 in any case...).
1750 */
1751 if (chan_swiz == PIPE_SWIZZLE_1) {
1752 for (chan = 0; chan < 4; chan++) {
1753 colors_out[chan] = texel_bld->one;
1754 }
1755 } else if (chan_swiz == PIPE_SWIZZLE_0) {
1756 for (chan = 0; chan < 4; chan++) {
1757 colors_out[chan] = texel_bld->zero;
1758 }
1759 }
1760 }
1761 }
1762
1763
1764 /**
1765 * Sample the texture/mipmap using given image filter and mip filter.
1766 * ilevel0 and ilevel1 indicate the two mipmap levels to sample
1767 * from (vectors or scalars).
1768 * If we're using nearest miplevel sampling the '1' values will be null/unused.
1769 */
1770 static void
1771 lp_build_sample_mipmap(struct lp_build_sample_context *bld,
1772 unsigned img_filter,
1773 unsigned mip_filter,
1774 boolean is_gather,
1775 const LLVMValueRef *coords,
1776 const LLVMValueRef *offsets,
1777 LLVMValueRef ilevel0,
1778 LLVMValueRef ilevel1,
1779 LLVMValueRef lod_fpart,
1780 LLVMValueRef *colors_out)
1781 {
1782 LLVMBuilderRef builder = bld->gallivm->builder;
1783 LLVMValueRef size0 = NULL;
1784 LLVMValueRef size1 = NULL;
1785 LLVMValueRef row_stride0_vec = NULL;
1786 LLVMValueRef row_stride1_vec = NULL;
1787 LLVMValueRef img_stride0_vec = NULL;
1788 LLVMValueRef img_stride1_vec = NULL;
1789 LLVMValueRef data_ptr0 = NULL;
1790 LLVMValueRef data_ptr1 = NULL;
1791 LLVMValueRef mipoff0 = NULL;
1792 LLVMValueRef mipoff1 = NULL;
1793 LLVMValueRef colors0[4], colors1[4];
1794 unsigned chan;
1795
1796 /* sample the first mipmap level */
1797 lp_build_mipmap_level_sizes(bld, ilevel0,
1798 &size0,
1799 &row_stride0_vec, &img_stride0_vec);
1800 if (bld->num_mips == 1) {
1801 data_ptr0 = lp_build_get_mipmap_level(bld, ilevel0);
1802 }
1803 else {
1804 /* This path should work for num_lods == 1 too, but is slightly less efficient */
1805 data_ptr0 = bld->base_ptr;
1806 mipoff0 = lp_build_get_mip_offsets(bld, ilevel0);
1807 }
1808 if (img_filter == PIPE_TEX_FILTER_NEAREST) {
1809 lp_build_sample_image_nearest(bld, size0,
1810 row_stride0_vec, img_stride0_vec,
1811 data_ptr0, mipoff0, coords, offsets,
1812 colors0);
1813 }
1814 else {
1815 assert(img_filter == PIPE_TEX_FILTER_LINEAR);
1816 lp_build_sample_image_linear(bld, is_gather, size0, NULL,
1817 row_stride0_vec, img_stride0_vec,
1818 data_ptr0, mipoff0, coords, offsets,
1819 colors0);
1820 }
1821
1822 /* Store the first level's colors in the output variables */
1823 for (chan = 0; chan < 4; chan++) {
1824 LLVMBuildStore(builder, colors0[chan], colors_out[chan]);
1825 }
1826
1827 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
1828 struct lp_build_if_state if_ctx;
1829 LLVMValueRef need_lerp;
1830
1831 /* need_lerp = lod_fpart > 0 */
1832 if (bld->num_lods == 1) {
1833 need_lerp = LLVMBuildFCmp(builder, LLVMRealUGT,
1834 lod_fpart, bld->lodf_bld.zero,
1835 "need_lerp");
1836 }
1837 else {
1838 /*
1839 * We'll do mip filtering if any of the quads (or any individual
1840 * pixel in case of per-pixel lod) needs it.
1841 * It might be better to split the vectors here and only fetch/filter
1842 * quads which need it (if there's one lod per quad).
1843 */
1844 need_lerp = lp_build_compare(bld->gallivm, bld->lodf_bld.type,
1845 PIPE_FUNC_GREATER,
1846 lod_fpart, bld->lodf_bld.zero);
1847 need_lerp = lp_build_any_true_range(&bld->lodi_bld, bld->num_lods, need_lerp);
1848 lp_build_name(need_lerp, "need_lerp");
1849 }
1850
1851 lp_build_if(&if_ctx, bld->gallivm, need_lerp);
1852 {
1853 /*
1854 * We unfortunately need to clamp lod_fpart here since we can get
1855 * negative values which would screw up filtering if not all
1856 * lod_fpart values have the same sign.
1857 */
1858 lod_fpart = lp_build_max(&bld->lodf_bld, lod_fpart,
1859 bld->lodf_bld.zero);
1860 /* sample the second mipmap level */
1861 lp_build_mipmap_level_sizes(bld, ilevel1,
1862 &size1,
1863 &row_stride1_vec, &img_stride1_vec);
1864 if (bld->num_mips == 1) {
1865 data_ptr1 = lp_build_get_mipmap_level(bld, ilevel1);
1866 }
1867 else {
1868 data_ptr1 = bld->base_ptr;
1869 mipoff1 = lp_build_get_mip_offsets(bld, ilevel1);
1870 }
1871 if (img_filter == PIPE_TEX_FILTER_NEAREST) {
1872 lp_build_sample_image_nearest(bld, size1,
1873 row_stride1_vec, img_stride1_vec,
1874 data_ptr1, mipoff1, coords, offsets,
1875 colors1);
1876 }
1877 else {
1878 lp_build_sample_image_linear(bld, FALSE, size1, NULL,
1879 row_stride1_vec, img_stride1_vec,
1880 data_ptr1, mipoff1, coords, offsets,
1881 colors1);
1882 }
1883
1884 /* interpolate samples from the two mipmap levels */
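/* per channel: color = colors0 + lod_fpart * (colors1 - colors0) */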
1885
1886 if (bld->num_lods != bld->coord_type.length)
1887 lod_fpart = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
1888 bld->lodf_bld.type,
1889 bld->texel_bld.type,
1890 lod_fpart);
1891
1892 for (chan = 0; chan < 4; chan++) {
1893 colors0[chan] = lp_build_lerp(&bld->texel_bld, lod_fpart,
1894 colors0[chan], colors1[chan],
1895 0);
1896 LLVMBuildStore(builder, colors0[chan], colors_out[chan]);
1897 }
1898 }
1899 lp_build_endif(&if_ctx);
1900 }
1901 }
1902
1903
1904 /**
1905 * Sample the texture/mipmap using given mip filter, and using
1906 * both nearest and linear filtering at the same time depending
1907 * on linear_mask.
1908 * lod can be per quad but linear_mask is always per pixel.
1909 * ilevel0 and ilevel1 indicate the two mipmap levels to sample
1910 * from (vectors or scalars).
1911 * If we're using nearest miplevel sampling the '1' values will be null/unused.
1912 */
1913 static void
1914 lp_build_sample_mipmap_both(struct lp_build_sample_context *bld,
1915 LLVMValueRef linear_mask,
1916 unsigned mip_filter,
1917 const LLVMValueRef *coords,
1918 const LLVMValueRef *offsets,
1919 LLVMValueRef ilevel0,
1920 LLVMValueRef ilevel1,
1921 LLVMValueRef lod_fpart,
1922 LLVMValueRef lod_positive,
1923 LLVMValueRef *colors_out)
1924 {
1925 LLVMBuilderRef builder = bld->gallivm->builder;
1926 LLVMValueRef size0 = NULL;
1927 LLVMValueRef size1 = NULL;
1928 LLVMValueRef row_stride0_vec = NULL;
1929 LLVMValueRef row_stride1_vec = NULL;
1930 LLVMValueRef img_stride0_vec = NULL;
1931 LLVMValueRef img_stride1_vec = NULL;
1932 LLVMValueRef data_ptr0 = NULL;
1933 LLVMValueRef data_ptr1 = NULL;
1934 LLVMValueRef mipoff0 = NULL;
1935 LLVMValueRef mipoff1 = NULL;
1936 LLVMValueRef colors0[4], colors1[4];
1937 unsigned chan;
1938
1939 /* sample the first mipmap level */
1940 lp_build_mipmap_level_sizes(bld, ilevel0,
1941 &size0,
1942 &row_stride0_vec, &img_stride0_vec);
1943 if (bld->num_mips == 1) {
1944 data_ptr0 = lp_build_get_mipmap_level(bld, ilevel0);
1945 }
1946 else {
1947 /* This path should work for num_lods == 1 too, but is slightly less efficient */
1948 data_ptr0 = bld->base_ptr;
1949 mipoff0 = lp_build_get_mip_offsets(bld, ilevel0);
1950 }
1951
1952 lp_build_sample_image_linear(bld, FALSE, size0, linear_mask,
1953 row_stride0_vec, img_stride0_vec,
1954 data_ptr0, mipoff0, coords, offsets,
1955 colors0);
1956
1957 /* Store the first level's colors in the output variables */
1958 for (chan = 0; chan < 4; chan++) {
1959 LLVMBuildStore(builder, colors0[chan], colors_out[chan]);
1960 }
1961
1962 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
1963 struct lp_build_if_state if_ctx;
1964 LLVMValueRef need_lerp;
1965
1966 /*
1967 * We'll do mip filtering if any of the quads (or any individual
1968 * pixel in case of per-pixel lod) needs it.
1969 * Note we use lod_positive here, not lod_fpart, since it may be the
1970 * same condition as that used in the outer "if" in the caller, hence
1971 * llvm should be able to merge the branches in this case.
1972 */
1973 need_lerp = lp_build_any_true_range(&bld->lodi_bld, bld->num_lods, lod_positive);
1974 lp_build_name(need_lerp, "need_lerp");
1975
1976 lp_build_if(&if_ctx, bld->gallivm, need_lerp);
1977 {
1978 /*
1979 * We unfortunately need to clamp lod_fpart here since we can get
1980 * negative values which would screw up filtering if not all
1981 * lod_fpart values have the same sign.
1982 */
1983 lod_fpart = lp_build_max(&bld->lodf_bld, lod_fpart,
1984 bld->lodf_bld.zero);
1985 /* sample the second mipmap level */
1986 lp_build_mipmap_level_sizes(bld, ilevel1,
1987 &size1,
1988 &row_stride1_vec, &img_stride1_vec);
1989 if (bld->num_mips == 1) {
1990 data_ptr1 = lp_build_get_mipmap_level(bld, ilevel1);
1991 }
1992 else {
1993 data_ptr1 = bld->base_ptr;
1994 mipoff1 = lp_build_get_mip_offsets(bld, ilevel1);
1995 }
1996
1997 lp_build_sample_image_linear(bld, FALSE, size1, linear_mask,
1998 row_stride1_vec, img_stride1_vec,
1999 data_ptr1, mipoff1, coords, offsets,
2000 colors1);
2001
2002 /* interpolate samples from the two mipmap levels */
2003
2004 if (bld->num_lods != bld->coord_type.length)
2005 lod_fpart = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
2006 bld->lodf_bld.type,
2007 bld->texel_bld.type,
2008 lod_fpart);
2009
2010 for (chan = 0; chan < 4; chan++) {
2011 colors0[chan] = lp_build_lerp(&bld->texel_bld, lod_fpart,
2012 colors0[chan], colors1[chan],
2013 0);
2014 LLVMBuildStore(builder, colors0[chan], colors_out[chan]);
2015 }
2016 }
2017 lp_build_endif(&if_ctx);
2018 }
2019 }
2020
2021
2022 /**
2023 * Build (per-coord) layer value.
2024 * Either clamp the layer to valid values, or fill in the optional
2025 * out_of_bounds value and return the layer unclamped.
2026 */
2027 static LLVMValueRef
2028 lp_build_layer_coord(struct lp_build_sample_context *bld,
2029 unsigned texture_unit,
2030 boolean is_cube_array,
2031 LLVMValueRef layer,
2032 LLVMValueRef *out_of_bounds)
2033 {
2034 LLVMValueRef num_layers;
2035 struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
2036
2037 num_layers = bld->dynamic_state->depth(bld->dynamic_state, bld->gallivm,
2038 bld->context_ptr, texture_unit, NULL);
2039
2040 if (out_of_bounds) {
2041 LLVMValueRef out1, out;
2042 assert(!is_cube_array);
2043 num_layers = lp_build_broadcast_scalar(int_coord_bld, num_layers);
2044 out = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, layer, int_coord_bld->zero);
2045 out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, layer, num_layers);
2046 *out_of_bounds = lp_build_or(int_coord_bld, out, out1);
2047 return layer;
2048 }
2049 else {
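/*
 * E.g. a cube array holding 2 cubes reports 12 layers; the start layer
 * of a cube (a multiple of 6) is clamped to [0, 12 - 6] so all six
 * faces of the selected cube stay in range.
 */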
2050 LLVMValueRef maxlayer;
2051 LLVMValueRef s = is_cube_array ? lp_build_const_int32(bld->gallivm, 6) :
2052 bld->int_bld.one;
2053 maxlayer = lp_build_sub(&bld->int_bld, num_layers, s);
2054 maxlayer = lp_build_broadcast_scalar(int_coord_bld, maxlayer);
2055 return lp_build_clamp(int_coord_bld, layer, int_coord_bld->zero, maxlayer);
2056 }
2057 }
2058
2059
2060 /**
2061 * Calculate cube face, lod, mip levels.
2062 */
2063 static void
2064 lp_build_sample_common(struct lp_build_sample_context *bld,
2065 boolean is_lodq,
2066 unsigned texture_index,
2067 unsigned sampler_index,
2068 LLVMValueRef *coords,
2069 const struct lp_derivatives *derivs, /* optional */
2070 LLVMValueRef lod_bias, /* optional */
2071 LLVMValueRef explicit_lod, /* optional */
2072 LLVMValueRef *lod_pos_or_zero,
2073 LLVMValueRef *lod,
2074 LLVMValueRef *lod_fpart,
2075 LLVMValueRef *ilevel0,
2076 LLVMValueRef *ilevel1)
2077 {
2078 const unsigned mip_filter = bld->static_sampler_state->min_mip_filter;
2079 const unsigned min_filter = bld->static_sampler_state->min_img_filter;
2080 const unsigned mag_filter = bld->static_sampler_state->mag_img_filter;
2081 const unsigned target = bld->static_texture_state->target;
2082 LLVMValueRef first_level, cube_rho = NULL;
2083 LLVMValueRef lod_ipart = NULL;
2084 struct lp_derivatives cube_derivs;
2085
2086 /*
2087 printf("%s mip %d min %d mag %d\n", __FUNCTION__,
2088 mip_filter, min_filter, mag_filter);
2089 */
2090
2091 /*
2092 * Choose cube face, recompute texcoords for the chosen face and
2093 * compute rho here too (as it requires transform of derivatives).
2094 */
2095 if (target == PIPE_TEXTURE_CUBE || target == PIPE_TEXTURE_CUBE_ARRAY) {
2096 boolean need_derivs;
2097 need_derivs = ((min_filter != mag_filter ||
2098 mip_filter != PIPE_TEX_MIPFILTER_NONE) &&
2099 !bld->static_sampler_state->min_max_lod_equal &&
2100 !explicit_lod);
2101 lp_build_cube_lookup(bld, coords, derivs, &cube_rho, &cube_derivs, need_derivs);
2102 derivs = &cube_derivs;
2103 if (target == PIPE_TEXTURE_CUBE_ARRAY && !is_lodq) {
2104 /* calculate cube layer coord now */
2105 LLVMValueRef layer = lp_build_iround(&bld->coord_bld, coords[3]);
2106 LLVMValueRef six = lp_build_const_int_vec(bld->gallivm, bld->int_coord_type, 6);
2107 layer = lp_build_mul(&bld->int_coord_bld, layer, six);
2108 coords[3] = lp_build_layer_coord(bld, texture_index, TRUE, layer, NULL);
2109 /* because of seamless filtering can't add it to face (coords[2]) here. */
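/* (The layer offset and the face index get added together later,
 * once seamless corner handling is out of the way.) */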
2110 }
2111 }
2112 else if ((target == PIPE_TEXTURE_1D_ARRAY ||
2113 target == PIPE_TEXTURE_2D_ARRAY) && !is_lodq) {
2114 coords[2] = lp_build_iround(&bld->coord_bld, coords[2]);
2115 coords[2] = lp_build_layer_coord(bld, texture_index, FALSE, coords[2], NULL);
2116 }
2117
2118 if (bld->static_sampler_state->compare_mode != PIPE_TEX_COMPARE_NONE) {
2119 /*
2120 * Clamp p coords to [0,1] for fixed-function depth texture formats here.
2121 * Technically this is not entirely correct for unorm depth as the ref value
2122 * should be converted to the depth format (quantization!) and the comparison
2123 * then done in texture format. That would actually help performance (we'd
2124 * only need to do it once and could skip the per-sample conversion of texels
2125 * to floats), but it would need messier code (we'd have to push
2126 * at least some bits down to the actual fetch so the conversion could be
2127 * skipped, and it would interact badly with the border color; we'd need to
2128 * convert the border color to that format too, or use some other tricks).
2129 */
2130 const struct util_format_description *format_desc = bld->format_desc;
2131 unsigned chan_type;
2132 /* not entirely sure we couldn't end up with an invalid swizzle here */
2133 chan_type = format_desc->swizzle[0] <= PIPE_SWIZZLE_W ?
2134 format_desc->channel[format_desc->swizzle[0]].type :
2135 UTIL_FORMAT_TYPE_FLOAT;
2136 if (chan_type != UTIL_FORMAT_TYPE_FLOAT) {
2137 coords[4] = lp_build_clamp(&bld->coord_bld, coords[4],
2138 bld->coord_bld.zero, bld->coord_bld.one);
2139 }
2140 }
2141
2142 /*
2143 * Compute the level of detail (float).
2144 */
2145 if (min_filter != mag_filter ||
2146 mip_filter != PIPE_TEX_MIPFILTER_NONE || is_lodq) {
2147 /* Need to compute lod either to choose mipmap levels or to
2148 * distinguish between minification/magnification with one mipmap level.
2149 */
2150 lp_build_lod_selector(bld, is_lodq, texture_index, sampler_index,
2151 coords[0], coords[1], coords[2], cube_rho,
2152 derivs, lod_bias, explicit_lod,
2153 mip_filter, lod,
2154 &lod_ipart, lod_fpart, lod_pos_or_zero);
2155 if (is_lodq) {
2156 LLVMValueRef last_level;
2157 last_level = bld->dynamic_state->last_level(bld->dynamic_state,
2158 bld->gallivm,
2159 bld->context_ptr,
2160 texture_index, NULL);
2161 first_level = bld->dynamic_state->first_level(bld->dynamic_state,
2162 bld->gallivm,
2163 bld->context_ptr,
2164 texture_index, NULL);
2165 last_level = lp_build_sub(&bld->int_bld, last_level, first_level);
2166 last_level = lp_build_int_to_float(&bld->float_bld, last_level);
2167 last_level = lp_build_broadcast_scalar(&bld->lodf_bld, last_level);
2168
2169 switch (mip_filter) {
2170 case PIPE_TEX_MIPFILTER_NONE:
2171 *lod_fpart = bld->lodf_bld.zero;
2172 break;
2173 case PIPE_TEX_MIPFILTER_NEAREST:
2174 *lod_fpart = lp_build_round(&bld->lodf_bld, *lod_fpart);
2175 /* fallthrough */
2176 case PIPE_TEX_MIPFILTER_LINEAR:
2177 *lod_fpart = lp_build_clamp(&bld->lodf_bld, *lod_fpart,
2178 bld->lodf_bld.zero, last_level);
2179 break;
2180 }
2181 return;
2182 }
2183
2184 } else {
2185 lod_ipart = bld->lodi_bld.zero;
2186 *lod_pos_or_zero = bld->lodi_bld.zero;
2187 }
2188
2189 if (bld->num_lods != bld->num_mips) {
2190 /* only makes sense if there's just a single mip level */
2191 assert(bld->num_mips == 1);
2192 lod_ipart = lp_build_extract_range(bld->gallivm, lod_ipart, 0, 1);
2193 }
2194
2195 /*
2196 * Compute integer mipmap level(s) to fetch texels from: ilevel0, ilevel1
2197 */
2198 switch (mip_filter) {
2199 default:
2200 assert(0 && "bad mip_filter value in lp_build_sample_soa()");
2201 /* fall-through */
2202 case PIPE_TEX_MIPFILTER_NONE:
2203 /* always use mip level 0 */
2204 first_level = bld->dynamic_state->first_level(bld->dynamic_state,
2205 bld->gallivm, bld->context_ptr,
2206 texture_index, NULL);
2207 first_level = lp_build_broadcast_scalar(&bld->leveli_bld, first_level);
2208 *ilevel0 = first_level;
2209 break;
2210 case PIPE_TEX_MIPFILTER_NEAREST:
2211 assert(lod_ipart);
2212 lp_build_nearest_mip_level(bld, texture_index, lod_ipart, ilevel0, NULL);
2213 break;
2214 case PIPE_TEX_MIPFILTER_LINEAR:
2215 assert(lod_ipart);
2216 assert(*lod_fpart);
2217 lp_build_linear_mip_levels(bld, texture_index,
2218 lod_ipart, lod_fpart,
2219 ilevel0, ilevel1);
2220 break;
2221 }
2222 }
2223
2224 static void
2225 lp_build_clamp_border_color(struct lp_build_sample_context *bld,
2226 unsigned sampler_unit)
2227 {
2228 struct gallivm_state *gallivm = bld->gallivm;
2229 LLVMBuilderRef builder = gallivm->builder;
2230 LLVMValueRef border_color_ptr =
2231 bld->dynamic_state->border_color(bld->dynamic_state, gallivm,
2232 bld->context_ptr, sampler_unit);
2233 LLVMValueRef border_color;
2234 const struct util_format_description *format_desc = bld->format_desc;
2235 struct lp_type vec4_type = bld->texel_type;
2236 struct lp_build_context vec4_bld;
2237 LLVMValueRef min_clamp = NULL;
2238 LLVMValueRef max_clamp = NULL;
2239
2240 /*
2241 * For normalized formats we need to clamp the border color (technically
2242 * we probably should also quantize the data). It really sucks doing this
2243 * here, but it can't be avoided, at least for now, since the border color
2244 * is part of sampler state while the texture format is part of
2245 * sampler_view state. GL also expects clamping for uint/sint formats,
2246 * so do that as well (d3d10 can't end up here with uint/sint since it
2247 * only supports them with ld).
2248 */
2249 vec4_type.length = 4;
2250 lp_build_context_init(&vec4_bld, gallivm, vec4_type);
2251
2252 /*
2253 * Vectorized clamping of the border color. Loading is a bit of a hack
2254 * since we just cast the pointer-to-float-array to a pointer-to-vec4
2255 * (int or float).
2256 */
2257 border_color_ptr = lp_build_array_get_ptr(gallivm, border_color_ptr,
2258 lp_build_const_int32(gallivm, 0));
2259 border_color_ptr = LLVMBuildBitCast(builder, border_color_ptr,
2260 LLVMPointerType(vec4_bld.vec_type, 0), "");
2261 border_color = LLVMBuildLoad(builder, border_color_ptr, "");
2262 /* we don't have aligned type in the dynamic state unfortunately */
2263 LLVMSetAlignment(border_color, 4);
2264
2265 /*
2266 * Instead of having some incredibly complex logic trying to figure out the
2267 * clamping necessary for each channel, simply use the first channel, and treat
2268 * mixed signed/unsigned normalized formats specially.
2269 * (Mixed non-normalized formats, which wouldn't work at all here, do not
2270 * exist for a good reason.)
2271 */
2272 if (format_desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
2273 int chan;
2274 /* d/s needs special handling because both present means just sampling depth */
2275 if (util_format_is_depth_and_stencil(format_desc->format)) {
2276 chan = format_desc->swizzle[0];
2277 }
2278 else {
2279 chan = util_format_get_first_non_void_channel(format_desc->format);
2280 }
2281 if (chan >= 0 && chan <= PIPE_SWIZZLE_W) {
2282 unsigned chan_type = format_desc->channel[chan].type;
2283 unsigned chan_norm = format_desc->channel[chan].normalized;
2284 unsigned chan_pure = format_desc->channel[chan].pure_integer;
2285 if (chan_type == UTIL_FORMAT_TYPE_SIGNED) {
2286 if (chan_norm) {
2287 min_clamp = lp_build_const_vec(gallivm, vec4_type, -1.0F);
2288 max_clamp = vec4_bld.one;
2289 }
2290 else if (chan_pure) {
2291 /*
2292 * The border color was stored as int, hence we need a min/max clamp
2293 * only if the channel has fewer than 32 bits.
2294 */
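/* e.g. an 8-bit pure sint channel gets clamped to [-128, 127] */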
2295 unsigned chan_size = format_desc->channel[chan].size;
2296 if (chan_size < 32) {
2297 min_clamp = lp_build_const_int_vec(gallivm, vec4_type,
2298 0 - (1 << (chan_size - 1)));
2299 max_clamp = lp_build_const_int_vec(gallivm, vec4_type,
2300 (1 << (chan_size - 1)) - 1);
2301 }
2302 }
2303 /* TODO: no idea about non-pure, non-normalized! */
2304 }
2305 else if (chan_type == UTIL_FORMAT_TYPE_UNSIGNED) {
2306 if (chan_norm) {
2307 min_clamp = vec4_bld.zero;
2308 max_clamp = vec4_bld.one;
2309 }
2310 /*
2311 * Need an ugly hack here: because we don't have Z32_FLOAT_X8X24,
2312 * we use Z32_FLOAT_S8X24 to imply sampling the depth component
2313 * and ignoring stencil, which would blow up here if we tried to
2314 * do a uint clamp in a float texel build...
2315 * And even if we had that format, mesa st also assumes using z24s8
2316 * means depth sampling ignoring stencil.
2317 */
2318 else if (chan_pure) {
2319 /*
2320 * The border color was stored as uint, hence we never need a min
2321 * clamp, and only need a max clamp if the channel has fewer than 32 bits.
2322 */
2323 unsigned chan_size = format_desc->channel[chan].size;
2324 if (chan_size < 32) {
2325 max_clamp = lp_build_const_int_vec(gallivm, vec4_type,
2326 (1 << chan_size) - 1);
2327 }
2328 /* TODO: no idea about non-pure, non-normalized! */
2329 }
2330 }
2331 else if (chan_type == UTIL_FORMAT_TYPE_FIXED) {
2332 /* TODO: I have no idea what clamp this would need if any! */
2333 }
2334 }
2335 /* mixed plain formats (or different pure size) */
2336 switch (format_desc->format) {
2337 case PIPE_FORMAT_B10G10R10A2_UINT:
2338 case PIPE_FORMAT_R10G10B10A2_UINT:
2339 {
2340 unsigned max10 = (1 << 10) - 1;
2341 max_clamp = lp_build_const_aos(gallivm, vec4_type, max10, max10,
2342 max10, (1 << 2) - 1, NULL);
2343 }
2344 break;
2345 case PIPE_FORMAT_R10SG10SB10SA2U_NORM:
2346 min_clamp = lp_build_const_aos(gallivm, vec4_type, -1.0F, -1.0F,
2347 -1.0F, 0.0F, NULL);
2348 max_clamp = vec4_bld.one;
2349 break;
2350 case PIPE_FORMAT_R8SG8SB8UX8U_NORM:
2351 case PIPE_FORMAT_R5SG5SB6U_NORM:
2352 min_clamp = lp_build_const_aos(gallivm, vec4_type, -1.0F, -1.0F,
2353 0.0F, 0.0F, NULL);
2354 max_clamp = vec4_bld.one;
2355 break;
2356 default:
2357 break;
2358 }
2359 }
2360 else {
2361 /* cannot figure this out from format description */
2362 if (format_desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
2363 /* s3tc formats are always unorm */
2364 min_clamp = vec4_bld.zero;
2365 max_clamp = vec4_bld.one;
2366 }
2367 else if (format_desc->layout == UTIL_FORMAT_LAYOUT_RGTC ||
2368 format_desc->layout == UTIL_FORMAT_LAYOUT_ETC ||
2369 format_desc->layout == UTIL_FORMAT_LAYOUT_BPTC) {
2370 switch (format_desc->format) {
2371 case PIPE_FORMAT_RGTC1_UNORM:
2372 case PIPE_FORMAT_RGTC2_UNORM:
2373 case PIPE_FORMAT_LATC1_UNORM:
2374 case PIPE_FORMAT_LATC2_UNORM:
2375 case PIPE_FORMAT_ETC1_RGB8:
2376 case PIPE_FORMAT_BPTC_RGBA_UNORM:
2377 case PIPE_FORMAT_BPTC_SRGBA:
2378 min_clamp = vec4_bld.zero;
2379 max_clamp = vec4_bld.one;
2380 break;
2381 case PIPE_FORMAT_RGTC1_SNORM:
2382 case PIPE_FORMAT_RGTC2_SNORM:
2383 case PIPE_FORMAT_LATC1_SNORM:
2384 case PIPE_FORMAT_LATC2_SNORM:
2385 min_clamp = lp_build_const_vec(gallivm, vec4_type, -1.0F);
2386 max_clamp = vec4_bld.one;
2387 break;
2388 case PIPE_FORMAT_BPTC_RGB_FLOAT:
2389 /* not sure if we should clamp to max half float? */
2390 break;
2391 case PIPE_FORMAT_BPTC_RGB_UFLOAT:
2392 min_clamp = vec4_bld.zero;
2393 break;
2394 default:
2395 assert(0);
2396 break;
2397 }
2398 }
2399 /*
2400 * all others are from the subsampled/other group, though we don't
2401 * care about yuv (and should not get any zs formats here)
2402 */
2403 else if (format_desc->colorspace != UTIL_FORMAT_COLORSPACE_YUV) {
2404 switch (format_desc->format) {
2405 case PIPE_FORMAT_R8G8_B8G8_UNORM:
2406 case PIPE_FORMAT_G8R8_G8B8_UNORM:
2407 case PIPE_FORMAT_G8R8_B8R8_UNORM:
2408 case PIPE_FORMAT_R8G8_R8B8_UNORM:
2409 case PIPE_FORMAT_R1_UNORM: /* doesn't make sense but ah well */
2410 min_clamp = vec4_bld.zero;
2411 max_clamp = vec4_bld.one;
2412 break;
2413 case PIPE_FORMAT_R8G8Bx_SNORM:
2414 min_clamp = lp_build_const_vec(gallivm, vec4_type, -1.0F);
2415 max_clamp = vec4_bld.one;
2416 break;
2417 /*
2418 * Note smallfloat formats usually don't need clamping
2419 * (they still have infinite range) however this is not
2420 * true for r11g11b10 and r9g9b9e5, which can't represent
2421 * negative numbers (and additionally r9g9b9e5 can't represent
2422 * very large numbers). d3d10 seems happy without clamping in
2423 * this case, but gl spec is pretty clear: "for floating
2424 * point and integer formats, border values are clamped to
2425 * the representable range of the format" so do that here.
2426 */
2427 case PIPE_FORMAT_R11G11B10_FLOAT:
2428 min_clamp = vec4_bld.zero;
2429 break;
2430 case PIPE_FORMAT_R9G9B9E5_FLOAT:
2431 min_clamp = vec4_bld.zero;
2432 max_clamp = lp_build_const_vec(gallivm, vec4_type, MAX_RGB9E5);
2433 break;
2434 default:
2435 assert(0);
2436 break;
2437 }
2438 }
2439 }
2440
2441 if (min_clamp) {
2442 border_color = lp_build_max(&vec4_bld, border_color, min_clamp);
2443 }
2444 if (max_clamp) {
2445 border_color = lp_build_min(&vec4_bld, border_color, max_clamp);
2446 }
2447
2448 bld->border_color_clamped = border_color;
2449 }
2450
2451
2452 /**
2453 * General texture sampling codegen.
2454 * This function handles texture sampling for all texture targets (1D,
2455 * 2D, 3D, cube) and all filtering modes.
2456 */
2457 static void
2458 lp_build_sample_general(struct lp_build_sample_context *bld,
2459 unsigned sampler_unit,
2460 boolean is_gather,
2461 const LLVMValueRef *coords,
2462 const LLVMValueRef *offsets,
2463 LLVMValueRef lod_positive,
2464 LLVMValueRef lod_fpart,
2465 LLVMValueRef ilevel0,
2466 LLVMValueRef ilevel1,
2467 LLVMValueRef *colors_out)
2468 {
2469 LLVMBuilderRef builder = bld->gallivm->builder;
2470 const struct lp_static_sampler_state *sampler_state = bld->static_sampler_state;
2471 const unsigned mip_filter = sampler_state->min_mip_filter;
2472 const unsigned min_filter = sampler_state->min_img_filter;
2473 const unsigned mag_filter = sampler_state->mag_img_filter;
2474 LLVMValueRef texels[4];
2475 unsigned chan;
2476
2477 /* if we need border color, (potentially) clamp it now */
2478 if (lp_sampler_wrap_mode_uses_border_color(sampler_state->wrap_s,
2479 min_filter,
2480 mag_filter) ||
2481 (bld->dims > 1 &&
2482 lp_sampler_wrap_mode_uses_border_color(sampler_state->wrap_t,
2483 min_filter,
2484 mag_filter)) ||
2485 (bld->dims > 2 &&
2486 lp_sampler_wrap_mode_uses_border_color(sampler_state->wrap_r,
2487 min_filter,
2488 mag_filter))) {
2489 lp_build_clamp_border_color(bld, sampler_unit);
2490 }
2491
2492
2493 /*
2494 * Get/interpolate texture colors.
2495 */
2496
2497 for (chan = 0; chan < 4; ++chan) {
2498 texels[chan] = lp_build_alloca(bld->gallivm, bld->texel_bld.vec_type, "");
2499 lp_build_name(texels[chan], "sampler%u_texel_%c_var", sampler_unit, "xyzw"[chan]);
2500 }
2501
2502 if (min_filter == mag_filter) {
2503 /* no need to distinguish between minification and magnification */
2504 lp_build_sample_mipmap(bld, min_filter, mip_filter,
2505 is_gather,
2506 coords, offsets,
2507 ilevel0, ilevel1, lod_fpart,
2508 texels);
2509 }
2510 else {
2511 /*
2512 * Could also get rid of the if-logic and always use mipmap_both, for
2513 * both the single-lod and multi-lod cases, if nothing really uses this.
2514 */
2515 if (bld->num_lods == 1) {
2516 /* Emit conditional to choose min image filter or mag image filter
2517 * depending on the lod being > 0 or <= 0, respectively.
2518 */
2519 struct lp_build_if_state if_ctx;
2520
2521 lod_positive = LLVMBuildTrunc(builder, lod_positive,
2522 LLVMInt1TypeInContext(bld->gallivm->context),
2523 "lod_pos");
2524
2525 lp_build_if(&if_ctx, bld->gallivm, lod_positive);
2526 {
2527 /* Use the minification filter */
2528 lp_build_sample_mipmap(bld, min_filter, mip_filter, FALSE,
2529 coords, offsets,
2530 ilevel0, ilevel1, lod_fpart,
2531 texels);
2532 }
2533 lp_build_else(&if_ctx);
2534 {
2535 /* Use the magnification filter */
2536 lp_build_sample_mipmap(bld, mag_filter, PIPE_TEX_MIPFILTER_NONE,
2537 FALSE,
2538 coords, offsets,
2539 ilevel0, NULL, NULL,
2540 texels);
2541 }
2542 lp_build_endif(&if_ctx);
2543 }
2544 else {
2545 LLVMValueRef need_linear, linear_mask;
2546 unsigned mip_filter_for_nearest;
2547 struct lp_build_if_state if_ctx;
2548
2549 if (min_filter == PIPE_TEX_FILTER_LINEAR) {
2550 linear_mask = lod_positive;
2551 mip_filter_for_nearest = PIPE_TEX_MIPFILTER_NONE;
2552 }
2553 else {
2554 linear_mask = lp_build_not(&bld->lodi_bld, lod_positive);
2555 mip_filter_for_nearest = mip_filter;
2556 }
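/*
 * In other words: if the linear filter is the minification filter,
 * exactly the lod-positive (minified) pixels need it, and the other
 * pixels take the nearest path with the mag filter, which never does
 * mip interpolation (hence MIPFILTER_NONE). Otherwise the magnified
 * pixels are the linear ones, while the nearest path serves the
 * minified pixels, which keep the real mip filter.
 */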
2557 need_linear = lp_build_any_true_range(&bld->lodi_bld, bld->num_lods,
2558 linear_mask);
2559 lp_build_name(need_linear, "need_linear");
2560
2561 if (bld->num_lods != bld->coord_type.length) {
2562 linear_mask = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
2563 bld->lodi_type,
2564 bld->int_coord_type,
2565 linear_mask);
2566 }
2567
2568 lp_build_if(&if_ctx, bld->gallivm, need_linear);
2569 {
2570 /*
2571 * Do sampling with both filters simultaneously. This means using
2572 * a linear filter and doing some tricks (with weights) for the pixels
2573 * which need the nearest filter.
2574 * Note that it's probably rare that some pixels need nearest and some
2575 * linear filtering, but the fixups required for the nearest pixels
2576 * aren't all that complicated, so just always run the combined path
2577 * if at least some pixels require linear.
2578 */
2579 lp_build_sample_mipmap_both(bld, linear_mask, mip_filter,
2580 coords, offsets,
2581 ilevel0, ilevel1,
2582 lod_fpart, lod_positive,
2583 texels);
2584 }
2585 lp_build_else(&if_ctx);
2586 {
2587 /*
2588 * All pixels require just nearest filtering, which is way
2589 * cheaper than linear, hence do a separate path for that.
2590 */
2591 lp_build_sample_mipmap(bld, PIPE_TEX_FILTER_NEAREST,
2592 mip_filter_for_nearest, FALSE,
2593 coords, offsets,
2594 ilevel0, ilevel1, lod_fpart,
2595 texels);
2596 }
2597 lp_build_endif(&if_ctx);
2598 }
2599 }
2600
2601 for (chan = 0; chan < 4; ++chan) {
2602 colors_out[chan] = LLVMBuildLoad(builder, texels[chan], "");
2603 lp_build_name(colors_out[chan], "sampler%u_texel_%c", sampler_unit, "xyzw"[chan]);
2604 }
2605 }
2606
2607
2608 /**
2609 * Texel fetch function.
2610 * In contrast to general sampling there is no filtering, no coord minification;
2611 * the lod (if any) is always an explicit uint, and coords are uints (in texel
2612 * units) applied directly to the selected mip level (after adding texel offsets).
2613 * This function handles texel fetch for all targets where texel fetch is supported
2614 * (no cube maps, but 1d, 2d, 3d are supported, arrays and buffers should be too).
2615 */
2616 static void
2617 lp_build_fetch_texel(struct lp_build_sample_context *bld,
2618 unsigned texture_unit,
2619 LLVMValueRef ms_index,
2620 const LLVMValueRef *coords,
2621 LLVMValueRef explicit_lod,
2622 const LLVMValueRef *offsets,
2623 LLVMValueRef *colors_out)
2624 {
2625 struct lp_build_context *perquadi_bld = &bld->lodi_bld;
2626 struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
2627 unsigned dims = bld->dims, chan;
2628 unsigned target = bld->static_texture_state->target;
2629 boolean out_of_bound_ret_zero = TRUE;
2630 LLVMValueRef size, ilevel;
2631 LLVMValueRef row_stride_vec = NULL, img_stride_vec = NULL;
2632 LLVMValueRef x = coords[0], y = coords[1], z = coords[2];
2633 LLVMValueRef width, height, depth, i, j;
2634 LLVMValueRef offset, out_of_bounds, out1;
2635
2636 out_of_bounds = int_coord_bld->zero;
2637
2638 if (explicit_lod && bld->static_texture_state->target != PIPE_BUFFER) {
2639 if (bld->num_mips != int_coord_bld->type.length) {
2640 ilevel = lp_build_pack_aos_scalars(bld->gallivm, int_coord_bld->type,
2641 perquadi_bld->type, explicit_lod, 0);
2642 }
2643 else {
2644 ilevel = explicit_lod;
2645 }
2646 lp_build_nearest_mip_level(bld, texture_unit, ilevel, &ilevel,
2647 out_of_bound_ret_zero ? &out_of_bounds : NULL);
2648 }
2649 else {
2650 assert(bld->num_mips == 1);
2651 if (bld->static_texture_state->target != PIPE_BUFFER) {
2652 ilevel = bld->dynamic_state->first_level(bld->dynamic_state, bld->gallivm,
2653 bld->context_ptr, texture_unit, NULL);
2654 }
2655 else {
2656 ilevel = lp_build_const_int32(bld->gallivm, 0);
2657 }
2658 }
2659 lp_build_mipmap_level_sizes(bld, ilevel,
2660 &size,
2661 &row_stride_vec, &img_stride_vec);
2662 lp_build_extract_image_sizes(bld, &bld->int_size_bld, int_coord_bld->type,
2663 size, &width, &height, &depth);
2664
2665 if (target == PIPE_TEXTURE_1D_ARRAY ||
2666 target == PIPE_TEXTURE_2D_ARRAY) {
2667 if (out_of_bound_ret_zero) {
2668 z = lp_build_layer_coord(bld, texture_unit, FALSE, z, &out1);
2669 out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
2670 }
2671 else {
2672 z = lp_build_layer_coord(bld, texture_unit, FALSE, z, NULL);
2673 }
2674 }
2675
2676 /* This is a lot like border sampling */
2677 if (offsets[0]) {
2678 /*
2679 * coords are really unsigned, offsets are signed, but I don't think
2680 * exceeding 31 bits is possible
2681 */
2682 x = lp_build_add(int_coord_bld, x, offsets[0]);
2683 }
2684 out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, x, int_coord_bld->zero);
2685 out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
2686 out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, x, width);
2687 out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
2688
2689 if (dims >= 2) {
2690 if (offsets[1]) {
2691 y = lp_build_add(int_coord_bld, y, offsets[1]);
2692 }
2693 out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, y, int_coord_bld->zero);
2694 out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
2695 out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, y, height);
2696 out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
2697
2698 if (dims >= 3) {
2699 if (offsets[2]) {
2700 z = lp_build_add(int_coord_bld, z, offsets[2]);
2701 }
2702 out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, z, int_coord_bld->zero);
2703 out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
2704 out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, z, depth);
2705 out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
2706 }
2707 }
2708
2709 lp_build_sample_offset(int_coord_bld,
2710 bld->format_desc,
2711 x, y, z, row_stride_vec, img_stride_vec,
2712 &offset, &i, &j);
2713
2714 if (bld->static_texture_state->target != PIPE_BUFFER) {
2715 offset = lp_build_add(int_coord_bld, offset,
2716 lp_build_get_mip_offsets(bld, ilevel));
2717 }
2718
2719 if (bld->fetch_ms) {
2720 LLVMValueRef num_samples;
2721 num_samples = bld->dynamic_state->num_samples(bld->dynamic_state, bld->gallivm,
2722 bld->context_ptr, texture_unit, NULL);
2723 out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, ms_index, int_coord_bld->zero);
2724 out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
2725 out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, ms_index, lp_build_broadcast_scalar(int_coord_bld, num_samples));
2726 out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
2727 offset = lp_build_add(int_coord_bld, offset,
2728 lp_build_mul(int_coord_bld, bld->sample_stride, ms_index));
2729 }
2730
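/*
 * Zero out the offset for out-of-bounds lanes so the fetch below stays
 * within the resource; the bogus texels fetched for those lanes get
 * replaced with zero afterwards.
 */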
2731 offset = lp_build_andnot(int_coord_bld, offset, out_of_bounds);
2732
2733 lp_build_fetch_rgba_soa(bld->gallivm,
2734 bld->format_desc,
2735 bld->texel_type, TRUE,
2736 bld->base_ptr, offset,
2737 i, j,
2738 bld->cache,
2739 colors_out);
2740
2741 if (out_of_bound_ret_zero) {
2742 /*
2743 * Only needed for ARB_robust_buffer_access_behavior and d3d10.
2744 * Could use min/max above instead of out-of-bounds comparisons
2745 * if we don't care about the result returned for out-of-bounds.
2746 */
2747 for (chan = 0; chan < 4; chan++) {
2748 colors_out[chan] = lp_build_select(&bld->texel_bld, out_of_bounds,
2749 bld->texel_bld.zero, colors_out[chan]);
2750 }
2751 }
2752 }
2753
2754
2755 /**
2756 * Just set texels to white instead of actually sampling the texture.
2757 * For debugging.
2758 */
2759 void
2760 lp_build_sample_nop(struct gallivm_state *gallivm,
2761 struct lp_type type,
2762 const LLVMValueRef *coords,
2763 LLVMValueRef texel_out[4])
2764 {
2765 LLVMValueRef one = lp_build_one(gallivm, type);
2766 unsigned chan;
2767
2768 for (chan = 0; chan < 4; chan++) {
2769 texel_out[chan] = one;
2770 }
2771 }
2772
2773 static struct lp_type
2774 lp_build_texel_type(struct lp_type texel_type,
2775 const struct util_format_description *format_desc)
2776 {
2777 /* always using the first channel should hopefully be safe;
2778 * if not, things WILL break in other places anyway.
2779 */
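/*
 * Example (illustrative): for a pure-integer format such as
 * PIPE_FORMAT_R32G32B32A32_SINT and an 8x32 float texel_type this
 * returns lp_type_int_vec(32, 256), i.e. an 8x32 signed int vector;
 * a stencil-only format yields the uint equivalent.
 */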
2780 if (format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB &&
2781 format_desc->channel[0].pure_integer) {
2782 if (format_desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED) {
2783 texel_type = lp_type_int_vec(texel_type.width, texel_type.width * texel_type.length);
2784 } else if (format_desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED) {
2785 texel_type = lp_type_uint_vec(texel_type.width, texel_type.width * texel_type.length);
2786 }
2787 }
2788 else if (util_format_has_stencil(format_desc) &&
2789 !util_format_has_depth(format_desc)) {
2790 /* for stencil only formats, sample stencil (uint) */
2791 texel_type = lp_type_uint_vec(texel_type.width, texel_type.width * texel_type.length);
2792 }
2793 return texel_type;
2794 }
2795
2796
2797 /**
2798 * Build the actual texture sampling code.
2799 * 'texel' will return a vector of four LLVMValueRefs corresponding to
2800 * R, G, B, A.
2801 * \param type vector float type to use for coords, etc.
2802 * \param sample_key packed LP_SAMPLER_* bits describing the operation (op type, lod control etc.)
2803 * \param derivs partial derivatives of (s,t,r,q) with respect to x and y
2804 */
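/*
 * A minimal, hypothetical sample_key composition for a plain texture
 * sample with lod bias, to illustrate the bitfield decoding below
 * (the actual mask/shift layout lives in lp_bld_sample.h):
 *
 *    unsigned sample_key =
 *       (LP_SAMPLER_OP_TEXTURE << LP_SAMPLER_OP_TYPE_SHIFT) |
 *       (LP_SAMPLER_LOD_BIAS << LP_SAMPLER_LOD_CONTROL_SHIFT);
 */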
2805 static void
2806 lp_build_sample_soa_code(struct gallivm_state *gallivm,
2807 const struct lp_static_texture_state *static_texture_state,
2808 const struct lp_static_sampler_state *static_sampler_state,
2809 struct lp_sampler_dynamic_state *dynamic_state,
2810 struct lp_type type,
2811 unsigned sample_key,
2812 unsigned texture_index,
2813 unsigned sampler_index,
2814 LLVMValueRef context_ptr,
2815 LLVMValueRef thread_data_ptr,
2816 const LLVMValueRef *coords,
2817 const LLVMValueRef *offsets,
2818 const struct lp_derivatives *derivs, /* optional */
2819 LLVMValueRef lod, /* optional */
2820 LLVMValueRef ms_index, /* optional */
2821 LLVMValueRef texel_out[4])
2822 {
2823 unsigned target = static_texture_state->target;
2824 unsigned dims = texture_dims(target);
2825 unsigned num_quads = type.length / 4;
2826 unsigned mip_filter, min_img_filter, mag_img_filter, i;
2827 struct lp_build_sample_context bld;
2828 struct lp_static_sampler_state derived_sampler_state = *static_sampler_state;
2829 LLVMTypeRef i32t = LLVMInt32TypeInContext(gallivm->context);
2830 LLVMBuilderRef builder = gallivm->builder;
2831 LLVMValueRef tex_width, newcoords[5];
2832 enum lp_sampler_lod_property lod_property;
2833 enum lp_sampler_lod_control lod_control;
2834 enum lp_sampler_op_type op_type;
2835 LLVMValueRef lod_bias = NULL;
2836 LLVMValueRef explicit_lod = NULL;
2837 boolean op_is_tex, op_is_lodq, op_is_gather, fetch_ms;
2838
2839 if (0) {
2840 enum pipe_format fmt = static_texture_state->format;
2841 debug_printf("Sample from %s\n", util_format_name(fmt));
2842 }
2843
2844 lod_property = (sample_key & LP_SAMPLER_LOD_PROPERTY_MASK) >>
2845 LP_SAMPLER_LOD_PROPERTY_SHIFT;
2846 lod_control = (sample_key & LP_SAMPLER_LOD_CONTROL_MASK) >>
2847 LP_SAMPLER_LOD_CONTROL_SHIFT;
2848 op_type = (sample_key & LP_SAMPLER_OP_TYPE_MASK) >>
2849 LP_SAMPLER_OP_TYPE_SHIFT;
2850 fetch_ms = !!(sample_key & LP_SAMPLER_FETCH_MS);
2851
2852 op_is_tex = op_type == LP_SAMPLER_OP_TEXTURE;
2853 op_is_lodq = op_type == LP_SAMPLER_OP_LODQ;
2854 op_is_gather = op_type == LP_SAMPLER_OP_GATHER;
2855
2856 if (lod_control == LP_SAMPLER_LOD_BIAS) {
2857 lod_bias = lod;
2858 assert(lod);
2859 assert(derivs == NULL);
2860 }
2861 else if (lod_control == LP_SAMPLER_LOD_EXPLICIT) {
2862 explicit_lod = lod;
2863 assert(lod);
2864 assert(derivs == NULL);
2865 }
2866 else if (lod_control == LP_SAMPLER_LOD_DERIVATIVES) {
2867 assert(derivs);
2868 assert(lod == NULL);
2869 }
2870 else {
2871 assert(derivs == NULL);
2872 assert(lod == NULL);
2873 }
2874
2875 if (static_texture_state->format == PIPE_FORMAT_NONE) {
2876 /*
2877 * If there's nothing bound, format is NONE, and we must return
2878 * all zeros in this case, as mandated by d3d10.
2879 */
2880 unsigned chan;
2881 LLVMValueRef zero = lp_build_zero(gallivm, type);
2882 for (chan = 0; chan < 4; chan++) {
2883 texel_out[chan] = zero;
2884 }
2885 return;
2886 }
2887
2888 assert(type.floating);
2889
2890 /* Setup our build context */
2891 memset(&bld, 0, sizeof bld);
2892 bld.gallivm = gallivm;
2893 bld.context_ptr = context_ptr;
2894 bld.static_sampler_state = &derived_sampler_state;
2895 bld.static_texture_state = static_texture_state;
2896 bld.dynamic_state = dynamic_state;
2897 bld.format_desc = util_format_description(static_texture_state->format);
2898 bld.dims = dims;
2899
2900 if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD || op_is_lodq) {
2901 bld.no_quad_lod = TRUE;
2902 }
2903 if (gallivm_perf & GALLIVM_PERF_NO_RHO_APPROX || op_is_lodq) {
2904 bld.no_rho_approx = TRUE;
2905 }
2906 if (gallivm_perf & GALLIVM_PERF_NO_BRILINEAR || op_is_lodq) {
2907 bld.no_brilinear = TRUE;
2908 }
2909
2910 bld.vector_width = lp_type_width(type);
2911
2912 bld.float_type = lp_type_float(32);
2913 bld.int_type = lp_type_int(32);
2914 bld.coord_type = type;
2915 bld.int_coord_type = lp_int_type(type);
2916 bld.float_size_in_type = lp_type_float(32);
2917 bld.float_size_in_type.length = dims > 1 ? 4 : 1;
2918 bld.int_size_in_type = lp_int_type(bld.float_size_in_type);
2919
2920 bld.texel_type = lp_build_texel_type(type, bld.format_desc);
2921
2922 if (!static_texture_state->level_zero_only ||
2923 !static_sampler_state->max_lod_pos || op_is_lodq) {
2924 derived_sampler_state.min_mip_filter = static_sampler_state->min_mip_filter;
2925 } else {
2926 derived_sampler_state.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
2927 }
2928 if (op_is_gather) {
2929 /*
2930 * gather4 is exactly like GL_LINEAR filtering, just skipping the
2931 * actual filtering at the end. It uses mostly the same paths, so cube
2932 * face selection, coord wrapping etc. all naturally use the same code.
2933 */
2934 derived_sampler_state.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
2935 derived_sampler_state.min_img_filter = PIPE_TEX_FILTER_LINEAR;
2936 derived_sampler_state.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
2937 }
2938 mip_filter = derived_sampler_state.min_mip_filter;
2939
2940 if (0) {
2941 debug_printf(" .min_mip_filter = %u\n", derived_sampler_state.min_mip_filter);
2942 }
2943
2944 if (static_texture_state->target == PIPE_TEXTURE_CUBE ||
2945 static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY)
2946 {
2947 /*
2948 * Seamless filtering ignores wrap modes.
2949 * Setting CLAMP_TO_EDGE is correct for nearest filtering; for
2950 * bilinear it's not correct, but way better than using, for instance, repeat.
2951 * Note we even set this for non-seamless. Technically GL allows any wrap
2952 * mode, which made sense when true borders were supported (a seamless
2953 * effect can be had with border and CLAMP_TO_BORDER), but gallium doesn't
2954 * support borders, d3d9 requires wrap modes to be ignored, and it's a pain
2955 * to fix up the sampler state (as it makes it texture dependent).
2956 */
2957 derived_sampler_state.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
2958 derived_sampler_state.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
2959 }
2960 /*
2961 * We could force CLAMP to CLAMP_TO_EDGE here if min/mag filter is nearest,
2962 * so AoS path could be used. Not sure it's worth the trouble...
2963 */
2964
2965 min_img_filter = derived_sampler_state.min_img_filter;
2966 mag_img_filter = derived_sampler_state.mag_img_filter;
2967
2968
2969 /*
2970 * This is all a bit complicated; different paths are chosen for performance
2971 * reasons.
2972 * Essentially, there can be 1 lod per element, 1 lod per quad or 1 lod for
2973 * everything (the last two options are equivalent in the 4-wide case).
2974 * If there's per-quad lod but we split to 4-wide so we can use AoS, the
2975 * per-quad lod is calculated and the lod value extracted afterwards, making
2976 * this case basically the same as the 1-lod-for-everything case as far as
2977 * lod handling in the further sample/filter code is concerned.
2978 * Different lod handling mostly shows up when building mipmap sizes
2979 * (lp_build_mipmap_level_sizes() and friends) and also in filtering
2980 * (getting the fractional part of the lod to the right texels).
2981 */
2982
2983 /*
2984 * There are other situations where at least the multiple int lods could be
2985 * avoided like min and max lod being equal.
2986 */
2987 bld.num_mips = bld.num_lods = 1;
2988
2989 if (bld.no_quad_lod && bld.no_rho_approx &&
2990 ((mip_filter != PIPE_TEX_MIPFILTER_NONE && op_is_tex &&
2991 (static_texture_state->target == PIPE_TEXTURE_CUBE ||
2992 static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY)) ||
2993 op_is_lodq)) {
2994 /*
2995 * special case of using per-pixel lod even for implicit lod,
2996 * which is generally never required (ok by APIs) except to please
2997 * some (somewhat broken imho) tests (because per-pixel face selection
2998 * can cause derivatives to differ for pixels outside the primitive
2999 * due to the major axis division, even when the pre-project derivatives
3000 * look normal).
3001 * For lodq, we do it simply to avoid scalar pack / unpack (albeit for
3002 * cube maps we do indeed get per-pixel lod values).
3003 */
3004 bld.num_mips = type.length;
3005 bld.num_lods = type.length;
3006 }
3007 else if (lod_property == LP_SAMPLER_LOD_PER_ELEMENT ||
3008 (explicit_lod || lod_bias || derivs)) {
3009 if ((!op_is_tex && target != PIPE_BUFFER) ||
3010 (op_is_tex && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
3011 bld.num_mips = type.length;
3012 bld.num_lods = type.length;
3013 }
3014 else if (op_is_tex && min_img_filter != mag_img_filter) {
3015 bld.num_mips = 1;
3016 bld.num_lods = type.length;
3017 }
3018 }
3019 /* TODO: for true scalar_lod should only use 1 lod value */
3020 else if ((!op_is_tex && explicit_lod && target != PIPE_BUFFER) ||
3021 (op_is_tex && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
3022 bld.num_mips = num_quads;
3023 bld.num_lods = num_quads;
3024 }
3025 else if (op_is_tex && min_img_filter != mag_img_filter) {
3026 bld.num_mips = 1;
3027 bld.num_lods = num_quads;
3028 }
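/*
 * Rough summary for an 8-wide type (num_quads == 2), assuming a plain
 * texture op: the per-pixel paths give (num_mips, num_lods) = (8, 8),
 * per-quad mip filtering gives (2, 2), min != mag filtering with a
 * single mip level gives (1, 8) or (1, 2), and everything else stays
 * at (1, 1).
 */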
3029
3030 bld.fetch_ms = fetch_ms;
3031 if (op_is_gather)
3032 bld.gather_comp = (sample_key & LP_SAMPLER_GATHER_COMP_MASK) >> LP_SAMPLER_GATHER_COMP_SHIFT;
3033 bld.lodf_type = type;
3034 /* we want native vector size to be able to use our intrinsics */
3035 if (bld.num_lods != type.length) {
3036 /* TODO: this currently always has to be per-quad or per-element */
3037 bld.lodf_type.length = type.length > 4 ? ((type.length + 15) / 16) * 4 : 1;
3038 }
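/*
 * E.g. an 8-wide type with per-quad lod has only 2 lods, but they are
 * kept in a length-4 vector here, the smallest native size; for a
 * 16-wide type the 4 per-quad lods fit a 4-wide vector exactly.
 */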
3039 bld.lodi_type = lp_int_type(bld.lodf_type);
3040 bld.levelf_type = bld.lodf_type;
3041 if (bld.num_mips == 1) {
3042 bld.levelf_type.length = 1;
3043 }
3044 bld.leveli_type = lp_int_type(bld.levelf_type);
3045 bld.float_size_type = bld.float_size_in_type;
3046 /* Note: size vectors may not be native. They contain minified w/h/d/_ values,
3047 * with per-element lod that is w0/h0/d0/_/w1/h1/d1/_/... so up to 8x4f32 */
3048 if (bld.num_mips > 1) {
3049 bld.float_size_type.length = bld.num_mips == type.length ?
3050 bld.num_mips * bld.float_size_in_type.length :
3051 type.length;
3052 }
3053 bld.int_size_type = lp_int_type(bld.float_size_type);
3054
3055 lp_build_context_init(&bld.float_bld, gallivm, bld.float_type);
3056 lp_build_context_init(&bld.float_vec_bld, gallivm, type);
3057 lp_build_context_init(&bld.int_bld, gallivm, bld.int_type);
3058 lp_build_context_init(&bld.coord_bld, gallivm, bld.coord_type);
3059 lp_build_context_init(&bld.int_coord_bld, gallivm, bld.int_coord_type);
3060 lp_build_context_init(&bld.int_size_in_bld, gallivm, bld.int_size_in_type);
3061 lp_build_context_init(&bld.float_size_in_bld, gallivm, bld.float_size_in_type);
3062 lp_build_context_init(&bld.int_size_bld, gallivm, bld.int_size_type);
3063 lp_build_context_init(&bld.float_size_bld, gallivm, bld.float_size_type);
3064 lp_build_context_init(&bld.texel_bld, gallivm, bld.texel_type);
3065 lp_build_context_init(&bld.levelf_bld, gallivm, bld.levelf_type);
3066 lp_build_context_init(&bld.leveli_bld, gallivm, bld.leveli_type);
3067 lp_build_context_init(&bld.lodf_bld, gallivm, bld.lodf_type);
3068 lp_build_context_init(&bld.lodi_bld, gallivm, bld.lodi_type);
3069
3070 /* Get the dynamic state */
3071 tex_width = dynamic_state->width(dynamic_state, gallivm,
3072 context_ptr, texture_index, NULL);
3073 bld.row_stride_array = dynamic_state->row_stride(dynamic_state, gallivm,
3074 context_ptr, texture_index, NULL);
3075 bld.img_stride_array = dynamic_state->img_stride(dynamic_state, gallivm,
3076 context_ptr, texture_index, NULL);
3077 bld.base_ptr = dynamic_state->base_ptr(dynamic_state, gallivm,
3078 context_ptr, texture_index, NULL);
3079 bld.mip_offsets = dynamic_state->mip_offsets(dynamic_state, gallivm,
3080 context_ptr, texture_index, NULL);
3081
3082 if (fetch_ms)
3083 bld.sample_stride = lp_build_broadcast_scalar(&bld.int_coord_bld, dynamic_state->sample_stride(dynamic_state, gallivm,
3084 context_ptr, texture_index, NULL));
3085 /* Note that mip_offsets is an array[level] of offsets to texture images */
3086
3087 if (dynamic_state->cache_ptr && thread_data_ptr) {
3088 bld.cache = dynamic_state->cache_ptr(dynamic_state, gallivm,
3089 thread_data_ptr, texture_index);
3090 }
3091
3092 /* width, height, depth as single int vector */
3093 if (dims <= 1) {
3094 bld.int_size = tex_width;
3095 }
3096 else {
3097 bld.int_size = LLVMBuildInsertElement(builder, bld.int_size_in_bld.undef,
3098 tex_width,
3099 LLVMConstInt(i32t, 0, 0), "");
3100 if (dims >= 2) {
3101 LLVMValueRef tex_height =
3102 dynamic_state->height(dynamic_state, gallivm,
3103 context_ptr, texture_index, NULL);
3104 bld.int_size = LLVMBuildInsertElement(builder, bld.int_size,
3105 tex_height,
3106 LLVMConstInt(i32t, 1, 0), "");
3107 if (dims >= 3) {
3108 LLVMValueRef tex_depth =
3109 dynamic_state->depth(dynamic_state, gallivm, context_ptr,
3110 texture_index, NULL);
3111 bld.int_size = LLVMBuildInsertElement(builder, bld.int_size,
3112 tex_depth,
3113 LLVMConstInt(i32t, 2, 0), "");
3114 }
3115 }
3116 }
3117
3118 for (i = 0; i < 5; i++) {
3119 newcoords[i] = coords[i];
3120 }
3121
3122 if (util_format_is_pure_integer(static_texture_state->format) &&
3123 !util_format_has_depth(bld.format_desc) && op_is_tex &&
3124 (static_sampler_state->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR ||
3125 static_sampler_state->min_img_filter == PIPE_TEX_FILTER_LINEAR ||
3126 static_sampler_state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)) {
3127 /*
3128 * Bail if impossible filtering is specified (the awkward additional
3129 * depth check is needed because in gallium it is legal to have formats
3130 * like S8Z24 here, which claim to be pure int even though such formats
3131 * should sample the depth component).
3132 * In GL such filters make the texture incomplete; this makes us robust
3133 * against gallium frontends which set this up regardless (otherwise
3134 * we'd crash in the lerp later).
3135 * At least in some APIs it may be legal to use such filters with lod
3136 * queries and/or gather (at least for gather, d3d10 says only the wrap
3137 * bits are really used, hence the filter bits are likely simply ignored).
3138 * For fetch, we don't get valid samplers either way here.
3139 */
3140 unsigned chan;
3141 LLVMValueRef zero = lp_build_zero(gallivm, type);
3142 for (chan = 0; chan < 4; chan++) {
3143 texel_out[chan] = zero;
3144 }
3145 return;
3146 }
3147
3148 if (0) {
3149 /* For debug: no-op texture sampling */
3150 lp_build_sample_nop(gallivm,
3151 bld.texel_type,
3152 newcoords,
3153 texel_out);
3154 }
3155
3156 else if (op_type == LP_SAMPLER_OP_FETCH) {
3157 lp_build_fetch_texel(&bld, texture_index, ms_index, newcoords,
3158 lod, offsets,
3159 texel_out);
3160 }
3161
3162 else {
3163 LLVMValueRef lod_fpart = NULL, lod_positive = NULL;
3164 LLVMValueRef ilevel0 = NULL, ilevel1 = NULL, lod = NULL;
3165 boolean use_aos;
3166
3167 use_aos = util_format_fits_8unorm(bld.format_desc) &&
3168 op_is_tex &&
3169 /* not sure this is strictly needed or simply impossible */
3170 derived_sampler_state.compare_mode == PIPE_TEX_COMPARE_NONE &&
3171 lp_is_simple_wrap_mode(derived_sampler_state.wrap_s);
3172
3173 use_aos &= bld.num_lods <= num_quads ||
3174 derived_sampler_state.min_img_filter ==
3175 derived_sampler_state.mag_img_filter;
3176
3177 if (gallivm_perf & GALLIVM_PERF_NO_AOS_SAMPLING) {
3178 use_aos = 0;
3179 }
3180
3181 if (dims > 1) {
3182 use_aos &= lp_is_simple_wrap_mode(derived_sampler_state.wrap_t);
3183 if (dims > 2) {
3184 use_aos &= lp_is_simple_wrap_mode(derived_sampler_state.wrap_r);
3185 }
3186 }
3187 if ((static_texture_state->target == PIPE_TEXTURE_CUBE ||
3188 static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) &&
3189 derived_sampler_state.seamless_cube_map &&
3190 (derived_sampler_state.min_img_filter == PIPE_TEX_FILTER_LINEAR ||
3191 derived_sampler_state.mag_img_filter == PIPE_TEX_FILTER_LINEAR)) {
3192 /* theoretically possible with AoS filtering but not implemented (complex!) */
3193 use_aos = 0;
3194 }
3195
3196 if ((gallivm_debug & GALLIVM_DEBUG_PERF) &&
3197 !use_aos && util_format_fits_8unorm(bld.format_desc)) {
3198 debug_printf("%s: using floating point linear filtering for %s\n",
3199 __FUNCTION__, bld.format_desc->short_name);
3200 debug_printf(" min_img %d mag_img %d mip %d target %d seamless %d"
3201 " wraps %d wrapt %d wrapr %d\n",
3202 derived_sampler_state.min_img_filter,
3203 derived_sampler_state.mag_img_filter,
3204 derived_sampler_state.min_mip_filter,
3205 static_texture_state->target,
3206 derived_sampler_state.seamless_cube_map,
3207 derived_sampler_state.wrap_s,
3208 derived_sampler_state.wrap_t,
3209 derived_sampler_state.wrap_r);
3210 }
3211
3212 lp_build_sample_common(&bld, op_is_lodq, texture_index, sampler_index,
3213 newcoords,
3214 derivs, lod_bias, explicit_lod,
3215 &lod_positive, &lod, &lod_fpart,
3216 &ilevel0, &ilevel1);
3217
3218 if (op_is_lodq) {
3219 texel_out[0] = lod_fpart;
3220 texel_out[1] = lod;
3221 texel_out[2] = texel_out[3] = bld.coord_bld.zero;
3222 return;
3223 }
3224
3225 if (use_aos && static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) {
3226 /* The aos path doesn't do seamless filtering so simply add cube layer
3227 * to face now.
3228 */
3229 newcoords[2] = lp_build_add(&bld.int_coord_bld, newcoords[2], newcoords[3]);
3230 }
3231
3232 /*
3233 * We only try 8-wide sampling with soa, or if we have AVX2,
3234 * as it appears to be a loss with just AVX.
3235 */
3236 if (num_quads == 1 || !use_aos ||
3237 (util_cpu_caps.has_avx2 &&
3238 (bld.num_lods == 1 ||
3239 derived_sampler_state.min_img_filter == derived_sampler_state.mag_img_filter))) {
3240 if (use_aos) {
3241 /* do sampling/filtering with fixed pt arithmetic */
3242 lp_build_sample_aos(&bld, sampler_index,
3243 newcoords[0], newcoords[1],
3244 newcoords[2],
3245 offsets, lod_positive, lod_fpart,
3246 ilevel0, ilevel1,
3247 texel_out);
3248 }
3249
3250 else {
3251 lp_build_sample_general(&bld, sampler_index,
3252 op_type == LP_SAMPLER_OP_GATHER,
3253 newcoords, offsets,
3254 lod_positive, lod_fpart,
3255 ilevel0, ilevel1,
3256 texel_out);
3257 }
3258 }
3259 else {
3260 unsigned j;
3261 struct lp_build_sample_context bld4;
3262 struct lp_type type4 = type;
3263 unsigned i;
3264 LLVMValueRef texelout4[4];
3265 LLVMValueRef texelouttmp[4][LP_MAX_VECTOR_LENGTH/16];
3266
3267 type4.length = 4;
3268
3269 /* Setup our build context */
3270 memset(&bld4, 0, sizeof bld4);
3271 bld4.no_quad_lod = bld.no_quad_lod;
3272 bld4.no_rho_approx = bld.no_rho_approx;
3273 bld4.no_brilinear = bld.no_brilinear;
3274 bld4.gallivm = bld.gallivm;
3275 bld4.context_ptr = bld.context_ptr;
3276 bld4.static_texture_state = bld.static_texture_state;
3277 bld4.static_sampler_state = bld.static_sampler_state;
3278 bld4.dynamic_state = bld.dynamic_state;
3279 bld4.format_desc = bld.format_desc;
3280 bld4.dims = bld.dims;
3281 bld4.row_stride_array = bld.row_stride_array;
3282 bld4.img_stride_array = bld.img_stride_array;
3283 bld4.base_ptr = bld.base_ptr;
3284 bld4.mip_offsets = bld.mip_offsets;
3285 bld4.int_size = bld.int_size;
3286 bld4.cache = bld.cache;
3287
3288 bld4.vector_width = lp_type_width(type4);
3289
3290 bld4.float_type = lp_type_float(32);
3291 bld4.int_type = lp_type_int(32);
3292 bld4.coord_type = type4;
3293 bld4.int_coord_type = lp_int_type(type4);
3294 bld4.float_size_in_type = lp_type_float(32);
3295 bld4.float_size_in_type.length = dims > 1 ? 4 : 1;
3296 bld4.int_size_in_type = lp_int_type(bld4.float_size_in_type);
3297 bld4.texel_type = bld.texel_type;
3298 bld4.texel_type.length = 4;
3299
3300 bld4.num_mips = bld4.num_lods = 1;
3301 if (bld4.no_quad_lod && bld4.no_rho_approx &&
3302 (static_texture_state->target == PIPE_TEXTURE_CUBE ||
3303 static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) &&
3304 (op_is_tex && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
3305 bld4.num_mips = type4.length;
3306 bld4.num_lods = type4.length;
3307 }
3308 if (lod_property == LP_SAMPLER_LOD_PER_ELEMENT &&
3309 (explicit_lod || lod_bias || derivs)) {
3310 if ((!op_is_tex && target != PIPE_BUFFER) ||
3311 (op_is_tex && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
3312 bld4.num_mips = type4.length;
3313 bld4.num_lods = type4.length;
3314 }
3315 else if (op_is_tex && min_img_filter != mag_img_filter) {
3316 bld4.num_mips = 1;
3317 bld4.num_lods = type4.length;
3318 }
3319 }
3320
3321 /* we want a native vector size so we can use our intrinsics */
3322 bld4.lodf_type = type4;
3323 if (bld4.num_lods != type4.length) {
3324 bld4.lodf_type.length = 1;
3325 }
3326 bld4.lodi_type = lp_int_type(bld4.lodf_type);
3327 bld4.levelf_type = type4;
3328 if (bld4.num_mips != type4.length) {
3329 bld4.levelf_type.length = 1;
3330 }
3331 bld4.leveli_type = lp_int_type(bld4.levelf_type);
3332 bld4.float_size_type = bld4.float_size_in_type;
3333 if (bld4.num_mips > 1) {
3334 bld4.float_size_type.length = bld4.num_mips == type4.length ?
3335 bld4.num_mips * bld4.float_size_in_type.length :
3336 type4.length;
3337 }
3338 bld4.int_size_type = lp_int_type(bld4.float_size_type);
3339
3340 lp_build_context_init(&bld4.float_bld, gallivm, bld4.float_type);
3341 lp_build_context_init(&bld4.float_vec_bld, gallivm, type4);
3342 lp_build_context_init(&bld4.int_bld, gallivm, bld4.int_type);
3343 lp_build_context_init(&bld4.coord_bld, gallivm, bld4.coord_type);
3344 lp_build_context_init(&bld4.int_coord_bld, gallivm, bld4.int_coord_type);
3345 lp_build_context_init(&bld4.int_size_in_bld, gallivm, bld4.int_size_in_type);
3346 lp_build_context_init(&bld4.float_size_in_bld, gallivm, bld4.float_size_in_type);
3347 lp_build_context_init(&bld4.int_size_bld, gallivm, bld4.int_size_type);
3348 lp_build_context_init(&bld4.float_size_bld, gallivm, bld4.float_size_type);
3349 lp_build_context_init(&bld4.texel_bld, gallivm, bld4.texel_type);
3350 lp_build_context_init(&bld4.levelf_bld, gallivm, bld4.levelf_type);
3351 lp_build_context_init(&bld4.leveli_bld, gallivm, bld4.leveli_type);
3352 lp_build_context_init(&bld4.lodf_bld, gallivm, bld4.lodf_type);
3353 lp_build_context_init(&bld4.lodi_bld, gallivm, bld4.lodi_type);
3354
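/*
 * Sample 4 pixels at a time: slice out each quad's coords, offsets and
 * lod values, run the 4-wide sampler on them, and concatenate the
 * per-quad results back into full-width texels afterwards.
 */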
3355 for (i = 0; i < num_quads; i++) {
3356 LLVMValueRef s4, t4, r4;
3357 LLVMValueRef lod_positive4, lod_fpart4 = NULL;
3358 LLVMValueRef ilevel04, ilevel14 = NULL;
3359 LLVMValueRef offsets4[4] = { NULL };
3360 unsigned num_lods = bld4.num_lods;
3361
3362 s4 = lp_build_extract_range(gallivm, newcoords[0], 4*i, 4);
3363 t4 = lp_build_extract_range(gallivm, newcoords[1], 4*i, 4);
3364 r4 = lp_build_extract_range(gallivm, newcoords[2], 4*i, 4);
3365
3366 if (offsets[0]) {
3367 offsets4[0] = lp_build_extract_range(gallivm, offsets[0], 4*i, 4);
3368 if (dims > 1) {
3369 offsets4[1] = lp_build_extract_range(gallivm, offsets[1], 4*i, 4);
3370 if (dims > 2) {
3371 offsets4[2] = lp_build_extract_range(gallivm, offsets[2], 4*i, 4);
3372 }
3373 }
3374 }
3375 lod_positive4 = lp_build_extract_range(gallivm, lod_positive, num_lods * i, num_lods);
3376 ilevel04 = bld.num_mips == 1 ? ilevel0 :
3377 lp_build_extract_range(gallivm, ilevel0, num_lods * i, num_lods);
3378 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
3379 ilevel14 = lp_build_extract_range(gallivm, ilevel1, num_lods * i, num_lods);
3380 lod_fpart4 = lp_build_extract_range(gallivm, lod_fpart, num_lods * i, num_lods);
3381 }
3382
3383 if (use_aos) {
3384 /* do sampling/filtering with fixed pt arithmetic */
3385 lp_build_sample_aos(&bld4, sampler_index,
3386 s4, t4, r4, offsets4,
3387 lod_positive4, lod_fpart4,
3388 ilevel04, ilevel14,
3389 texelout4);
3390 }
3391
3392 else {
3393 /* this path is currently unreachable and hence might break easily... */
3394 LLVMValueRef newcoords4[5];
3395 newcoords4[0] = s4;
3396 newcoords4[1] = t4;
3397 newcoords4[2] = r4;
3398 newcoords4[3] = lp_build_extract_range(gallivm, newcoords[3], 4*i, 4);
3399 newcoords4[4] = lp_build_extract_range(gallivm, newcoords[4], 4*i, 4);
3400
3401 lp_build_sample_general(&bld4, sampler_index,
3402 op_type == LP_SAMPLER_OP_GATHER,
3403 newcoords4, offsets4,
3404 lod_positive4, lod_fpart4,
3405 ilevel04, ilevel14,
3406 texelout4);
3407 }
3408 for (j = 0; j < 4; j++) {
3409 texelouttmp[j][i] = texelout4[j];
3410 }
3411 }
3412
3413 for (j = 0; j < 4; j++) {
3414 texel_out[j] = lp_build_concat(gallivm, texelouttmp[j], type4, num_quads);
3415 }
3416 }
3417 }
3418
3419 if (target != PIPE_BUFFER && op_type != LP_SAMPLER_OP_GATHER) {
3420 apply_sampler_swizzle(&bld, texel_out);
3421 }
3422
3423 /*
3424 * texel type can be a (32bit) int/uint (for pure int formats only),
3425 * however we are expected to always return floats (storage is untyped).
3426 */
3427 if (!bld.texel_type.floating) {
3428 unsigned chan;
3429 for (chan = 0; chan < 4; chan++) {
3430 texel_out[chan] = LLVMBuildBitCast(builder, texel_out[chan],
3431 lp_build_vec_type(gallivm, type), "");
3432 }
3433 }
3434 }
3435
3436
3437 #define USE_TEX_FUNC_CALL 1
3438
3439 #define LP_MAX_TEX_FUNC_ARGS 32
3440
3441 static inline void
3442 get_target_info(enum pipe_texture_target target,
3443 unsigned *num_coords, unsigned *num_derivs,
3444 unsigned *num_offsets, unsigned *layer)
3445 {
3446 unsigned dims = texture_dims(target);
3447 *num_coords = dims;
3448 *num_offsets = dims;
3449 *num_derivs = (target == PIPE_TEXTURE_CUBE ||
3450 target == PIPE_TEXTURE_CUBE_ARRAY) ? 3 : dims;
3451 *layer = has_layer_coord(target) ? 2 : 0;
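/*
 * e.g. PIPE_TEXTURE_2D_ARRAY: num_coords = 2 and the layer lives in
 * coord slot 2; cube arrays are fixed up below to use slot 3.
 */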
3452 if (target == PIPE_TEXTURE_CUBE_ARRAY) {
3453 /*
3454 * dims doesn't include the r coord for cubes - this is handled
3455 * by layer instead, but we need to fix this up for cube arrays...
3456 */
3457 *layer = 3;
3458 *num_coords = 3;
3459 }
3460 }
3461
3462
3463 /**
3464 * Generate the function body for a texture sampling function.
3465 */
3466 static void
3467 lp_build_sample_gen_func(struct gallivm_state *gallivm,
3468 const struct lp_static_texture_state *static_texture_state,
3469 const struct lp_static_sampler_state *static_sampler_state,
3470 struct lp_sampler_dynamic_state *dynamic_state,
3471 struct lp_type type,
3472 unsigned texture_index,
3473 unsigned sampler_index,
3474 LLVMValueRef function,
3475 unsigned num_args,
3476 unsigned sample_key)
3477 {
3478 LLVMBuilderRef old_builder;
3479 LLVMBasicBlockRef block;
3480 LLVMValueRef coords[5];
3481 LLVMValueRef offsets[3] = { NULL };
3482 LLVMValueRef lod = NULL;
3483 LLVMValueRef ms_index = NULL;
3484 LLVMValueRef context_ptr;
3485 LLVMValueRef thread_data_ptr = NULL;
3486 LLVMValueRef texel_out[4];
3487 struct lp_derivatives derivs;
3488 struct lp_derivatives *deriv_ptr = NULL;
3489 unsigned num_param = 0;
3490 unsigned i, num_coords, num_derivs, num_offsets, layer;
3491 enum lp_sampler_lod_control lod_control;
3492 enum lp_sampler_op_type op_type;
3493 boolean need_cache = FALSE;
3494
3495 lod_control = (sample_key & LP_SAMPLER_LOD_CONTROL_MASK) >>
3496 LP_SAMPLER_LOD_CONTROL_SHIFT;
3497
3498 op_type = (sample_key & LP_SAMPLER_OP_TYPE_MASK) >>
3499 LP_SAMPLER_OP_TYPE_SHIFT;
3500
3501 get_target_info(static_texture_state->target,
3502 &num_coords, &num_derivs, &num_offsets, &layer);
3503
3504 /* lod query doesn't take a layer */
3505 if (layer && op_type == LP_SAMPLER_OP_LODQ)
3506 layer = 0;
3507
3508 if (dynamic_state->cache_ptr) {
3509 const struct util_format_description *format_desc;
3510 format_desc = util_format_description(static_texture_state->format);
3511 if (format_desc && format_desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
3512 need_cache = TRUE;
3513 }
3514 }
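/* (a per-thread texel cache is only needed for S3TC-layout formats,
 * which go through the cached fetch path) */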
3515
3516 /* "unpack" arguments */
3517 context_ptr = LLVMGetParam(function, num_param++);
3518 if (need_cache) {
3519 thread_data_ptr = LLVMGetParam(function, num_param++);
3520 }
3521 for (i = 0; i < num_coords; i++) {
3522 coords[i] = LLVMGetParam(function, num_param++);
3523 }
3524 for (i = num_coords; i < 5; i++) {
3525 /* This is rather unfortunate... */
3526 coords[i] = lp_build_undef(gallivm, type);
3527 }
3528 if (layer) {
3529 coords[layer] = LLVMGetParam(function, num_param++);
3530 }
3531 if (sample_key & LP_SAMPLER_SHADOW) {
3532 coords[4] = LLVMGetParam(function, num_param++);
3533 }
3534 if (sample_key & LP_SAMPLER_FETCH_MS) {
3535 ms_index = LLVMGetParam(function, num_param++);
3536 }
3537 if (sample_key & LP_SAMPLER_OFFSETS) {
3538 for (i = 0; i < num_offsets; i++) {
3539 offsets[i] = LLVMGetParam(function, num_param++);
3540 }
3541 }
3542 if (lod_control == LP_SAMPLER_LOD_BIAS ||
3543 lod_control == LP_SAMPLER_LOD_EXPLICIT) {
3544 lod = LLVMGetParam(function, num_param++);
3545 }
3546 else if (lod_control == LP_SAMPLER_LOD_DERIVATIVES) {
3547 for (i = 0; i < num_derivs; i++) {
3548 derivs.ddx[i] = LLVMGetParam(function, num_param++);
3549 derivs.ddy[i] = LLVMGetParam(function, num_param++);
3550 }
3551 deriv_ptr = &derivs;
3552 }
3553
3554 assert(num_args == num_param);
3555
3556 /*
3557 * Function body
3558 */
3559
3560 old_builder = gallivm->builder;
3561 block = LLVMAppendBasicBlockInContext(gallivm->context, function, "entry");
3562 gallivm->builder = LLVMCreateBuilderInContext(gallivm->context);
3563 LLVMPositionBuilderAtEnd(gallivm->builder, block);
3564
3565 lp_build_sample_soa_code(gallivm,
3566 static_texture_state,
3567 static_sampler_state,
3568 dynamic_state,
3569 type,
3570 sample_key,
3571 texture_index,
3572 sampler_index,
3573 context_ptr,
3574 thread_data_ptr,
3575 coords,
3576 offsets,
3577 deriv_ptr,
3578 lod,
3579 ms_index,
3580 texel_out);
3581
3582 LLVMBuildAggregateRet(gallivm->builder, texel_out, 4);
3583
3584 LLVMDisposeBuilder(gallivm->builder);
3585 gallivm->builder = old_builder;
3586
3587 gallivm_verify_function(gallivm, function);
3588 }
3589
3590
3591 /**
3592 * Call the matching function for texture sampling.
3593 * If there's no match, generate a new one.
3594 */
3595 static void
3596 lp_build_sample_soa_func(struct gallivm_state *gallivm,
3597 const struct lp_static_texture_state *static_texture_state,
3598 const struct lp_static_sampler_state *static_sampler_state,
3599 struct lp_sampler_dynamic_state *dynamic_state,
3600 const struct lp_sampler_params *params,
3601 int texture_index, int sampler_index,
3602 LLVMValueRef *tex_ret)
3603 {
3604 LLVMBuilderRef builder = gallivm->builder;
3605 LLVMModuleRef module = LLVMGetGlobalParent(LLVMGetBasicBlockParent(
3606 LLVMGetInsertBlock(builder)));
3607 LLVMValueRef function, inst;
3608 LLVMValueRef args[LP_MAX_TEX_FUNC_ARGS];
3609 LLVMBasicBlockRef bb;
3610 unsigned num_args = 0;
3611 char func_name[64];
3612 unsigned i, num_coords, num_derivs, num_offsets, layer;
3613 unsigned sample_key = params->sample_key;
3614 const LLVMValueRef *coords = params->coords;
3615 const LLVMValueRef *offsets = params->offsets;
3616 const struct lp_derivatives *derivs = params->derivs;
3617 enum lp_sampler_lod_control lod_control;
3618 enum lp_sampler_op_type op_type;
3619 boolean need_cache = FALSE;
3620
3621 lod_control = (sample_key & LP_SAMPLER_LOD_CONTROL_MASK) >>
3622 LP_SAMPLER_LOD_CONTROL_SHIFT;
3623
3624 op_type = (sample_key & LP_SAMPLER_OP_TYPE_MASK) >>
3625 LP_SAMPLER_OP_TYPE_SHIFT;
3626
3627 get_target_info(static_texture_state->target,
3628 &num_coords, &num_derivs, &num_offsets, &layer);
3629
3630 /* lod query doesn't take a layer */
3631 if (layer && op_type == LP_SAMPLER_OP_LODQ)
3632 layer = 0;
3633
3634 if (dynamic_state->cache_ptr) {
3635 const struct util_format_description *format_desc;
3636 format_desc = util_format_description(static_texture_state->format);
3637 if (format_desc && format_desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
3638 need_cache = TRUE;
3639 }
3640 }
3641 /*
3642 * Texture function matches are found by name.
3643 * Thus the name has to include both the texture and sampler unit
3644 * (which covers all static state) plus the actual texture function
3645 * (including things like offsets, shadow coord, lod control).
3646 * The lod_property has to be included as well.
3647 */
3648
3649 snprintf(func_name, sizeof(func_name), "texfunc_res_%d_sam_%d_%x",
3650 texture_index, sampler_index, sample_key);
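/* e.g. texture 0 sampled with sampler 0 and sample_key 0x1 yields
 * "texfunc_res_0_sam_0_1" (purely illustrative values) */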
3651
3652 function = LLVMGetNamedFunction(module, func_name);
3653
3654 if (!function) {
3655 LLVMTypeRef arg_types[LP_MAX_TEX_FUNC_ARGS];
3656 LLVMTypeRef ret_type;
3657 LLVMTypeRef function_type;
3658 LLVMTypeRef val_type[4];
3659 unsigned num_param = 0;
3660
3661 /*
3662 * Generate the function prototype.
3663 */
3664
3665 arg_types[num_param++] = LLVMTypeOf(params->context_ptr);
3666 if (need_cache) {
3667 arg_types[num_param++] = LLVMTypeOf(params->thread_data_ptr);
3668 }
3669 for (i = 0; i < num_coords; i++) {
3670 arg_types[num_param++] = LLVMTypeOf(coords[0]);
3671 assert(LLVMTypeOf(coords[0]) == LLVMTypeOf(coords[i]));
3672 }
3673 if (layer) {
3674 arg_types[num_param++] = LLVMTypeOf(coords[layer]);
3675 assert(LLVMTypeOf(coords[0]) == LLVMTypeOf(coords[layer]));
3676 }
3677 if (sample_key & LP_SAMPLER_SHADOW) {
3678 arg_types[num_param++] = LLVMTypeOf(coords[0]);
3679 }
3680 if (sample_key & LP_SAMPLER_FETCH_MS) {
3681 arg_types[num_param++] = LLVMTypeOf(params->ms_index);
3682 }
3683 if (sample_key & LP_SAMPLER_OFFSETS) {
3684 for (i = 0; i < num_offsets; i++) {
3685 arg_types[num_param++] = LLVMTypeOf(offsets[0]);
3686 assert(LLVMTypeOf(offsets[0]) == LLVMTypeOf(offsets[i]));
3687 }
3688 }
3689 if (lod_control == LP_SAMPLER_LOD_BIAS ||
3690 lod_control == LP_SAMPLER_LOD_EXPLICIT) {
3691 arg_types[num_param++] = LLVMTypeOf(params->lod);
3692 }
3693 else if (lod_control == LP_SAMPLER_LOD_DERIVATIVES) {
3694 for (i = 0; i < num_derivs; i++) {
3695 arg_types[num_param++] = LLVMTypeOf(derivs->ddx[i]);
3696 arg_types[num_param++] = LLVMTypeOf(derivs->ddy[i]);
3697 assert(LLVMTypeOf(derivs->ddx[0]) == LLVMTypeOf(derivs->ddx[i]));
3698 assert(LLVMTypeOf(derivs->ddy[0]) == LLVMTypeOf(derivs->ddy[i]));
3699 }
3700 }
3701
3702 val_type[0] = val_type[1] = val_type[2] = val_type[3] =
3703 lp_build_vec_type(gallivm, params->type);
3704 ret_type = LLVMStructTypeInContext(gallivm->context, val_type, 4, 0);
3705 function_type = LLVMFunctionType(ret_type, arg_types, num_param, 0);
3706 function = LLVMAddFunction(module, func_name, function_type);
3707
3708 for (i = 0; i < num_param; ++i) {
3709 if (LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind) {
3711 lp_add_function_attr(function, i + 1, LP_FUNC_ATTR_NOALIAS);
3712 }
3713 }
3714
3715 LLVMSetFunctionCallConv(function, LLVMFastCallConv);
3716 LLVMSetLinkage(function, LLVMInternalLinkage);
3717
3718 lp_build_sample_gen_func(gallivm,
3719 static_texture_state,
3720 static_sampler_state,
3721 dynamic_state,
3722 params->type,
3723 texture_index,
3724 sampler_index,
3725 function,
3726 num_param,
3727 sample_key);
3728 }
3729
3730 num_args = 0;
3731 args[num_args++] = params->context_ptr;
3732 if (need_cache) {
3733 args[num_args++] = params->thread_data_ptr;
3734 }
3735 for (i = 0; i < num_coords; i++) {
3736 args[num_args++] = coords[i];
3737 }
3738 if (layer) {
3739 args[num_args++] = coords[layer];
3740 }
3741 if (sample_key & LP_SAMPLER_SHADOW) {
3742 args[num_args++] = coords[4];
3743 }
3744 if (sample_key & LP_SAMPLER_FETCH_MS) {
3745 args[num_args++] = params->ms_index;
3746 }
3747 if (sample_key & LP_SAMPLER_OFFSETS) {
3748 for (i = 0; i < num_offsets; i++) {
3749 args[num_args++] = offsets[i];
3750 }
3751 }
3752 if (lod_control == LP_SAMPLER_LOD_BIAS ||
3753 lod_control == LP_SAMPLER_LOD_EXPLICIT) {
3754 args[num_args++] = params->lod;
3755 }
3756 else if (lod_control == LP_SAMPLER_LOD_DERIVATIVES) {
3757 for (i = 0; i < num_derivs; i++) {
3758 args[num_args++] = derivs->ddx[i];
3759 args[num_args++] = derivs->ddy[i];
3760 }
3761 }
3762
3763 assert(num_args <= LP_MAX_TEX_FUNC_ARGS);
3764
3765 *tex_ret = LLVMBuildCall(builder, function, args, num_args, "");
3766 bb = LLVMGetInsertBlock(builder);
3767 inst = LLVMGetLastInstruction(bb);
3768 LLVMSetInstructionCallConv(inst, LLVMFastCallConv);
3770 }
3771
3772
3773 /**
3774 * Build texture sampling code.
3775 * Either via a function call or inline it directly.
3776 */
3777 void
3778 lp_build_sample_soa(const struct lp_static_texture_state *static_texture_state,
3779 const struct lp_static_sampler_state *static_sampler_state,
3780 struct lp_sampler_dynamic_state *dynamic_state,
3781 struct gallivm_state *gallivm,
3782 const struct lp_sampler_params *params)
3783 {
3784 boolean use_tex_func = FALSE;
3785
3786 /*
3787 * Do not use a function call if the sampling is "simple enough".
3788 * We define "simple enough" by:
3789 * a) the format
3790 * b) no mips (either one level only or no mip filter)
3791 * No mips will definitely make the code smaller, though
3792 * the format requirement is a bit iffy - there are some (SoA) formats
3793 * which definitely generate less code. This does happen to catch
3794 * some important cases which are hurt quite a bit by using
3795 * a call (not really because of the call overhead but because
3796 * they reuse the same texture unit with some of the same
3797 * parameters).
3798 * Ideally we'd let llvm recognize this stuff by doing IPO passes.
3799 */
3800
3801 if (USE_TEX_FUNC_CALL) {
3802 const struct util_format_description *format_desc;
3803 boolean simple_format;
3804 boolean simple_tex;
3805 enum lp_sampler_op_type op_type;
3806 format_desc = util_format_description(static_texture_state->format);
3807 simple_format = !format_desc ||
3808 (util_format_is_rgba8_variant(format_desc) &&
3809 format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB);
3810
3811 op_type = (params->sample_key & LP_SAMPLER_OP_TYPE_MASK) >>
3812 LP_SAMPLER_OP_TYPE_SHIFT;
3813 simple_tex =
3814 op_type != LP_SAMPLER_OP_TEXTURE ||
3815 ((static_sampler_state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE ||
3816 static_texture_state->level_zero_only == TRUE) &&
3817 static_sampler_state->min_img_filter == static_sampler_state->mag_img_filter);
3818
3819 use_tex_func = format_desc && !(simple_format && simple_tex);
3820 }
3821
3822 if (use_tex_func) {
3823 LLVMValueRef tex_ret;
3824 lp_build_sample_soa_func(gallivm,
3825 static_texture_state,
3826 static_sampler_state,
3827 dynamic_state,
3828 params, params->texture_index, params->sampler_index, &tex_ret);
3829
3830 for (unsigned i = 0; i < 4; i++) {
3831 params->texel[i] = LLVMBuildExtractValue(gallivm->builder, tex_ret, i, "");
3832 }
3833 }
3834 else {
3835 lp_build_sample_soa_code(gallivm,
3836 static_texture_state,
3837 static_sampler_state,
3838 dynamic_state,
3839 params->type,
3840 params->sample_key,
3841 params->texture_index,
3842 params->sampler_index,
3843 params->context_ptr,
3844 params->thread_data_ptr,
3845 params->coords,
3846 params->offsets,
3847 params->derivs,
3848 params->lod,
3849 params->ms_index,
3850 params->texel);
3851 }
3852 }
3853
3854
3855 void
3856 lp_build_size_query_soa(struct gallivm_state *gallivm,
3857 const struct lp_static_texture_state *static_state,
3858 struct lp_sampler_dynamic_state *dynamic_state,
3859 const struct lp_sampler_size_query_params *params)
3860 {
3861 LLVMValueRef lod, level = 0, size;
3862 LLVMValueRef first_level = NULL;
3863 int dims, i;
3864 boolean has_array;
3865 unsigned num_lods = 1;
3866 struct lp_build_context bld_int_vec4;
3867 LLVMValueRef context_ptr = params->context_ptr;
3868 unsigned texture_unit = params->texture_unit;
3869 unsigned target = params->target;
3870 LLVMValueRef texture_unit_offset = params->texture_unit_offset;
3871
3872 if (static_state->format == PIPE_FORMAT_NONE) {
3873 /*
3874 * If there's nothing bound, format is NONE, and we must return
3875 * all zero as mandated by d3d10 in this case.
3876 */
3877 unsigned chan;
3878 LLVMValueRef zero = lp_build_const_vec(gallivm, params->int_type, 0.0F);
3879 for (chan = 0; chan < 4; chan++) {
3880 params->sizes_out[chan] = zero;
3881 }
3882 return;
3883 }
3884
3885 /*
3886 * Do some sanity verification about the bound texture and shader dcl target.
3887 * Not entirely sure what's possible, but assume array/non-array are
3888 * always compatible (probably not ok for OpenGL, but d3d10 has no
3889 * distinction of arrays at the resource level).
3890 * Everything else looks bogus (though not entirely sure about rect/2d).
3891 * Currently disabled because it causes assertion failures if there's
3892 * nothing bound (or rather a dummy texture, not that this case would
3893 * return the right values).
3894 */
3895 if (0 && static_state->target != target) {
3896 if (static_state->target == PIPE_TEXTURE_1D)
3897 assert(target == PIPE_TEXTURE_1D_ARRAY);
3898 else if (static_state->target == PIPE_TEXTURE_1D_ARRAY)
3899 assert(target == PIPE_TEXTURE_1D);
3900 else if (static_state->target == PIPE_TEXTURE_2D)
3901 assert(target == PIPE_TEXTURE_2D_ARRAY);
3902 else if (static_state->target == PIPE_TEXTURE_2D_ARRAY)
3903 assert(target == PIPE_TEXTURE_2D);
3904 else if (static_state->target == PIPE_TEXTURE_CUBE)
3905 assert(target == PIPE_TEXTURE_CUBE_ARRAY);
3906 else if (static_state->target == PIPE_TEXTURE_CUBE_ARRAY)
3907 assert(target == PIPE_TEXTURE_CUBE);
3908 else
3909 assert(0);
3910 }
3911
3912 dims = texture_dims(target);
3913
3914 switch (target) {
3915 case PIPE_TEXTURE_1D_ARRAY:
3916 case PIPE_TEXTURE_2D_ARRAY:
3917 case PIPE_TEXTURE_CUBE_ARRAY:
3918 has_array = TRUE;
3919 break;
3920 default:
3921 has_array = FALSE;
3922 break;
3923 }
3924
3925 assert(!params->int_type.floating);
3926
3927 lp_build_context_init(&bld_int_vec4, gallivm, lp_type_int_vec(32, 128));
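/* bld_int_vec4 is a 4 x i32 context (32-bit elems, 128-bit total) used
 * to assemble the w/h/d/layers size vector */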
3928
3929 if (params->samples_only) {
3930 params->sizes_out[0] = lp_build_broadcast(gallivm, lp_build_vec_type(gallivm, params->int_type),
3931 dynamic_state->num_samples(dynamic_state, gallivm,
3932 context_ptr, texture_unit,
3933 texture_unit_offset));
3934 return;
3935 }
3936 if (params->explicit_lod) {
3937 /* FIXME: this needs to honor per-element lod */
3938 lod = LLVMBuildExtractElement(gallivm->builder, params->explicit_lod,
3939 lp_build_const_int32(gallivm, 0), "");
3940 first_level = dynamic_state->first_level(dynamic_state, gallivm,
3941 context_ptr, texture_unit, texture_unit_offset);
3942 level = LLVMBuildAdd(gallivm->builder, lod, first_level, "level");
3943 lod = lp_build_broadcast_scalar(&bld_int_vec4, level);
3944 } else {
3945 lod = bld_int_vec4.zero;
3946 }
3947
3948 size = bld_int_vec4.undef;
3949
3950 size = LLVMBuildInsertElement(gallivm->builder, size,
3951 dynamic_state->width(dynamic_state, gallivm,
3952 context_ptr, texture_unit, texture_unit_offset),
3953 lp_build_const_int32(gallivm, 0), "");
3954
3955 if (dims >= 2) {
3956 size = LLVMBuildInsertElement(gallivm->builder, size,
3957 dynamic_state->height(dynamic_state, gallivm,
3958 context_ptr, texture_unit, texture_unit_offset),
3959 lp_build_const_int32(gallivm, 1), "");
3960 }
3961
3962 if (dims >= 3) {
3963 size = LLVMBuildInsertElement(gallivm->builder, size,
3964 dynamic_state->depth(dynamic_state, gallivm,
3965 context_ptr, texture_unit, texture_unit_offset),
3966 lp_build_const_int32(gallivm, 2), "");
3967 }
3968
3969 size = lp_build_minify(&bld_int_vec4, size, lod, TRUE);
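/* (minification clamps to 1, i.e. max(size >> lod, 1) per component) */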
3970
3971 if (has_array) {
3972 LLVMValueRef layers = dynamic_state->depth(dynamic_state, gallivm,
3973 context_ptr, texture_unit, texture_unit_offset);
3974 if (target == PIPE_TEXTURE_CUBE_ARRAY) {
3975 /*
3976 * It looks like GL wants the number of cubes; d3d10.1 leaves it undefined?
3977 * Could avoid this by passing in the number of cubes instead of the total
3978 * number of layers (might make things easier elsewhere too).
3979 */
3980 LLVMValueRef six = lp_build_const_int32(gallivm, 6);
3981 layers = LLVMBuildSDiv(gallivm->builder, layers, six, "");
3982 }
3983 size = LLVMBuildInsertElement(gallivm->builder, size, layers,
3984 lp_build_const_int32(gallivm, dims), "");
3985 }
3986
3987 /*
3988 * d3d10 requires zero for x/y/z values (but not w, i.e. mip levels)
3989 * if level is out of bounds (note this can't cover unbound texture
3990 * here, which also requires returning zero).
3991 */
3992 if (params->explicit_lod && params->is_sviewinfo) {
3993 LLVMValueRef last_level, out, out1;
3994 struct lp_build_context leveli_bld;
3995
3996 /* everything is scalar for now */
3997 lp_build_context_init(&leveli_bld, gallivm, lp_type_int_vec(32, 32));
3998 last_level = dynamic_state->last_level(dynamic_state, gallivm,
3999 context_ptr, texture_unit, texture_unit_offset);
4000
4001 out = lp_build_cmp(&leveli_bld, PIPE_FUNC_LESS, level, first_level);
4002 out1 = lp_build_cmp(&leveli_bld, PIPE_FUNC_GREATER, level, last_level);
4003 out = lp_build_or(&leveli_bld, out, out1);
4004 if (num_lods == 1) {
4005 out = lp_build_broadcast_scalar(&bld_int_vec4, out);
4006 }
4007 else {
4008 /* TODO */
4009 assert(0);
4010 }
4011 size = lp_build_andnot(&bld_int_vec4, size, out);
4012 }
4013 for (i = 0; i < dims + (has_array ? 1 : 0); i++) {
4014 params->sizes_out[i] = lp_build_extract_broadcast(gallivm, bld_int_vec4.type, params->int_type,
4015 size,
4016 lp_build_const_int32(gallivm, i));
4017 }
4018 if (params->is_sviewinfo) {
4019 for (; i < 4; i++) {
4020 params->sizes_out[i] = lp_build_const_vec(gallivm, params->int_type, 0.0);
4021 }
4022 }
4023
4024 /*
4025 * If there's no explicit_lod (buffers, rects), queries requiring the
4026 * number of mips would be illegal.
4027 */
4028 if (params->is_sviewinfo && params->explicit_lod) {
4029 struct lp_build_context bld_int_scalar;
4030 LLVMValueRef num_levels;
4031 lp_build_context_init(&bld_int_scalar, gallivm, lp_type_int(32));
4032
4033 if (static_state->level_zero_only) {
4034 num_levels = bld_int_scalar.one;
4035 }
4036 else {
4037 LLVMValueRef last_level;
4038
4039 last_level = dynamic_state->last_level(dynamic_state, gallivm,
4040 context_ptr, texture_unit, texture_unit_offset);
4041 num_levels = lp_build_sub(&bld_int_scalar, last_level, first_level);
4042 num_levels = lp_build_add(&bld_int_scalar, num_levels, bld_int_scalar.one);
4043 }
4044 params->sizes_out[3] = lp_build_broadcast(gallivm, lp_build_vec_type(gallivm, params->int_type),
4045 num_levels);
4046 }
4047 }
4048
4049 static void
4050 lp_build_do_atomic_soa(struct gallivm_state *gallivm,
4051 const struct util_format_description *format_desc,
4052 struct lp_type type,
4053 LLVMValueRef exec_mask,
4054 LLVMValueRef base_ptr,
4055 LLVMValueRef offset,
4056 LLVMValueRef out_of_bounds,
4057 unsigned img_op,
4058 LLVMAtomicRMWBinOp op,
4059 const LLVMValueRef rgba_in[4],
4060 const LLVMValueRef rgba2_in[4],
4061 LLVMValueRef atomic_result[4])
4062 {
4063 enum pipe_format format = format_desc->format;
4064
4065 if (format != PIPE_FORMAT_R32_UINT && format != PIPE_FORMAT_R32_SINT && format != PIPE_FORMAT_R32_FLOAT) {
4066 atomic_result[0] = lp_build_zero(gallivm, type);
4067 return;
4068 }
4069
4070 LLVMValueRef atom_res = lp_build_alloca(gallivm,
4071 LLVMVectorType(LLVMInt32TypeInContext(gallivm->context), type.length), "");
4072
4073 offset = LLVMBuildGEP(gallivm->builder, base_ptr, &offset, 1, "");
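/*
 * There are no vector atomics here: loop over the lanes and, for each
 * active (and in-bounds) lane, perform a scalar atomic op on the texel
 * address, collecting the old values into atom_res.
 */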
4074 struct lp_build_loop_state loop_state;
4075 lp_build_loop_begin(&loop_state, gallivm, lp_build_const_int32(gallivm, 0));
4076 struct lp_build_if_state ifthen;
4077 LLVMValueRef cond;
4078 LLVMValueRef packed = rgba_in[0], packed2 = rgba2_in[0];
4079
4080 LLVMValueRef should_store_mask = LLVMBuildAnd(gallivm->builder, exec_mask, LLVMBuildNot(gallivm->builder, out_of_bounds, ""), "store_mask");
4081 assert(exec_mask);
4082
4083 cond = LLVMBuildICmp(gallivm->builder, LLVMIntNE, should_store_mask, lp_build_const_int_vec(gallivm, type, 0), "");
4084 cond = LLVMBuildExtractElement(gallivm->builder, cond, loop_state.counter, "");
4085 lp_build_if(&ifthen, gallivm, cond);
4086
4087 LLVMValueRef data = LLVMBuildExtractElement(gallivm->builder, packed, loop_state.counter, "");
4088 LLVMValueRef cast_base_ptr = LLVMBuildExtractElement(gallivm->builder, offset, loop_state.counter, "");
4089 cast_base_ptr = LLVMBuildBitCast(gallivm->builder, cast_base_ptr, LLVMPointerType(LLVMInt32TypeInContext(gallivm->context), 0), "");
4090 data = LLVMBuildBitCast(gallivm->builder, data, LLVMInt32TypeInContext(gallivm->context), "");
4091
4092 if (img_op == LP_IMG_ATOMIC_CAS) {
4093 LLVMValueRef cas_src_ptr = LLVMBuildExtractElement(gallivm->builder, packed2, loop_state.counter, "");
4094 LLVMValueRef cas_src = LLVMBuildBitCast(gallivm->builder, cas_src_ptr, LLVMInt32TypeInContext(gallivm->context), "");
4095 data = LLVMBuildAtomicCmpXchg(gallivm->builder, cast_base_ptr, data,
4096 cas_src,
4097 LLVMAtomicOrderingSequentiallyConsistent,
4098 LLVMAtomicOrderingSequentiallyConsistent,
4099 false);
4100 data = LLVMBuildExtractValue(gallivm->builder, data, 0, "");
4101 } else {
4102 data = LLVMBuildAtomicRMW(gallivm->builder, op,
4103 cast_base_ptr, data,
4104 LLVMAtomicOrderingSequentiallyConsistent,
4105 false);
4106 }
4107
4108 LLVMValueRef temp_res = LLVMBuildLoad(gallivm->builder, atom_res, "");
4109 temp_res = LLVMBuildInsertElement(gallivm->builder, temp_res, data, loop_state.counter, "");
4110 LLVMBuildStore(gallivm->builder, temp_res, atom_res);
4111
4112 lp_build_endif(&ifthen);
4113 lp_build_loop_end_cond(&loop_state, lp_build_const_int32(gallivm, type.length),
4114 NULL, LLVMIntUGE);
4115 atomic_result[0] = LLVMBuildLoad(gallivm->builder, atom_res, "");
4116 }
4117
4118 static void
4119 lp_build_img_op_no_format(struct gallivm_state *gallivm,
4120 const struct lp_img_params *params,
4121 LLVMValueRef outdata[4])
4122 {
4123 /*
4124 * If there's nothing bound, format is NONE, and we must return
4125 * all zero as mandated by d3d10 in this case.
4126 */
4127 if (params->img_op != LP_IMG_STORE) {
4128 LLVMValueRef zero = lp_build_zero(gallivm, params->type);
4129 for (unsigned chan = 0; chan < (params->img_op == LP_IMG_LOAD ? 4 : 1); chan++) {
4130 outdata[chan] = zero;
4131 }
4132 }
4133 }
4134
4135 void
4136 lp_build_img_op_soa(const struct lp_static_texture_state *static_texture_state,
4137 struct lp_sampler_dynamic_state *dynamic_state,
4138 struct gallivm_state *gallivm,
4139 const struct lp_img_params *params,
4140 LLVMValueRef outdata[4])
4141 {
4142 unsigned target = params->target;
4143 unsigned dims = texture_dims(target);
4144 /** regular scalar int type */
4145 struct lp_type int_type, int_coord_type;
4146 struct lp_build_context int_bld, int_coord_bld;
4147 const struct util_format_description *format_desc = util_format_description(static_texture_state->format);
4148 LLVMValueRef x = params->coords[0], y = params->coords[1], z = params->coords[2];
4149 LLVMValueRef ms_index = params->ms_index;
4150 LLVMValueRef row_stride_vec = NULL, img_stride_vec = NULL;
4151 int_type = lp_type_int(32);
4152 int_coord_type = lp_int_type(params->type);
4153 lp_build_context_init(&int_bld, gallivm, int_type);
4154 lp_build_context_init(&int_coord_bld, gallivm, int_coord_type);
4155
4156 if (static_texture_state->format == PIPE_FORMAT_NONE) {
4157 lp_build_img_op_no_format(gallivm, params, outdata);
4158 return;
4159 }
4160 LLVMValueRef offset, i, j;
4161
4162 LLVMValueRef row_stride = dynamic_state->row_stride(dynamic_state, gallivm,
4163 params->context_ptr, params->image_index, NULL);
4164 LLVMValueRef img_stride = dynamic_state->img_stride(dynamic_state, gallivm,
4165 params->context_ptr, params->image_index, NULL);
4166 LLVMValueRef base_ptr = dynamic_state->base_ptr(dynamic_state, gallivm,
4167 params->context_ptr, params->image_index, NULL);
4168 LLVMValueRef width = dynamic_state->width(dynamic_state, gallivm,
4169 params->context_ptr, params->image_index, NULL);
4170 LLVMValueRef height = dynamic_state->height(dynamic_state, gallivm,
4171 params->context_ptr, params->image_index, NULL);
4172 LLVMValueRef depth = dynamic_state->depth(dynamic_state, gallivm,
4173 params->context_ptr, params->image_index, NULL);
4174 LLVMValueRef num_samples = NULL, sample_stride = NULL;
4175 if (ms_index) {
4176 num_samples = dynamic_state->num_samples(dynamic_state, gallivm,
4177 params->context_ptr, params->image_index, NULL);
4178 sample_stride = dynamic_state->sample_stride(dynamic_state, gallivm,
4179 params->context_ptr, params->image_index, NULL);
4180 }
4181
4182 boolean layer_coord = has_layer_coord(target);
4183
4184 width = lp_build_broadcast_scalar(&int_coord_bld, width);
4185 if (dims >= 2) {
4186 height = lp_build_broadcast_scalar(&int_coord_bld, height);
4187 row_stride_vec = lp_build_broadcast_scalar(&int_coord_bld, row_stride);
4188 }
4189 if (dims >= 3 || layer_coord) {
4190 depth = lp_build_broadcast_scalar(&int_coord_bld, depth);
4191 img_stride_vec = lp_build_broadcast_scalar(&int_coord_bld, img_stride);
4192 }
4193
4194 LLVMValueRef out_of_bounds = int_coord_bld.zero;
4195 LLVMValueRef out1;
4196 out1 = lp_build_cmp(&int_coord_bld, PIPE_FUNC_GEQUAL, x, width);
4197 out_of_bounds = lp_build_or(&int_coord_bld, out_of_bounds, out1);
4198
4199 if (dims >= 2) {
4200 out1 = lp_build_cmp(&int_coord_bld, PIPE_FUNC_GEQUAL, y, height);
4201 out_of_bounds = lp_build_or(&int_coord_bld, out_of_bounds, out1);
4202 }
4203 if (dims >= 3) {
4204 out1 = lp_build_cmp(&int_coord_bld, PIPE_FUNC_GEQUAL, z, depth);
4205 out_of_bounds = lp_build_or(&int_coord_bld, out_of_bounds, out1);
4206 }
4207 lp_build_sample_offset(&int_coord_bld,
4208 format_desc,
4209 x, y, z, row_stride_vec, img_stride_vec,
4210 &offset, &i, &j);
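/* (i/j are the within-block subtexel coords used by compressed formats;
 * offset is the per-lane byte offset of the texel) */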
4211
4212 if (ms_index) {
4213 out1 = lp_build_cmp(&int_coord_bld, PIPE_FUNC_GEQUAL, ms_index, lp_build_broadcast_scalar(&int_coord_bld, num_samples));
4214 out_of_bounds = lp_build_or(&int_coord_bld, out_of_bounds, out1);
4215
4216 offset = lp_build_add(&int_coord_bld, offset,
4217 lp_build_mul(&int_coord_bld, lp_build_broadcast_scalar(&int_coord_bld, sample_stride),
4218 ms_index));
4219 }
4220 if (params->img_op == LP_IMG_LOAD) {
4221 struct lp_type texel_type = lp_build_texel_type(params->type, format_desc);
4222
4223 offset = lp_build_andnot(&int_coord_bld, offset, out_of_bounds);
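/* (force out-of-bounds lanes to offset 0 so the fetch below stays
 * in-bounds; their results are replaced with zero afterwards) */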
4224 struct lp_build_context texel_bld;
4225 lp_build_context_init(&texel_bld, gallivm, texel_type);
4226 lp_build_fetch_rgba_soa(gallivm,
4227 format_desc,
4228 texel_type, TRUE,
4229 base_ptr, offset,
4230 i, j,
4231 NULL,
4232 outdata);
4233
4234 for (unsigned chan = 0; chan < 4; chan++) {
4235 outdata[chan] = lp_build_select(&texel_bld, out_of_bounds,
4236 texel_bld.zero, outdata[chan]);
4237 }
4238 } else if (params->img_op == LP_IMG_STORE) {
4239 lp_build_store_rgba_soa(gallivm, format_desc, params->type, params->exec_mask, base_ptr, offset, out_of_bounds,
4240 params->indata);
4241 } else {
4242 lp_build_do_atomic_soa(gallivm, format_desc, params->type, params->exec_mask, base_ptr, offset, out_of_bounds,
4243 params->img_op, params->op, params->indata, params->indata2, outdata);
4244 }
4245 }
4246
4247 /*
4248 * These functions are for indirect texture access support.
4249 *
4250 * Indirect textures are implemented using a switch statement that
4251 * takes the texture index and jumps to the sampler functions for
4252 * that texture unit.
4253 */
4254
4255 /*
4256 * Initialise an indexed sampler switch block.
4257 *
4258 * This sets up the switch_info state and adds the LLVM flow control pieces.
4259 */
4260 void
4261 lp_build_sample_array_init_soa(struct lp_build_sample_array_switch *switch_info,
4262 struct gallivm_state *gallivm,
4263 const struct lp_sampler_params *params,
4264 LLVMValueRef idx,
4265 unsigned base, unsigned range)
4266 {
4267 switch_info->gallivm = gallivm;
4268 switch_info->params = *params;
4269 switch_info->base = base;
4270 switch_info->range = range;
4271
4272 /* for generating the switch functions we don't want the texture index offset */
4273 switch_info->params.texture_index_offset = 0;
4274
4275 LLVMBasicBlockRef initial_block = LLVMGetInsertBlock(gallivm->builder);
4276 switch_info->merge_ref = lp_build_insert_new_block(gallivm, "texmerge");
4277
4278 switch_info->switch_ref = LLVMBuildSwitch(gallivm->builder, idx,
4279 switch_info->merge_ref, range - base);
4280
4281 LLVMTypeRef val_type[4];
4282 val_type[0] = val_type[1] = val_type[2] = val_type[3] =
4283 lp_build_vec_type(gallivm, params->type);
4284 LLVMTypeRef ret_type = LLVMStructTypeInContext(gallivm->context, val_type, 4, 0);
4285
4286 LLVMValueRef undef_val = LLVMGetUndef(ret_type);
4287
4288 LLVMPositionBuilderAtEnd(gallivm->builder, switch_info->merge_ref);
4289
4290 switch_info->phi = LLVMBuildPhi(gallivm->builder, ret_type, "");
4291 LLVMAddIncoming(switch_info->phi, &undef_val, &initial_block, 1);
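/* the phi gathers one struct-of-4-vectors result per switch case; the
 * default edge (unknown index) contributes undef */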
4292 }
4293
4294 /*
4295 * Add an individual entry to the indirect texture switch.
4296 *
4297 * This builds the sample function and links a case for it into the switch statement.
4298 */
4299 void
4300 lp_build_sample_array_case_soa(struct lp_build_sample_array_switch *switch_info,
4301 int idx,
4302 const struct lp_static_texture_state *static_texture_state,
4303 const struct lp_static_sampler_state *static_sampler_state,
4304 struct lp_sampler_dynamic_state *dynamic_texture_state)
4305 {
4306 struct gallivm_state *gallivm = switch_info->gallivm;
4307 LLVMBasicBlockRef this_block = lp_build_insert_new_block(gallivm, "texblock");
4308 LLVMValueRef tex_ret;
4309
4310 LLVMAddCase(switch_info->switch_ref, LLVMConstInt(LLVMInt32TypeInContext(gallivm->context), idx, 0), this_block);
4311 LLVMPositionBuilderAtEnd(gallivm->builder, this_block);
4312
4313 lp_build_sample_soa_func(gallivm, static_texture_state,
4314 static_sampler_state, dynamic_texture_state, &switch_info->params, idx, idx,
4315 &tex_ret);
4316
4317 LLVMAddIncoming(switch_info->phi, &tex_ret, &this_block, 1);
4318 LLVMBuildBr(gallivm->builder, switch_info->merge_ref);
4319 }
4320
4321 /*
4322 * Finish a switch statement.
4323 *
4324 * This handles extracting the results from the switch.
4325 */
4326 void lp_build_sample_array_fini_soa(struct lp_build_sample_array_switch *switch_info)
4327 {
4328 struct gallivm_state *gallivm = switch_info->gallivm;
4329
4330 LLVMPositionBuilderAtEnd(gallivm->builder, switch_info->merge_ref);
4331 for (unsigned i = 0; i < 4; i++)
4332 switch_info->params.texel[i] = LLVMBuildExtractValue(gallivm->builder, switch_info->phi, i, "");
4333 }
4334
4335 void
4336 lp_build_image_op_switch_soa(struct lp_build_img_op_array_switch *switch_info,
4337 struct gallivm_state *gallivm,
4338 const struct lp_img_params *params,
4339 LLVMValueRef idx,
4340 unsigned base, unsigned range)
4341 {
4342 switch_info->gallivm = gallivm;
4343 switch_info->params = *params;
4344 switch_info->base = base;
4345 switch_info->range = range;
4346
4347 /* for generating the switch functions we don't want the image index offset */
4348 switch_info->params.image_index_offset = 0;
4349
4350 LLVMBasicBlockRef initial_block = LLVMGetInsertBlock(gallivm->builder);
4351 switch_info->merge_ref = lp_build_insert_new_block(gallivm, "imgmerge");
4352
4353 switch_info->switch_ref = LLVMBuildSwitch(gallivm->builder, idx,
4354 switch_info->merge_ref, range - base);
4355
4356 if (params->img_op != LP_IMG_STORE) {
4357 LLVMTypeRef ret_type = lp_build_vec_type(gallivm, params->type);
4358 LLVMValueRef undef_val = LLVMGetUndef(ret_type);
4359
4360 LLVMPositionBuilderAtEnd(gallivm->builder, switch_info->merge_ref);
4361
4362 for (unsigned i = 0; i < ((params->img_op == LP_IMG_LOAD) ? 4 : 1); i++) {
4363 switch_info->phi[i] = LLVMBuildPhi(gallivm->builder, ret_type, "");
4364 LLVMAddIncoming(switch_info->phi[i], &undef_val, &initial_block, 1);
4365 }
4366 }
4367 }
4368
4369 void
4370 lp_build_image_op_array_case(struct lp_build_img_op_array_switch *switch_info,
4371 int idx,
4372 const struct lp_static_texture_state *static_texture_state,
4373 struct lp_sampler_dynamic_state *dynamic_state)
4374 {
4375 struct gallivm_state *gallivm = switch_info->gallivm;
4376 LLVMBasicBlockRef this_block = lp_build_insert_new_block(gallivm, "img");
4377 LLVMValueRef tex_ret[4];
4378
4379 LLVMAddCase(switch_info->switch_ref, lp_build_const_int32(gallivm, idx), this_block);
4380 LLVMPositionBuilderAtEnd(gallivm->builder, this_block);
4381
4382 switch_info->params.image_index = idx;
4383
4384 lp_build_img_op_soa(static_texture_state, dynamic_state, switch_info->gallivm, &switch_info->params, tex_ret);
4385 if (switch_info->params.img_op != LP_IMG_STORE) {
4386 for (unsigned i = 0; i < ((switch_info->params.img_op == LP_IMG_LOAD) ? 4 : 1); i++)
4387 tex_ret[i] = LLVMBuildBitCast(gallivm->builder, tex_ret[i], lp_build_vec_type(gallivm, switch_info->params.type), "");
4388
4389 this_block = LLVMGetInsertBlock(gallivm->builder);
4390 for (unsigned i = 0; i < ((switch_info->params.img_op == LP_IMG_LOAD) ? 4 : 1); i++) {
4391 LLVMAddIncoming(switch_info->phi[i], &tex_ret[i], &this_block, 1);
4392 }
4393 }
4394 LLVMBuildBr(gallivm->builder, switch_info->merge_ref);
4395 }
4396
4397 void lp_build_image_op_array_fini_soa(struct lp_build_img_op_array_switch *switch_info)
4398 {
4399 struct gallivm_state *gallivm = switch_info->gallivm;
4400
4401 LLVMPositionBuilderAtEnd(gallivm->builder, switch_info->merge_ref);
4402
4403 if (switch_info->params.img_op != LP_IMG_STORE) {
4404 for (unsigned i = 0; i < ((switch_info->params.img_op == LP_IMG_LOAD) ? 4 : 1); i++) {
4405 switch_info->params.outdata[i] = switch_info->phi[i];
4406 }
4407 }
4408 }