gallivm: some bits of seamless cube filtering implementation
[mesa.git] / src / gallium / auxiliary / gallivm / lp_bld_sample_soa.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 * Texture sampling -- SoA.
31 *
32 * @author Jose Fonseca <jfonseca@vmware.com>
33 * @author Brian Paul <brianp@vmware.com>
34 */
35
36 #include "pipe/p_defines.h"
37 #include "pipe/p_state.h"
38 #include "pipe/p_shader_tokens.h"
39 #include "util/u_debug.h"
40 #include "util/u_dump.h"
41 #include "util/u_memory.h"
42 #include "util/u_math.h"
43 #include "util/u_format.h"
44 #include "util/u_cpu_detect.h"
45 #include "util/u_format_rgb9e5.h"
46 #include "lp_bld_debug.h"
47 #include "lp_bld_type.h"
48 #include "lp_bld_const.h"
49 #include "lp_bld_conv.h"
50 #include "lp_bld_arit.h"
51 #include "lp_bld_bitarit.h"
52 #include "lp_bld_logic.h"
53 #include "lp_bld_printf.h"
54 #include "lp_bld_swizzle.h"
55 #include "lp_bld_flow.h"
56 #include "lp_bld_gather.h"
57 #include "lp_bld_format.h"
58 #include "lp_bld_sample.h"
59 #include "lp_bld_sample_aos.h"
60 #include "lp_bld_struct.h"
61 #include "lp_bld_quad.h"
62 #include "lp_bld_pack.h"
63
64
/**
 * Generate code to fetch a texel from a texture at int coords (x, y, z).
 * The computation depends on whether the texture is 1D, 2D or 3D.
 * The result, texel, will be float vectors:
 *   texel[0] = red values
 *   texel[1] = green values
 *   texel[2] = blue values
 *   texel[3] = alpha values
 *
 * \param width,height,depth  per-level texture sizes (int coord vectors)
 * \param x,y,z       integer texel coordinates (int coord vectors)
 * \param y_stride    row stride in bytes (int vector)
 * \param z_stride    image/slice stride in bytes (int vector)
 * \param data_ptr    base pointer of the texture data
 * \param mipoffsets  per-element byte offset of the selected mip level,
 *                    or NULL if data_ptr already points at the level
 * \param texel_out   returns the r,g,b,a texel channels
 */
static void
lp_build_sample_texel_soa(struct lp_build_sample_context *bld,
                          LLVMValueRef width,
                          LLVMValueRef height,
                          LLVMValueRef depth,
                          LLVMValueRef x,
                          LLVMValueRef y,
                          LLVMValueRef z,
                          LLVMValueRef y_stride,
                          LLVMValueRef z_stride,
                          LLVMValueRef data_ptr,
                          LLVMValueRef mipoffsets,
                          LLVMValueRef texel_out[4])
{
   const struct lp_static_sampler_state *static_state = bld->static_sampler_state;
   const unsigned dims = bld->dims;
   struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef offset;
   LLVMValueRef i, j;
   LLVMValueRef use_border = NULL;

   /* use_border = x < 0 || x >= width || y < 0 || y >= height */
   /* Only build the out-of-bounds mask for axes whose wrap mode can
    * actually produce border samples. */
   if (lp_sampler_wrap_mode_uses_border_color(static_state->wrap_s,
                                              static_state->min_img_filter,
                                              static_state->mag_img_filter)) {
      LLVMValueRef b1, b2;
      b1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, x, int_coord_bld->zero);
      b2 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, x, width);
      use_border = LLVMBuildOr(builder, b1, b2, "b1_or_b2");
   }

   if (dims >= 2 &&
       lp_sampler_wrap_mode_uses_border_color(static_state->wrap_t,
                                              static_state->min_img_filter,
                                              static_state->mag_img_filter)) {
      LLVMValueRef b1, b2;
      b1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, y, int_coord_bld->zero);
      b2 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, y, height);
      if (use_border) {
         use_border = LLVMBuildOr(builder, use_border, b1, "ub_or_b1");
         use_border = LLVMBuildOr(builder, use_border, b2, "ub_or_b2");
      }
      else {
         use_border = LLVMBuildOr(builder, b1, b2, "b1_or_b2");
      }
   }

   if (dims == 3 &&
       lp_sampler_wrap_mode_uses_border_color(static_state->wrap_r,
                                              static_state->min_img_filter,
                                              static_state->mag_img_filter)) {
      LLVMValueRef b1, b2;
      b1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, z, int_coord_bld->zero);
      b2 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, z, depth);
      if (use_border) {
         use_border = LLVMBuildOr(builder, use_border, b1, "ub_or_b1");
         use_border = LLVMBuildOr(builder, use_border, b2, "ub_or_b2");
      }
      else {
         use_border = LLVMBuildOr(builder, b1, b2, "b1_or_b2");
      }
   }

   /* convert x,y,z coords to linear offset from start of texture, in bytes */
   lp_build_sample_offset(&bld->int_coord_bld,
                          bld->format_desc,
                          x, y, z, y_stride, z_stride,
                          &offset, &i, &j);
   if (mipoffsets) {
      offset = lp_build_add(&bld->int_coord_bld, offset, mipoffsets);
   }

   if (use_border) {
      /* If we can sample the border color, it means that texcoords may
       * lie outside the bounds of the texture image.  We need to do
       * something to prevent reading out of bounds and causing a segfault.
       *
       * Simply AND the texture coords with !use_border.  This will cause
       * coords which are out of bounds to become zero.  Zero's guaranteed
       * to be inside the texture image.
       */
      offset = lp_build_andnot(&bld->int_coord_bld, offset, use_border);
   }

   /* Fetch and convert the texels at the computed byte offsets. */
   lp_build_fetch_rgba_soa(bld->gallivm,
                           bld->format_desc,
                           bld->texel_type,
                           data_ptr, offset,
                           i, j,
                           texel_out);

   /*
    * Note: if we find an app which frequently samples the texture border
    * we might want to implement a true conditional here to avoid sampling
    * the texture whenever possible (since that's quite a bit of code).
    * Ex:
    *   if (use_border) {
    *      texel = border_color;
    *   }
    *   else {
    *      texel = sample_texture(coord);
    *   }
    * As it is now, we always sample the texture, then selectively replace
    * the texel color results with the border color.
    */

   if (use_border) {
      /* select texel color or border color depending on use_border. */
      const struct util_format_description *format_desc = bld->format_desc;
      int chan;
      struct lp_type border_type = bld->texel_type;
      border_type.length = 4;
      /*
       * Only replace channels which are actually present. The others should
       * get optimized away eventually by sampler_view swizzle anyway but it's
       * easier too.
       */
      for (chan = 0; chan < 4; chan++) {
         unsigned chan_s;
         /* reverse-map channel... */
         for (chan_s = 0; chan_s < 4; chan_s++) {
            if (chan_s == format_desc->swizzle[chan]) {
               break;
            }
         }
         /* chan_s <= 3 means this output channel maps to a real format
          * channel (not a constant 0/1 swizzle). */
         if (chan_s <= 3) {
            /* use the already clamped color */
            LLVMValueRef idx = lp_build_const_int32(bld->gallivm, chan);
            LLVMValueRef border_chan;

            border_chan = lp_build_extract_broadcast(bld->gallivm,
                                                     border_type,
                                                     bld->texel_type,
                                                     bld->border_color_clamped,
                                                     idx);
            texel_out[chan] = lp_build_select(&bld->texel_bld, use_border,
                                              border_chan, texel_out[chan]);
         }
      }
   }
}
216
217
218 /**
219 * Helper to compute the mirror function for the PIPE_WRAP_MIRROR modes.
220 */
221 static LLVMValueRef
222 lp_build_coord_mirror(struct lp_build_sample_context *bld,
223 LLVMValueRef coord)
224 {
225 struct lp_build_context *coord_bld = &bld->coord_bld;
226 struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
227 LLVMValueRef fract, flr, isOdd;
228
229 lp_build_ifloor_fract(coord_bld, coord, &flr, &fract);
230
231 /* isOdd = flr & 1 */
232 isOdd = LLVMBuildAnd(bld->gallivm->builder, flr, int_coord_bld->one, "");
233
234 /* make coord positive or negative depending on isOdd */
235 coord = lp_build_set_sign(coord_bld, fract, isOdd);
236
237 /* convert isOdd to float */
238 isOdd = lp_build_int_to_float(coord_bld, isOdd);
239
240 /* add isOdd to coord */
241 coord = lp_build_add(coord_bld, coord, isOdd);
242
243 return coord;
244 }
245
246
247 /**
248 * Helper to compute the first coord and the weight for
249 * linear wrap repeat npot textures
250 */
251 void
252 lp_build_coord_repeat_npot_linear(struct lp_build_sample_context *bld,
253 LLVMValueRef coord_f,
254 LLVMValueRef length_i,
255 LLVMValueRef length_f,
256 LLVMValueRef *coord0_i,
257 LLVMValueRef *weight_f)
258 {
259 struct lp_build_context *coord_bld = &bld->coord_bld;
260 struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
261 LLVMValueRef half = lp_build_const_vec(bld->gallivm, coord_bld->type, 0.5);
262 LLVMValueRef length_minus_one = lp_build_sub(int_coord_bld, length_i,
263 int_coord_bld->one);
264 LLVMValueRef mask;
265 /* wrap with normalized floats is just fract */
266 coord_f = lp_build_fract(coord_bld, coord_f);
267 /* mul by size and subtract 0.5 */
268 coord_f = lp_build_mul(coord_bld, coord_f, length_f);
269 coord_f = lp_build_sub(coord_bld, coord_f, half);
270 /*
271 * we avoided the 0.5/length division before the repeat wrap,
272 * now need to fix up edge cases with selects
273 */
274 /* convert to int, compute lerp weight */
275 lp_build_ifloor_fract(coord_bld, coord_f, coord0_i, weight_f);
276 mask = lp_build_compare(int_coord_bld->gallivm, int_coord_bld->type,
277 PIPE_FUNC_LESS, *coord0_i, int_coord_bld->zero);
278 *coord0_i = lp_build_select(int_coord_bld, mask, length_minus_one, *coord0_i);
279 }
280
281
/**
 * Build LLVM code for texture wrap mode for linear filtering.
 * \param coord  the incoming texcoord (nominally in [0,1])
 * \param length  the texture size along one dimension, as int vector
 * \param length_f  the texture size along one dimension, as float vector
 * \param offset  texel offset along one dimension (int vector), or NULL
 * \param is_pot  if TRUE, length is a power of two
 * \param wrap_mode  one of PIPE_TEX_WRAP_x
 * \param x0_out returns first integer texcoord
 * \param x1_out returns second integer texcoord
 * \param weight_out returns linear interpolation weight
 */
static void
lp_build_sample_wrap_linear(struct lp_build_sample_context *bld,
                            LLVMValueRef coord,
                            LLVMValueRef length,
                            LLVMValueRef length_f,
                            LLVMValueRef offset,
                            boolean is_pot,
                            unsigned wrap_mode,
                            LLVMValueRef *x0_out,
                            LLVMValueRef *x1_out,
                            LLVMValueRef *weight_out)
{
   struct lp_build_context *coord_bld = &bld->coord_bld;
   struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef half = lp_build_const_vec(bld->gallivm, coord_bld->type, 0.5);
   LLVMValueRef length_minus_one = lp_build_sub(int_coord_bld, length, int_coord_bld->one);
   LLVMValueRef coord0, coord1, weight;

   switch(wrap_mode) {
   case PIPE_TEX_WRAP_REPEAT:
      if (is_pot) {
         /* POT fast path: wrap via bitwise AND with length-1 */
         /* mul by size and subtract 0.5 */
         coord = lp_build_mul(coord_bld, coord, length_f);
         coord = lp_build_sub(coord_bld, coord, half);
         if (offset) {
            offset = lp_build_int_to_float(coord_bld, offset);
            coord = lp_build_add(coord_bld, coord, offset);
         }
         /* convert to int, compute lerp weight */
         lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
         coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
         /* repeat wrap */
         coord0 = LLVMBuildAnd(builder, coord0, length_minus_one, "");
         coord1 = LLVMBuildAnd(builder, coord1, length_minus_one, "");
      }
      else {
         LLVMValueRef mask;
         if (offset) {
            /* npot: offset must be applied in normalized coord space */
            offset = lp_build_int_to_float(coord_bld, offset);
            offset = lp_build_div(coord_bld, offset, length_f);
            coord = lp_build_add(coord_bld, coord, offset);
         }
         lp_build_coord_repeat_npot_linear(bld, coord,
                                           length, length_f,
                                           &coord0, &weight);
         /* mask is 0 when coord0 is the last texel, so coord1 wraps to 0 */
         mask = lp_build_compare(int_coord_bld->gallivm, int_coord_bld->type,
                                 PIPE_FUNC_NOTEQUAL, coord0, length_minus_one);
         coord1 = LLVMBuildAnd(builder,
                               lp_build_add(int_coord_bld, coord0, int_coord_bld->one),
                               mask, "");
      }
      break;

   case PIPE_TEX_WRAP_CLAMP:
      if (bld->static_sampler_state->normalized_coords) {
         /* scale coord to length */
         coord = lp_build_mul(coord_bld, coord, length_f);
      }
      if (offset) {
         offset = lp_build_int_to_float(coord_bld, offset);
         coord = lp_build_add(coord_bld, coord, offset);
      }

      /* clamp to [0, length] */
      coord = lp_build_clamp(coord_bld, coord, coord_bld->zero, length_f);

      coord = lp_build_sub(coord_bld, coord, half);

      /* convert to int, compute lerp weight */
      lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
      coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
      break;

   case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
      {
         /* unsigned float context: ifloor of a non-negative coord is cheaper */
         struct lp_build_context abs_coord_bld = bld->coord_bld;
         abs_coord_bld.type.sign = FALSE;

         if (bld->static_sampler_state->normalized_coords) {
            /* mul by tex size */
            coord = lp_build_mul(coord_bld, coord, length_f);
         }
         if (offset) {
            offset = lp_build_int_to_float(coord_bld, offset);
            coord = lp_build_add(coord_bld, coord, offset);
         }

         /* clamp to length max */
         coord = lp_build_min(coord_bld, coord, length_f);
         /* subtract 0.5 */
         coord = lp_build_sub(coord_bld, coord, half);
         /* clamp to [0, length - 0.5] */
         coord = lp_build_max(coord_bld, coord, coord_bld->zero);
         /* convert to int, compute lerp weight */
         lp_build_ifloor_fract(&abs_coord_bld, coord, &coord0, &weight);
         coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
         /* coord1 = min(coord1, length-1) */
         coord1 = lp_build_min(int_coord_bld, coord1, length_minus_one);
         break;
      }

   case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
      if (bld->static_sampler_state->normalized_coords) {
         /* scale coord to length */
         coord = lp_build_mul(coord_bld, coord, length_f);
      }
      if (offset) {
         offset = lp_build_int_to_float(coord_bld, offset);
         coord = lp_build_add(coord_bld, coord, offset);
      }
      /* out-of-range texels are masked to border color by the caller */
      /* was: clamp to [-0.5, length + 0.5], then sub 0.5 */
      /* can skip clamp (though might not work for very large coord values */
      coord = lp_build_sub(coord_bld, coord, half);
      /* convert to int, compute lerp weight */
      lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
      coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
      break;

   case PIPE_TEX_WRAP_MIRROR_REPEAT:
      /* compute mirror function */
      coord = lp_build_coord_mirror(bld, coord);

      /* scale coord to length */
      coord = lp_build_mul(coord_bld, coord, length_f);
      coord = lp_build_sub(coord_bld, coord, half);
      if (offset) {
         offset = lp_build_int_to_float(coord_bld, offset);
         coord = lp_build_add(coord_bld, coord, offset);
      }

      /* convert to int, compute lerp weight */
      lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
      coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);

      /* coord0 = max(coord0, 0) */
      coord0 = lp_build_max(int_coord_bld, coord0, int_coord_bld->zero);
      /* coord1 = min(coord1, length-1) */
      coord1 = lp_build_min(int_coord_bld, coord1, length_minus_one);
      break;

   case PIPE_TEX_WRAP_MIRROR_CLAMP:
      if (bld->static_sampler_state->normalized_coords) {
         /* scale coord to length */
         coord = lp_build_mul(coord_bld, coord, length_f);
      }
      if (offset) {
         offset = lp_build_int_to_float(coord_bld, offset);
         coord = lp_build_add(coord_bld, coord, offset);
      }
      /* mirror = clamp of the absolute value */
      coord = lp_build_abs(coord_bld, coord);

      /* clamp to [0, length] */
      coord = lp_build_min(coord_bld, coord, length_f);

      coord = lp_build_sub(coord_bld, coord, half);

      /* convert to int, compute lerp weight */
      lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
      coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
      break;

   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
      {
         /* unsigned float context: coord is non-negative after abs() */
         struct lp_build_context abs_coord_bld = bld->coord_bld;
         abs_coord_bld.type.sign = FALSE;

         if (bld->static_sampler_state->normalized_coords) {
            /* scale coord to length */
            coord = lp_build_mul(coord_bld, coord, length_f);
         }
         if (offset) {
            offset = lp_build_int_to_float(coord_bld, offset);
            coord = lp_build_add(coord_bld, coord, offset);
         }
         coord = lp_build_abs(coord_bld, coord);

         /* clamp to length max */
         coord = lp_build_min(coord_bld, coord, length_f);
         /* subtract 0.5 */
         coord = lp_build_sub(coord_bld, coord, half);
         /* clamp to [0, length - 0.5] */
         coord = lp_build_max(coord_bld, coord, coord_bld->zero);

         /* convert to int, compute lerp weight */
         lp_build_ifloor_fract(&abs_coord_bld, coord, &coord0, &weight);
         coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
         /* coord1 = min(coord1, length-1) */
         coord1 = lp_build_min(int_coord_bld, coord1, length_minus_one);
      }
      break;

   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
      {
         if (bld->static_sampler_state->normalized_coords) {
            /* scale coord to length */
            coord = lp_build_mul(coord_bld, coord, length_f);
         }
         if (offset) {
            offset = lp_build_int_to_float(coord_bld, offset);
            coord = lp_build_add(coord_bld, coord, offset);
         }
         coord = lp_build_abs(coord_bld, coord);

         /* was: clamp to [-0.5, length + 0.5] then sub 0.5 */
         /* skip clamp - always positive, and other side
            only potentially matters for very large coords */
         coord = lp_build_sub(coord_bld, coord, half);

         /* convert to int, compute lerp weight */
         lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
         coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one);
      }
      break;

   default:
      assert(0);
      coord0 = NULL;
      coord1 = NULL;
      weight = NULL;
   }

   *x0_out = coord0;
   *x1_out = coord1;
   *weight_out = weight;
}
514
515
/**
 * Build LLVM code for texture wrap mode for nearest filtering.
 * \param coord  the incoming texcoord (nominally in [0,1])
 * \param length  the texture size along one dimension, as int vector
 * \param length_f  the texture size along one dimension, as float vector
 * \param offset  texel offset along one dimension (as int vector), or NULL
 * \param is_pot  if TRUE, length is a power of two
 * \param wrap_mode  one of PIPE_TEX_WRAP_x
 * \return  the wrapped integer texcoord
 */
static LLVMValueRef
lp_build_sample_wrap_nearest(struct lp_build_sample_context *bld,
                             LLVMValueRef coord,
                             LLVMValueRef length,
                             LLVMValueRef length_f,
                             LLVMValueRef offset,
                             boolean is_pot,
                             unsigned wrap_mode)
{
   struct lp_build_context *coord_bld = &bld->coord_bld;
   struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef length_minus_one = lp_build_sub(int_coord_bld, length, int_coord_bld->one);
   LLVMValueRef icoord;

   switch(wrap_mode) {
   case PIPE_TEX_WRAP_REPEAT:
      if (is_pot) {
         /* POT fast path: wrap via bitwise AND with length-1 */
         coord = lp_build_mul(coord_bld, coord, length_f);
         icoord = lp_build_ifloor(coord_bld, coord);
         if (offset) {
            icoord = lp_build_add(int_coord_bld, icoord, offset);
         }
         icoord = LLVMBuildAnd(builder, icoord, length_minus_one, "");
      }
      else {
         if (offset) {
            /* npot: offset must be applied in normalized coord space */
            offset = lp_build_int_to_float(coord_bld, offset);
            offset = lp_build_div(coord_bld, offset, length_f);
            coord = lp_build_add(coord_bld, coord, offset);
         }
         /* take fraction, unnormalize */
         coord = lp_build_fract_safe(coord_bld, coord);
         coord = lp_build_mul(coord_bld, coord, length_f);
         icoord = lp_build_itrunc(coord_bld, coord);
      }
      break;

   case PIPE_TEX_WRAP_CLAMP:
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
      if (bld->static_sampler_state->normalized_coords) {
         /* scale coord to length */
         coord = lp_build_mul(coord_bld, coord, length_f);
      }

      /* floor */
      /* use itrunc instead since we clamp to 0 anyway */
      icoord = lp_build_itrunc(coord_bld, coord);
      if (offset) {
         icoord = lp_build_add(int_coord_bld, icoord, offset);
      }

      /* clamp to [0, length - 1]. */
      icoord = lp_build_clamp(int_coord_bld, icoord, int_coord_bld->zero,
                              length_minus_one);
      break;

   case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
      if (bld->static_sampler_state->normalized_coords) {
         /* scale coord to length */
         coord = lp_build_mul(coord_bld, coord, length_f);
      }
      /* no clamp necessary, border masking will handle this */
      icoord = lp_build_ifloor(coord_bld, coord);
      if (offset) {
         icoord = lp_build_add(int_coord_bld, icoord, offset);
      }
      break;

   case PIPE_TEX_WRAP_MIRROR_REPEAT:
      if (offset) {
         /* offset applied in normalized space, before mirroring */
         offset = lp_build_int_to_float(coord_bld, offset);
         offset = lp_build_div(coord_bld, offset, length_f);
         coord = lp_build_add(coord_bld, coord, offset);
      }
      /* compute mirror function */
      coord = lp_build_coord_mirror(bld, coord);

      /* scale coord to length */
      assert(bld->static_sampler_state->normalized_coords);
      coord = lp_build_mul(coord_bld, coord, length_f);

      /* itrunc == ifloor here */
      icoord = lp_build_itrunc(coord_bld, coord);

      /* clamp to [0, length - 1] */
      icoord = lp_build_min(int_coord_bld, icoord, length_minus_one);
      break;

   case PIPE_TEX_WRAP_MIRROR_CLAMP:
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
      if (bld->static_sampler_state->normalized_coords) {
         /* scale coord to length */
         coord = lp_build_mul(coord_bld, coord, length_f);
      }
      if (offset) {
         offset = lp_build_int_to_float(coord_bld, offset);
         coord = lp_build_add(coord_bld, coord, offset);
      }
      /* mirror = clamp of the absolute value */
      coord = lp_build_abs(coord_bld, coord);

      /* itrunc == ifloor here */
      icoord = lp_build_itrunc(coord_bld, coord);

      /* clamp to [0, length - 1] */
      icoord = lp_build_min(int_coord_bld, icoord, length_minus_one);
      break;

   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
      if (bld->static_sampler_state->normalized_coords) {
         /* scale coord to length */
         coord = lp_build_mul(coord_bld, coord, length_f);
      }
      if (offset) {
         offset = lp_build_int_to_float(coord_bld, offset);
         coord = lp_build_add(coord_bld, coord, offset);
      }
      coord = lp_build_abs(coord_bld, coord);

      /* itrunc == ifloor here */
      /* no upper clamp: border masking handles out-of-range values */
      icoord = lp_build_itrunc(coord_bld, coord);
      break;

   default:
      assert(0);
      icoord = NULL;
   }

   return icoord;
}
655
656
657 /**
658 * Do shadow test/comparison.
659 * \param p shadow ref value
660 * \param texel the texel to compare against
661 */
662 static LLVMValueRef
663 lp_build_sample_comparefunc(struct lp_build_sample_context *bld,
664 LLVMValueRef p,
665 LLVMValueRef texel)
666 {
667 struct lp_build_context *texel_bld = &bld->texel_bld;
668 LLVMValueRef res;
669
670 if (0) {
671 //lp_build_print_value(bld->gallivm, "shadow cmp coord", p);
672 lp_build_print_value(bld->gallivm, "shadow cmp texel", texel);
673 }
674
675 /* result = (p FUNC texel) ? 1 : 0 */
676 /*
677 * honor d3d10 floating point rules here, which state that comparisons
678 * are ordered except NOT_EQUAL which is unordered.
679 */
680 if (bld->static_sampler_state->compare_func != PIPE_FUNC_NOTEQUAL) {
681 res = lp_build_cmp_ordered(texel_bld, bld->static_sampler_state->compare_func,
682 p, texel);
683 }
684 else {
685 res = lp_build_cmp(texel_bld, bld->static_sampler_state->compare_func,
686 p, texel);
687 }
688 return res;
689 }
690
691
/**
 * Generate code to sample a mipmap level with nearest filtering.
 * If sampling a cube texture, r = cube face in [0,5].
 *
 * \param size  packed width/height/depth of the mip level
 * \param row_stride_vec  row stride in bytes (int vector)
 * \param img_stride_vec  image/slice stride in bytes (int vector)
 * \param data_ptr  base pointer of the texture data
 * \param mipoffsets  per-element mip level byte offset, or NULL
 * \param coords  the s,t,r (and shadow ref in coords[4]) texcoords
 * \param offsets  texel offsets per dimension (each may be NULL)
 * \param colors_out  returns the sampled r,g,b,a colors
 */
static void
lp_build_sample_image_nearest(struct lp_build_sample_context *bld,
                              LLVMValueRef size,
                              LLVMValueRef row_stride_vec,
                              LLVMValueRef img_stride_vec,
                              LLVMValueRef data_ptr,
                              LLVMValueRef mipoffsets,
                              LLVMValueRef *coords,
                              const LLVMValueRef *offsets,
                              LLVMValueRef colors_out[4])
{
   const unsigned dims = bld->dims;
   LLVMValueRef width_vec;
   LLVMValueRef height_vec;
   LLVMValueRef depth_vec;
   LLVMValueRef flt_size;
   LLVMValueRef flt_width_vec;
   LLVMValueRef flt_height_vec;
   LLVMValueRef flt_depth_vec;
   LLVMValueRef x, y = NULL, z = NULL;

   /* unpack the level size into int per-dimension vectors */
   lp_build_extract_image_sizes(bld,
                                &bld->int_size_bld,
                                bld->int_coord_type,
                                size,
                                &width_vec, &height_vec, &depth_vec);

   flt_size = lp_build_int_to_float(&bld->float_size_bld, size);

   /* and again as float vectors, for the coord math */
   lp_build_extract_image_sizes(bld,
                                &bld->float_size_bld,
                                bld->coord_type,
                                flt_size,
                                &flt_width_vec, &flt_height_vec, &flt_depth_vec);

   /*
    * Compute integer texcoords.
    */
   x = lp_build_sample_wrap_nearest(bld, coords[0], width_vec,
                                    flt_width_vec, offsets[0],
                                    bld->static_texture_state->pot_width,
                                    bld->static_sampler_state->wrap_s);
   lp_build_name(x, "tex.x.wrapped");

   if (dims >= 2) {
      y = lp_build_sample_wrap_nearest(bld, coords[1], height_vec,
                                       flt_height_vec, offsets[1],
                                       bld->static_texture_state->pot_height,
                                       bld->static_sampler_state->wrap_t);
      lp_build_name(y, "tex.y.wrapped");

      if (dims == 3) {
         z = lp_build_sample_wrap_nearest(bld, coords[2], depth_vec,
                                          flt_depth_vec, offsets[2],
                                          bld->static_texture_state->pot_depth,
                                          bld->static_sampler_state->wrap_r);
         lp_build_name(z, "tex.z.wrapped");
      }
   }
   /* for cube maps and array textures the third coord is the face/layer
    * index and must not be wrapped */
   if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE ||
       bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY ||
       bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) {
      z = coords[2];
      lp_build_name(z, "tex.z.layer");
   }

   /*
    * Get texture colors.
    */
   lp_build_sample_texel_soa(bld,
                             width_vec, height_vec, depth_vec,
                             x, y, z,
                             row_stride_vec, img_stride_vec,
                             data_ptr, mipoffsets, colors_out);

   if (bld->static_sampler_state->compare_mode != PIPE_TEX_COMPARE_NONE) {
      /* shadow comparison: replace colors with the 0.0/1.0 compare result */
      LLVMValueRef cmpval;
      cmpval = lp_build_sample_comparefunc(bld, coords[4], colors_out[0]);
      /* this is really just a AND 1.0, cmpval but llvm is clever enough */
      colors_out[0] = lp_build_select(&bld->texel_bld, cmpval,
                                      bld->texel_bld.one, bld->texel_bld.zero);
      colors_out[1] = colors_out[2] = colors_out[3] = colors_out[0];
   }

}
781
782
783 /**
784 * Like a lerp, but inputs are 0/~0 masks, so can simplify slightly.
785 */
786 static LLVMValueRef
787 lp_build_masklerp(struct lp_build_context *bld,
788 LLVMValueRef weight,
789 LLVMValueRef mask0,
790 LLVMValueRef mask1)
791 {
792 struct gallivm_state *gallivm = bld->gallivm;
793 LLVMBuilderRef builder = gallivm->builder;
794 LLVMValueRef weight2;
795
796 weight2 = lp_build_sub(bld, bld->one, weight);
797 weight = LLVMBuildBitCast(builder, weight,
798 lp_build_int_vec_type(gallivm, bld->type), "");
799 weight2 = LLVMBuildBitCast(builder, weight2,
800 lp_build_int_vec_type(gallivm, bld->type), "");
801 weight = LLVMBuildAnd(builder, weight, mask1, "");
802 weight2 = LLVMBuildAnd(builder, weight2, mask0, "");
803 weight = LLVMBuildBitCast(builder, weight, bld->vec_type, "");
804 weight2 = LLVMBuildBitCast(builder, weight2, bld->vec_type, "");
805 return lp_build_add(bld, weight, weight2);
806 }
807
808 /**
809 * Like a 2d lerp, but inputs are 0/~0 masks, so can simplify slightly.
810 */
811 static LLVMValueRef
812 lp_build_masklerp2d(struct lp_build_context *bld,
813 LLVMValueRef weight0,
814 LLVMValueRef weight1,
815 LLVMValueRef mask00,
816 LLVMValueRef mask01,
817 LLVMValueRef mask10,
818 LLVMValueRef mask11)
819 {
820 LLVMValueRef val0 = lp_build_masklerp(bld, weight0, mask00, mask01);
821 LLVMValueRef val1 = lp_build_masklerp(bld, weight0, mask10, mask11);
822 return lp_build_lerp(bld, weight1, val0, val1, 0);
823 }
824
825 /**
826 * Generate code to sample a mipmap level with linear filtering.
827 * If sampling a cube texture, r = cube face in [0,5].
828 * If linear_mask is present, only pixels having their mask set
829 * will receive linear filtering, the rest will use nearest.
830 */
831 static void
832 lp_build_sample_image_linear(struct lp_build_sample_context *bld,
833 LLVMValueRef size,
834 LLVMValueRef linear_mask,
835 LLVMValueRef row_stride_vec,
836 LLVMValueRef img_stride_vec,
837 LLVMValueRef data_ptr,
838 LLVMValueRef mipoffsets,
839 LLVMValueRef *coords,
840 const LLVMValueRef *offsets,
841 LLVMValueRef colors_out[4])
842 {
843 const unsigned dims = bld->dims;
844 LLVMValueRef width_vec;
845 LLVMValueRef height_vec;
846 LLVMValueRef depth_vec;
847 LLVMValueRef flt_size;
848 LLVMValueRef flt_width_vec;
849 LLVMValueRef flt_height_vec;
850 LLVMValueRef flt_depth_vec;
851 LLVMValueRef x0, y0 = NULL, z0 = NULL, x1, y1 = NULL, z1 = NULL;
852 LLVMValueRef s_fpart, t_fpart = NULL, r_fpart = NULL;
853 LLVMValueRef neighbors[2][2][4];
854 int chan;
855
856 lp_build_extract_image_sizes(bld,
857 &bld->int_size_bld,
858 bld->int_coord_type,
859 size,
860 &width_vec, &height_vec, &depth_vec);
861
862 flt_size = lp_build_int_to_float(&bld->float_size_bld, size);
863
864 lp_build_extract_image_sizes(bld,
865 &bld->float_size_bld,
866 bld->coord_type,
867 flt_size,
868 &flt_width_vec, &flt_height_vec, &flt_depth_vec);
869
870 /*
871 * Compute integer texcoords.
872 */
873 lp_build_sample_wrap_linear(bld, coords[0], width_vec,
874 flt_width_vec, offsets[0],
875 bld->static_texture_state->pot_width,
876 bld->static_sampler_state->wrap_s,
877 &x0, &x1, &s_fpart);
878 lp_build_name(x0, "tex.x0.wrapped");
879 lp_build_name(x1, "tex.x1.wrapped");
880
881 if (dims >= 2) {
882 lp_build_sample_wrap_linear(bld, coords[1], height_vec,
883 flt_height_vec, offsets[1],
884 bld->static_texture_state->pot_height,
885 bld->static_sampler_state->wrap_t,
886 &y0, &y1, &t_fpart);
887 lp_build_name(y0, "tex.y0.wrapped");
888 lp_build_name(y1, "tex.y1.wrapped");
889
890 if (dims == 3) {
891 lp_build_sample_wrap_linear(bld, coords[2], depth_vec,
892 flt_depth_vec, offsets[2],
893 bld->static_texture_state->pot_depth,
894 bld->static_sampler_state->wrap_r,
895 &z0, &z1, &r_fpart);
896 lp_build_name(z0, "tex.z0.wrapped");
897 lp_build_name(z1, "tex.z1.wrapped");
898 }
899 }
900 if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE ||
901 bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY ||
902 bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) {
903 z0 = z1 = coords[2]; /* cube face or layer */
904 lp_build_name(z0, "tex.z0.layer");
905 lp_build_name(z1, "tex.z1.layer");
906 }
907
908 if (linear_mask) {
909 /*
910 * Whack filter weights into place. Whatever pixel had more weight is
911 * the one which should have been selected by nearest filtering hence
912 * just use 100% weight for it.
913 */
914 struct lp_build_context *c_bld = &bld->coord_bld;
915 LLVMValueRef w1_mask, w1_weight;
916 LLVMValueRef half = lp_build_const_vec(bld->gallivm, c_bld->type, 0.5f);
917
918 w1_mask = lp_build_cmp(c_bld, PIPE_FUNC_GREATER, s_fpart, half);
919 /* this select is really just a "and" */
920 w1_weight = lp_build_select(c_bld, w1_mask, c_bld->one, c_bld->zero);
921 s_fpart = lp_build_select(c_bld, linear_mask, s_fpart, w1_weight);
922 if (dims >= 2) {
923 w1_mask = lp_build_cmp(c_bld, PIPE_FUNC_GREATER, t_fpart, half);
924 w1_weight = lp_build_select(c_bld, w1_mask, c_bld->one, c_bld->zero);
925 t_fpart = lp_build_select(c_bld, linear_mask, t_fpart, w1_weight);
926 if (dims == 3) {
927 w1_mask = lp_build_cmp(c_bld, PIPE_FUNC_GREATER, r_fpart, half);
928 w1_weight = lp_build_select(c_bld, w1_mask, c_bld->one, c_bld->zero);
929 r_fpart = lp_build_select(c_bld, linear_mask, r_fpart, w1_weight);
930 }
931 }
932 }
933
934 /*
935 * Get texture colors.
936 */
937 /* get x0/x1 texels */
938 lp_build_sample_texel_soa(bld,
939 width_vec, height_vec, depth_vec,
940 x0, y0, z0,
941 row_stride_vec, img_stride_vec,
942 data_ptr, mipoffsets, neighbors[0][0]);
943 lp_build_sample_texel_soa(bld,
944 width_vec, height_vec, depth_vec,
945 x1, y0, z0,
946 row_stride_vec, img_stride_vec,
947 data_ptr, mipoffsets, neighbors[0][1]);
948
949 if (dims == 1) {
950 if (bld->static_sampler_state->compare_mode == PIPE_TEX_COMPARE_NONE) {
951 /* Interpolate two samples from 1D image to produce one color */
952 for (chan = 0; chan < 4; chan++) {
953 colors_out[chan] = lp_build_lerp(&bld->texel_bld, s_fpart,
954 neighbors[0][0][chan],
955 neighbors[0][1][chan],
956 0);
957 }
958 }
959 else {
960 LLVMValueRef cmpval0, cmpval1;
961 cmpval0 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][0][0]);
962 cmpval1 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][1][0]);
963 /* simplified lerp, AND mask with weight and add */
964 colors_out[0] = lp_build_masklerp(&bld->texel_bld, s_fpart,
965 cmpval0, cmpval1);
966 colors_out[1] = colors_out[2] = colors_out[3] = colors_out[0];
967 }
968 }
969 else {
970 /* 2D/3D texture */
971 LLVMValueRef colors0[4];
972
973 /* get x0/x1 texels at y1 */
974 lp_build_sample_texel_soa(bld,
975 width_vec, height_vec, depth_vec,
976 x0, y1, z0,
977 row_stride_vec, img_stride_vec,
978 data_ptr, mipoffsets, neighbors[1][0]);
979 lp_build_sample_texel_soa(bld,
980 width_vec, height_vec, depth_vec,
981 x1, y1, z0,
982 row_stride_vec, img_stride_vec,
983 data_ptr, mipoffsets, neighbors[1][1]);
984
985 if (bld->static_sampler_state->compare_mode == PIPE_TEX_COMPARE_NONE) {
986 /* Bilinear interpolate the four samples from the 2D image / 3D slice */
987 for (chan = 0; chan < 4; chan++) {
988 colors0[chan] = lp_build_lerp_2d(&bld->texel_bld,
989 s_fpart, t_fpart,
990 neighbors[0][0][chan],
991 neighbors[0][1][chan],
992 neighbors[1][0][chan],
993 neighbors[1][1][chan],
994 0);
995 }
996 }
997 else {
998 LLVMValueRef cmpval00, cmpval01, cmpval10, cmpval11;
999 cmpval00 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][0][0]);
1000 cmpval01 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][1][0]);
1001 cmpval10 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][0][0]);
1002 cmpval11 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][1][0]);
1003 colors0[0] = lp_build_masklerp2d(&bld->texel_bld, s_fpart, t_fpart,
1004 cmpval00, cmpval01, cmpval10, cmpval11);
1005 colors0[1] = colors0[2] = colors0[3] = colors0[0];
1006 }
1007
1008 if (dims == 3) {
1009 LLVMValueRef neighbors1[2][2][4];
1010 LLVMValueRef colors1[4];
1011
1012 /* get x0/x1/y0/y1 texels at z1 */
1013 lp_build_sample_texel_soa(bld,
1014 width_vec, height_vec, depth_vec,
1015 x0, y0, z1,
1016 row_stride_vec, img_stride_vec,
1017 data_ptr, mipoffsets, neighbors1[0][0]);
1018 lp_build_sample_texel_soa(bld,
1019 width_vec, height_vec, depth_vec,
1020 x1, y0, z1,
1021 row_stride_vec, img_stride_vec,
1022 data_ptr, mipoffsets, neighbors1[0][1]);
1023 lp_build_sample_texel_soa(bld,
1024 width_vec, height_vec, depth_vec,
1025 x0, y1, z1,
1026 row_stride_vec, img_stride_vec,
1027 data_ptr, mipoffsets, neighbors1[1][0]);
1028 lp_build_sample_texel_soa(bld,
1029 width_vec, height_vec, depth_vec,
1030 x1, y1, z1,
1031 row_stride_vec, img_stride_vec,
1032 data_ptr, mipoffsets, neighbors1[1][1]);
1033
1034 if (bld->static_sampler_state->compare_mode == PIPE_TEX_COMPARE_NONE) {
1035 /* Bilinear interpolate the four samples from the second Z slice */
1036 for (chan = 0; chan < 4; chan++) {
1037 colors1[chan] = lp_build_lerp_2d(&bld->texel_bld,
1038 s_fpart, t_fpart,
1039 neighbors1[0][0][chan],
1040 neighbors1[0][1][chan],
1041 neighbors1[1][0][chan],
1042 neighbors1[1][1][chan],
1043 0);
1044 }
1045 /* Linearly interpolate the two samples from the two 3D slices */
1046 for (chan = 0; chan < 4; chan++) {
1047 colors_out[chan] = lp_build_lerp(&bld->texel_bld,
1048 r_fpart,
1049 colors0[chan], colors1[chan],
1050 0);
1051 }
1052 }
1053 else {
1054 LLVMValueRef cmpval00, cmpval01, cmpval10, cmpval11;
1055 cmpval00 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][0][0]);
1056 cmpval01 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][1][0]);
1057 cmpval10 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][0][0]);
1058 cmpval11 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][1][0]);
1059 colors1[0] = lp_build_masklerp2d(&bld->texel_bld, s_fpart, t_fpart,
1060 cmpval00, cmpval01, cmpval10, cmpval11);
1061 /* Linearly interpolate the two samples from the two 3D slices */
1062 colors_out[0] = lp_build_lerp(&bld->texel_bld,
1063 r_fpart,
1064 colors0[0], colors1[0],
1065 0);
1066 colors_out[1] = colors_out[2] = colors_out[3] = colors_out[0];
1067 }
1068 }
1069 else {
1070 /* 2D tex */
1071 for (chan = 0; chan < 4; chan++) {
1072 colors_out[chan] = colors0[chan];
1073 }
1074 }
1075 }
1076 }
1077
1078
1079 /**
1080 * Sample the texture/mipmap using given image filter and mip filter.
1081 * ilevel0 and ilevel1 indicate the two mipmap levels to sample
1082 * from (vectors or scalars).
1083 * If we're using nearest miplevel sampling the '1' values will be null/unused.
1084 */
1085 static void
1086 lp_build_sample_mipmap(struct lp_build_sample_context *bld,
1087 unsigned img_filter,
1088 unsigned mip_filter,
1089 LLVMValueRef *coords,
1090 const LLVMValueRef *offsets,
1091 LLVMValueRef ilevel0,
1092 LLVMValueRef ilevel1,
1093 LLVMValueRef lod_fpart,
1094 LLVMValueRef *colors_out)
1095 {
1096 LLVMBuilderRef builder = bld->gallivm->builder;
1097 LLVMValueRef size0 = NULL;
1098 LLVMValueRef size1 = NULL;
1099 LLVMValueRef row_stride0_vec = NULL;
1100 LLVMValueRef row_stride1_vec = NULL;
1101 LLVMValueRef img_stride0_vec = NULL;
1102 LLVMValueRef img_stride1_vec = NULL;
1103 LLVMValueRef data_ptr0 = NULL;
1104 LLVMValueRef data_ptr1 = NULL;
1105 LLVMValueRef mipoff0 = NULL;
1106 LLVMValueRef mipoff1 = NULL;
1107 LLVMValueRef colors0[4], colors1[4];
1108 unsigned chan;
1109
1110 /* sample the first mipmap level */
1111 lp_build_mipmap_level_sizes(bld, ilevel0,
1112 &size0,
1113 &row_stride0_vec, &img_stride0_vec);
1114 if (bld->num_mips == 1) {
1115 data_ptr0 = lp_build_get_mipmap_level(bld, ilevel0);
1116 }
1117 else {
1118 /* This path should work for num_lods 1 too but slightly less efficient */
1119 data_ptr0 = bld->base_ptr;
1120 mipoff0 = lp_build_get_mip_offsets(bld, ilevel0);
1121 }
1122 if (img_filter == PIPE_TEX_FILTER_NEAREST) {
1123 lp_build_sample_image_nearest(bld, size0,
1124 row_stride0_vec, img_stride0_vec,
1125 data_ptr0, mipoff0, coords, offsets,
1126 colors0);
1127 }
1128 else {
1129 assert(img_filter == PIPE_TEX_FILTER_LINEAR);
1130 lp_build_sample_image_linear(bld, size0, NULL,
1131 row_stride0_vec, img_stride0_vec,
1132 data_ptr0, mipoff0, coords, offsets,
1133 colors0);
1134 }
1135
1136 /* Store the first level's colors in the output variables */
1137 for (chan = 0; chan < 4; chan++) {
1138 LLVMBuildStore(builder, colors0[chan], colors_out[chan]);
1139 }
1140
1141 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
1142 struct lp_build_if_state if_ctx;
1143 LLVMValueRef need_lerp;
1144
1145 /* need_lerp = lod_fpart > 0 */
1146 if (bld->num_lods == 1) {
1147 need_lerp = LLVMBuildFCmp(builder, LLVMRealUGT,
1148 lod_fpart, bld->lodf_bld.zero,
1149 "need_lerp");
1150 }
1151 else {
1152 /*
1153 * We'll do mip filtering if any of the quads (or individual
1154 * pixel in case of per-pixel lod) need it.
1155 * It might be better to split the vectors here and only fetch/filter
1156 * quads which need it (if there's one lod per quad).
1157 */
1158 need_lerp = lp_build_compare(bld->gallivm, bld->lodf_bld.type,
1159 PIPE_FUNC_GREATER,
1160 lod_fpart, bld->lodf_bld.zero);
1161 need_lerp = lp_build_any_true_range(&bld->lodi_bld, bld->num_lods, need_lerp);
1162 }
1163
1164 lp_build_if(&if_ctx, bld->gallivm, need_lerp);
1165 {
1166 /*
1167 * We unfortunately need to clamp lod_fpart here since we can get
1168 * negative values which would screw up filtering if not all
1169 * lod_fpart values have same sign.
1170 */
1171 lod_fpart = lp_build_max(&bld->lodf_bld, lod_fpart,
1172 bld->lodf_bld.zero);
1173 /* sample the second mipmap level */
1174 lp_build_mipmap_level_sizes(bld, ilevel1,
1175 &size1,
1176 &row_stride1_vec, &img_stride1_vec);
1177 if (bld->num_mips == 1) {
1178 data_ptr1 = lp_build_get_mipmap_level(bld, ilevel1);
1179 }
1180 else {
1181 data_ptr1 = bld->base_ptr;
1182 mipoff1 = lp_build_get_mip_offsets(bld, ilevel1);
1183 }
1184 if (img_filter == PIPE_TEX_FILTER_NEAREST) {
1185 lp_build_sample_image_nearest(bld, size1,
1186 row_stride1_vec, img_stride1_vec,
1187 data_ptr1, mipoff1, coords, offsets,
1188 colors1);
1189 }
1190 else {
1191 lp_build_sample_image_linear(bld, size1, NULL,
1192 row_stride1_vec, img_stride1_vec,
1193 data_ptr1, mipoff1, coords, offsets,
1194 colors1);
1195 }
1196
1197 /* interpolate samples from the two mipmap levels */
1198
1199 if (bld->num_lods != bld->coord_type.length)
1200 lod_fpart = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
1201 bld->lodf_bld.type,
1202 bld->texel_bld.type,
1203 lod_fpart);
1204
1205 for (chan = 0; chan < 4; chan++) {
1206 colors0[chan] = lp_build_lerp(&bld->texel_bld, lod_fpart,
1207 colors0[chan], colors1[chan],
1208 0);
1209 LLVMBuildStore(builder, colors0[chan], colors_out[chan]);
1210 }
1211 }
1212 lp_build_endif(&if_ctx);
1213 }
1214 }
1215
1216
1217 /**
1218 * Sample the texture/mipmap using given mip filter, and using
1219 * both nearest and linear filtering at the same time depending
1220 * on linear_mask.
1221 * lod can be per quad but linear_mask is always per pixel.
1222 * ilevel0 and ilevel1 indicate the two mipmap levels to sample
1223 * from (vectors or scalars).
1224 * If we're using nearest miplevel sampling the '1' values will be null/unused.
1225 */
1226 static void
1227 lp_build_sample_mipmap_both(struct lp_build_sample_context *bld,
1228 LLVMValueRef linear_mask,
1229 unsigned mip_filter,
1230 LLVMValueRef *coords,
1231 const LLVMValueRef *offsets,
1232 LLVMValueRef ilevel0,
1233 LLVMValueRef ilevel1,
1234 LLVMValueRef lod_fpart,
1235 LLVMValueRef lod_positive,
1236 LLVMValueRef *colors_out)
1237 {
1238 LLVMBuilderRef builder = bld->gallivm->builder;
1239 LLVMValueRef size0 = NULL;
1240 LLVMValueRef size1 = NULL;
1241 LLVMValueRef row_stride0_vec = NULL;
1242 LLVMValueRef row_stride1_vec = NULL;
1243 LLVMValueRef img_stride0_vec = NULL;
1244 LLVMValueRef img_stride1_vec = NULL;
1245 LLVMValueRef data_ptr0 = NULL;
1246 LLVMValueRef data_ptr1 = NULL;
1247 LLVMValueRef mipoff0 = NULL;
1248 LLVMValueRef mipoff1 = NULL;
1249 LLVMValueRef colors0[4], colors1[4];
1250 unsigned chan;
1251
1252 /* sample the first mipmap level */
1253 lp_build_mipmap_level_sizes(bld, ilevel0,
1254 &size0,
1255 &row_stride0_vec, &img_stride0_vec);
1256 if (bld->num_mips == 1) {
1257 data_ptr0 = lp_build_get_mipmap_level(bld, ilevel0);
1258 }
1259 else {
1260 /* This path should work for num_lods 1 too but slightly less efficient */
1261 data_ptr0 = bld->base_ptr;
1262 mipoff0 = lp_build_get_mip_offsets(bld, ilevel0);
1263 }
1264
1265 lp_build_sample_image_linear(bld, size0, linear_mask,
1266 row_stride0_vec, img_stride0_vec,
1267 data_ptr0, mipoff0, coords, offsets,
1268 colors0);
1269
1270 /* Store the first level's colors in the output variables */
1271 for (chan = 0; chan < 4; chan++) {
1272 LLVMBuildStore(builder, colors0[chan], colors_out[chan]);
1273 }
1274
1275 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
1276 struct lp_build_if_state if_ctx;
1277 LLVMValueRef need_lerp;
1278
1279 /*
1280 * We'll do mip filtering if any of the quads (or individual
1281 * pixel in case of per-pixel lod) need it.
1282 * Note using lod_positive here not lod_fpart since it may be the same
1283 * condition as that used in the outer "if" in the caller hence llvm
1284 * should be able to merge the branches in this case.
1285 */
1286 need_lerp = lp_build_any_true_range(&bld->lodi_bld, bld->num_lods, lod_positive);
1287
1288 lp_build_if(&if_ctx, bld->gallivm, need_lerp);
1289 {
1290 /*
1291 * We unfortunately need to clamp lod_fpart here since we can get
1292 * negative values which would screw up filtering if not all
1293 * lod_fpart values have same sign.
1294 */
1295 lod_fpart = lp_build_max(&bld->lodf_bld, lod_fpart,
1296 bld->lodf_bld.zero);
1297 /* sample the second mipmap level */
1298 lp_build_mipmap_level_sizes(bld, ilevel1,
1299 &size1,
1300 &row_stride1_vec, &img_stride1_vec);
1301 if (bld->num_mips == 1) {
1302 data_ptr1 = lp_build_get_mipmap_level(bld, ilevel1);
1303 }
1304 else {
1305 data_ptr1 = bld->base_ptr;
1306 mipoff1 = lp_build_get_mip_offsets(bld, ilevel1);
1307 }
1308
1309 lp_build_sample_image_linear(bld, size1, linear_mask,
1310 row_stride1_vec, img_stride1_vec,
1311 data_ptr1, mipoff1, coords, offsets,
1312 colors1);
1313
1314 /* interpolate samples from the two mipmap levels */
1315
1316 if (bld->num_lods != bld->coord_type.length)
1317 lod_fpart = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
1318 bld->lodf_bld.type,
1319 bld->texel_bld.type,
1320 lod_fpart);
1321
1322 for (chan = 0; chan < 4; chan++) {
1323 colors0[chan] = lp_build_lerp(&bld->texel_bld, lod_fpart,
1324 colors0[chan], colors1[chan],
1325 0);
1326 LLVMBuildStore(builder, colors0[chan], colors_out[chan]);
1327 }
1328 }
1329 lp_build_endif(&if_ctx);
1330 }
1331 }
1332
1333
1334 /**
1335 * Build (per-coord) layer value.
1336 * Either clamp layer to valid values or fill in optional out_of_bounds
1337 * value and just return value unclamped.
1338 */
1339 static LLVMValueRef
1340 lp_build_layer_coord(struct lp_build_sample_context *bld,
1341 unsigned texture_unit,
1342 LLVMValueRef layer,
1343 LLVMValueRef *out_of_bounds)
1344 {
1345 LLVMValueRef num_layers;
1346 struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
1347
1348 num_layers = bld->dynamic_state->depth(bld->dynamic_state,
1349 bld->gallivm, texture_unit);
1350
1351 if (out_of_bounds) {
1352 LLVMValueRef out1, out;
1353 num_layers = lp_build_broadcast_scalar(int_coord_bld, num_layers);
1354 out = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, layer, int_coord_bld->zero);
1355 out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, layer, num_layers);
1356 *out_of_bounds = lp_build_or(int_coord_bld, out, out1);
1357 return layer;
1358 }
1359 else {
1360 LLVMValueRef maxlayer;
1361 maxlayer = lp_build_sub(&bld->int_bld, num_layers, bld->int_bld.one);
1362 maxlayer = lp_build_broadcast_scalar(int_coord_bld, maxlayer);
1363 return lp_build_clamp(int_coord_bld, layer, int_coord_bld->zero, maxlayer);
1364 }
1365 }
1366
1367
1368 /**
1369 * Calculate cube face, lod, mip levels.
1370 */
1371 static void
1372 lp_build_sample_common(struct lp_build_sample_context *bld,
1373 unsigned texture_index,
1374 unsigned sampler_index,
1375 LLVMValueRef *coords,
1376 const struct lp_derivatives *derivs, /* optional */
1377 LLVMValueRef lod_bias, /* optional */
1378 LLVMValueRef explicit_lod, /* optional */
1379 LLVMValueRef *lod_pos_or_zero,
1380 LLVMValueRef *lod_fpart,
1381 LLVMValueRef *ilevel0,
1382 LLVMValueRef *ilevel1)
1383 {
1384 const unsigned mip_filter = bld->static_sampler_state->min_mip_filter;
1385 const unsigned min_filter = bld->static_sampler_state->min_img_filter;
1386 const unsigned mag_filter = bld->static_sampler_state->mag_img_filter;
1387 const unsigned target = bld->static_texture_state->target;
1388 LLVMValueRef first_level, cube_rho = NULL;
1389 LLVMValueRef lod_ipart = NULL;
1390
1391 /*
1392 printf("%s mip %d min %d mag %d\n", __FUNCTION__,
1393 mip_filter, min_filter, mag_filter);
1394 */
1395
1396 /*
1397 * Choose cube face, recompute texcoords for the chosen face and
1398 * compute rho here too (as it requires transform of derivatives).
1399 */
1400 if (target == PIPE_TEXTURE_CUBE) {
1401 boolean need_derivs;
1402 need_derivs = ((min_filter != mag_filter ||
1403 mip_filter != PIPE_TEX_MIPFILTER_NONE) &&
1404 !bld->static_sampler_state->min_max_lod_equal &&
1405 !explicit_lod);
1406 lp_build_cube_lookup(bld, coords, derivs, &cube_rho, need_derivs);
1407 }
1408 else if (target == PIPE_TEXTURE_1D_ARRAY ||
1409 target == PIPE_TEXTURE_2D_ARRAY) {
1410 coords[2] = lp_build_iround(&bld->coord_bld, coords[2]);
1411 coords[2] = lp_build_layer_coord(bld, texture_index, coords[2], NULL);
1412 }
1413
1414 if (bld->static_sampler_state->compare_mode != PIPE_TEX_COMPARE_NONE) {
1415 /*
1416 * Clamp p coords to [0,1] for fixed function depth texture format here.
1417 * Technically this is not entirely correct for unorm depth as the ref value
1418 * should be converted to the depth format (quantization!) and comparison
1419 * then done in texture format. This would actually help performance (since
1420 * only need to do it once and could save the per-sample conversion of texels
1421 * to floats instead), but it would need more messy code (would need to push
1422 * at least some bits down to actual fetch so conversion could be skipped,
1423 * and would have ugly interaction with border color, would need to convert
1424 * border color to that format too or do some other tricks to make it work).
1425 */
1426 const struct util_format_description *format_desc = bld->format_desc;
1427 unsigned chan_type;
1428 /* not entirely sure we couldn't end up with non-valid swizzle here */
1429 chan_type = format_desc->swizzle[0] <= UTIL_FORMAT_SWIZZLE_W ?
1430 format_desc->channel[format_desc->swizzle[0]].type :
1431 UTIL_FORMAT_TYPE_FLOAT;
1432 if (chan_type != UTIL_FORMAT_TYPE_FLOAT) {
1433 coords[4] = lp_build_clamp(&bld->coord_bld, coords[4],
1434 bld->coord_bld.zero, bld->coord_bld.one);
1435 }
1436 }
1437
1438 /*
1439 * Compute the level of detail (float).
1440 */
1441 if (min_filter != mag_filter ||
1442 mip_filter != PIPE_TEX_MIPFILTER_NONE) {
1443 /* Need to compute lod either to choose mipmap levels or to
1444 * distinguish between minification/magnification with one mipmap level.
1445 */
1446 lp_build_lod_selector(bld, texture_index, sampler_index,
1447 coords[0], coords[1], coords[2], cube_rho,
1448 derivs, lod_bias, explicit_lod,
1449 mip_filter,
1450 &lod_ipart, lod_fpart, lod_pos_or_zero);
1451 } else {
1452 lod_ipart = bld->lodi_bld.zero;
1453 *lod_pos_or_zero = bld->lodi_bld.zero;
1454 }
1455
1456 if (bld->num_lods != bld->num_mips) {
1457 /* only makes sense if there's just a single mip level */
1458 assert(bld->num_mips == 1);
1459 lod_ipart = lp_build_extract_range(bld->gallivm, lod_ipart, 0, 1);
1460 }
1461
1462 /*
1463 * Compute integer mipmap level(s) to fetch texels from: ilevel0, ilevel1
1464 */
1465 switch (mip_filter) {
1466 default:
1467 assert(0 && "bad mip_filter value in lp_build_sample_soa()");
1468 /* fall-through */
1469 case PIPE_TEX_MIPFILTER_NONE:
1470 /* always use mip level 0 */
1471 if (HAVE_LLVM == 0x0207 && target == PIPE_TEXTURE_CUBE) {
1472 /* XXX this is a work-around for an apparent bug in LLVM 2.7.
1473 * We should be able to set ilevel0 = const(0) but that causes
1474 * bad x86 code to be emitted.
1475 */
1476 assert(lod_ipart);
1477 lp_build_nearest_mip_level(bld, texture_index, lod_ipart, ilevel0, NULL);
1478 }
1479 else {
1480 first_level = bld->dynamic_state->first_level(bld->dynamic_state,
1481 bld->gallivm, texture_index);
1482 first_level = lp_build_broadcast_scalar(&bld->leveli_bld, first_level);
1483 *ilevel0 = first_level;
1484 }
1485 break;
1486 case PIPE_TEX_MIPFILTER_NEAREST:
1487 assert(lod_ipart);
1488 lp_build_nearest_mip_level(bld, texture_index, lod_ipart, ilevel0, NULL);
1489 break;
1490 case PIPE_TEX_MIPFILTER_LINEAR:
1491 assert(lod_ipart);
1492 assert(*lod_fpart);
1493 lp_build_linear_mip_levels(bld, texture_index,
1494 lod_ipart, lod_fpart,
1495 ilevel0, ilevel1);
1496 break;
1497 }
1498 }
1499
/**
 * Clamp the border color against the representable range of the texture
 * format and store the result in bld->border_color_clamped.
 * The clamp limits are derived from the first (non-void) channel of the
 * format; mixed signed/unsigned and compressed formats are special-cased.
 */
static void
lp_build_clamp_border_color(struct lp_build_sample_context *bld,
                            unsigned sampler_unit)
{
   struct gallivm_state *gallivm = bld->gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef border_color_ptr =
      bld->dynamic_state->border_color(bld->dynamic_state,
                                       gallivm, sampler_unit);
   LLVMValueRef border_color;
   const struct util_format_description *format_desc = bld->format_desc;
   struct lp_type vec4_type = bld->texel_type;
   struct lp_build_context vec4_bld;
   LLVMValueRef min_clamp = NULL;
   LLVMValueRef max_clamp = NULL;

   /*
    * For normalized format need to clamp border color (technically
    * probably should also quantize the data). Really sucks doing this
    * here but can't avoid at least for now since this is part of
    * sampler state and texture format is part of sampler_view state.
    * GL also expects clamping for uint/sint formats too so
    * do that as well (d3d10 can't end up here with uint/sint since it
    * only supports them with ld).
    */
   vec4_type.length = 4;
   lp_build_context_init(&vec4_bld, gallivm, vec4_type);

   /*
    * Vectorized clamping of border color. Loading is a bit of a hack since
    * we just cast the pointer to float array to pointer to vec4
    * (int or float).
    */
   border_color_ptr = lp_build_array_get_ptr(gallivm, border_color_ptr,
                                             lp_build_const_int32(gallivm, 0));
   border_color_ptr = LLVMBuildBitCast(builder, border_color_ptr,
                                       LLVMPointerType(vec4_bld.vec_type, 0), "");
   border_color = LLVMBuildLoad(builder, border_color_ptr, "");
   /* we don't have aligned type in the dynamic state unfortunately */
   lp_set_load_alignment(border_color, 4);

   /*
    * Instead of having some incredibly complex logic which will try to figure out
    * clamping necessary for each channel, simply use the first channel, and treat
    * mixed signed/unsigned normalized formats specially.
    * (Mixed non-normalized, which wouldn't work at all here, do not exist for a
    * good reason.)
    */
   if (format_desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
      int chan;
      /* d/s needs special handling because both present means just sampling depth */
      if (util_format_is_depth_and_stencil(format_desc->format)) {
         chan = format_desc->swizzle[0];
      }
      else {
         /* may be -1 if there is no such channel; guarded below */
         chan = util_format_get_first_non_void_channel(format_desc->format);
      }
      if (chan >= 0 && chan <= UTIL_FORMAT_SWIZZLE_W) {
         unsigned chan_type = format_desc->channel[chan].type;
         unsigned chan_norm = format_desc->channel[chan].normalized;
         unsigned chan_pure = format_desc->channel[chan].pure_integer;
         if (chan_type == UTIL_FORMAT_TYPE_SIGNED) {
            if (chan_norm) {
               /* snorm: representable range is [-1, 1] */
               min_clamp = lp_build_const_vec(gallivm, vec4_type, -1.0F);
               max_clamp = vec4_bld.one;
            }
            else if (chan_pure) {
               /*
                * Border color was stored as int, hence need min/max clamp
                * only if chan has less than 32 bits..
                */
               unsigned chan_size = format_desc->channel[chan].size;
               if (chan_size < 32) {
                  min_clamp = lp_build_const_int_vec(gallivm, vec4_type,
                                                     0 - (1 << (chan_size - 1)));
                  max_clamp = lp_build_const_int_vec(gallivm, vec4_type,
                                                     (1 << (chan_size - 1)) - 1);
               }
            }
            /* TODO: no idea about non-pure, non-normalized! */
         }
         else if (chan_type == UTIL_FORMAT_TYPE_UNSIGNED) {
            if (chan_norm) {
               /* unorm: representable range is [0, 1] */
               min_clamp = vec4_bld.zero;
               max_clamp = vec4_bld.one;
            }
            /*
             * Need a ugly hack here, because we don't have Z32_FLOAT_X8X24
             * we use Z32_FLOAT_S8X24 to imply sampling depth component
             * and ignoring stencil, which will blow up here if we try to
             * do a uint clamp in a float texel build...
             * And even if we had that format, mesa st also thinks using z24s8
             * means depth sampling ignoring stencil.
             */
            else if (chan_pure) {
               /*
                * Border color was stored as uint, hence never need min
                * clamp, and only need max clamp if chan has less than 32 bits.
                */
               unsigned chan_size = format_desc->channel[chan].size;
               if (chan_size < 32) {
                  max_clamp = lp_build_const_int_vec(gallivm, vec4_type,
                                                     (1 << chan_size) - 1);
               }
               /* TODO: no idea about non-pure, non-normalized! */
            }
         }
         else if (chan_type == UTIL_FORMAT_TYPE_FIXED) {
            /* TODO: I have no idea what clamp this would need if any! */
         }
      }
      /* mixed plain formats (or different pure size) */
      switch (format_desc->format) {
      case PIPE_FORMAT_B10G10R10A2_UINT:
      case PIPE_FORMAT_R10G10B10A2_UINT:
      {
         /* 10-bit rgb channels, 2-bit alpha */
         unsigned max10 = (1 << 10) - 1;
         max_clamp = lp_build_const_aos(gallivm, vec4_type, max10, max10,
                                        max10, (1 << 2) - 1, NULL);
      }
         break;
      case PIPE_FORMAT_R10SG10SB10SA2U_NORM:
         /* snorm rgb but unorm alpha: different min per channel */
         min_clamp = lp_build_const_aos(gallivm, vec4_type, -1.0F, -1.0F,
                                        -1.0F, 0.0F, NULL);
         max_clamp = vec4_bld.one;
         break;
      case PIPE_FORMAT_R8SG8SB8UX8U_NORM:
      case PIPE_FORMAT_R5SG5SB6U_NORM:
         /* snorm rg but unorm b: different min per channel */
         min_clamp = lp_build_const_aos(gallivm, vec4_type, -1.0F, -1.0F,
                                        0.0F, 0.0F, NULL);
         max_clamp = vec4_bld.one;
         break;
      default:
         break;
      }
   }
   else {
      /* cannot figure this out from format description */
      if (format_desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
         /* s3tc formats are always unorm */
         min_clamp = vec4_bld.zero;
         max_clamp = vec4_bld.one;
      }
      else if (format_desc->layout == UTIL_FORMAT_LAYOUT_RGTC ||
               format_desc->layout == UTIL_FORMAT_LAYOUT_ETC) {
         switch (format_desc->format) {
         case PIPE_FORMAT_RGTC1_UNORM:
         case PIPE_FORMAT_RGTC2_UNORM:
         case PIPE_FORMAT_LATC1_UNORM:
         case PIPE_FORMAT_LATC2_UNORM:
         case PIPE_FORMAT_ETC1_RGB8:
            min_clamp = vec4_bld.zero;
            max_clamp = vec4_bld.one;
            break;
         case PIPE_FORMAT_RGTC1_SNORM:
         case PIPE_FORMAT_RGTC2_SNORM:
         case PIPE_FORMAT_LATC1_SNORM:
         case PIPE_FORMAT_LATC2_SNORM:
            min_clamp = lp_build_const_vec(gallivm, vec4_type, -1.0F);
            max_clamp = vec4_bld.one;
            break;
         default:
            assert(0);
            break;
         }
      }
      /*
       * all others from subsampled/other group, though we don't care
       * about yuv (and should not have any from zs here)
       */
      else if (format_desc->colorspace != UTIL_FORMAT_COLORSPACE_YUV){
         switch (format_desc->format) {
         case PIPE_FORMAT_R8G8_B8G8_UNORM:
         case PIPE_FORMAT_G8R8_G8B8_UNORM:
         case PIPE_FORMAT_G8R8_B8R8_UNORM:
         case PIPE_FORMAT_R8G8_R8B8_UNORM:
         case PIPE_FORMAT_R1_UNORM: /* doesn't make sense but ah well */
            min_clamp = vec4_bld.zero;
            max_clamp = vec4_bld.one;
            break;
         case PIPE_FORMAT_R8G8Bx_SNORM:
            min_clamp = lp_build_const_vec(gallivm, vec4_type, -1.0F);
            max_clamp = vec4_bld.one;
            break;
         /*
          * Note smallfloat formats usually don't need clamping
          * (they still have infinite range) however this is not
          * true for r11g11b10 and r9g9b9e5, which can't represent
          * negative numbers (and additionally r9g9b9e5 can't represent
          * very large numbers). d3d10 seems happy without clamping in
          * this case, but gl spec is pretty clear: "for floating
          * point and integer formats, border values are clamped to
          * the representable range of the format" so do that here.
          */
         case PIPE_FORMAT_R11G11B10_FLOAT:
            min_clamp = vec4_bld.zero;
            break;
         case PIPE_FORMAT_R9G9B9E5_FLOAT:
            min_clamp = vec4_bld.zero;
            max_clamp = lp_build_const_vec(gallivm, vec4_type, MAX_RGB9E5);
            break;
         default:
            assert(0);
            break;
         }
      }
   }

   if (min_clamp) {
      border_color = lp_build_max(&vec4_bld, border_color, min_clamp);
   }
   if (max_clamp) {
      border_color = lp_build_min(&vec4_bld, border_color, max_clamp);
   }

   bld->border_color_clamped = border_color;
}
1717
1718
1719 /**
1720 * General texture sampling codegen.
1721 * This function handles texture sampling for all texture targets (1D,
1722 * 2D, 3D, cube) and all filtering modes.
1723 */
1724 static void
1725 lp_build_sample_general(struct lp_build_sample_context *bld,
1726 unsigned sampler_unit,
1727 LLVMValueRef *coords,
1728 const LLVMValueRef *offsets,
1729 LLVMValueRef lod_positive,
1730 LLVMValueRef lod_fpart,
1731 LLVMValueRef ilevel0,
1732 LLVMValueRef ilevel1,
1733 LLVMValueRef *colors_out)
1734 {
1735 LLVMBuilderRef builder = bld->gallivm->builder;
1736 const struct lp_static_sampler_state *sampler_state = bld->static_sampler_state;
1737 const unsigned mip_filter = sampler_state->min_mip_filter;
1738 const unsigned min_filter = sampler_state->min_img_filter;
1739 const unsigned mag_filter = sampler_state->mag_img_filter;
1740 LLVMValueRef texels[4];
1741 unsigned chan;
1742
1743 /* if we need border color, (potentially) clamp it now */
1744 if (lp_sampler_wrap_mode_uses_border_color(sampler_state->wrap_s,
1745 min_filter,
1746 mag_filter) ||
1747 (bld->dims > 1 &&
1748 lp_sampler_wrap_mode_uses_border_color(sampler_state->wrap_t,
1749 min_filter,
1750 mag_filter)) ||
1751 (bld->dims > 2 &&
1752 lp_sampler_wrap_mode_uses_border_color(sampler_state->wrap_r,
1753 min_filter,
1754 mag_filter))) {
1755 lp_build_clamp_border_color(bld, sampler_unit);
1756 }
1757
1758
1759 /*
1760 * Get/interpolate texture colors.
1761 */
1762
1763 for (chan = 0; chan < 4; ++chan) {
1764 texels[chan] = lp_build_alloca(bld->gallivm, bld->texel_bld.vec_type, "");
1765 lp_build_name(texels[chan], "sampler%u_texel_%c_var", sampler_unit, "xyzw"[chan]);
1766 }
1767
1768 if (min_filter == mag_filter) {
1769 /* no need to distinguish between minification and magnification */
1770 lp_build_sample_mipmap(bld, min_filter, mip_filter,
1771 coords, offsets,
1772 ilevel0, ilevel1, lod_fpart,
1773 texels);
1774 }
1775 else {
1776 /*
1777 * Could also get rid of the if-logic and always use mipmap_both, both
1778 * for the single lod and multi-lod case if nothing really uses this.
1779 */
1780 if (bld->num_lods == 1) {
1781 /* Emit conditional to choose min image filter or mag image filter
1782 * depending on the lod being > 0 or <= 0, respectively.
1783 */
1784 struct lp_build_if_state if_ctx;
1785
1786 lod_positive = LLVMBuildTrunc(builder, lod_positive,
1787 LLVMInt1TypeInContext(bld->gallivm->context), "");
1788
1789 lp_build_if(&if_ctx, bld->gallivm, lod_positive);
1790 {
1791 /* Use the minification filter */
1792 lp_build_sample_mipmap(bld, min_filter, mip_filter,
1793 coords, offsets,
1794 ilevel0, ilevel1, lod_fpart,
1795 texels);
1796 }
1797 lp_build_else(&if_ctx);
1798 {
1799 /* Use the magnification filter */
1800 lp_build_sample_mipmap(bld, mag_filter, PIPE_TEX_MIPFILTER_NONE,
1801 coords, offsets,
1802 ilevel0, NULL, NULL,
1803 texels);
1804 }
1805 lp_build_endif(&if_ctx);
1806 }
1807 else {
1808 LLVMValueRef need_linear, linear_mask;
1809 unsigned mip_filter_for_nearest;
1810 struct lp_build_if_state if_ctx;
1811
1812 if (min_filter == PIPE_TEX_FILTER_LINEAR) {
1813 linear_mask = lod_positive;
1814 mip_filter_for_nearest = PIPE_TEX_MIPFILTER_NONE;
1815 }
1816 else {
1817 linear_mask = lp_build_not(&bld->lodi_bld, lod_positive);
1818 mip_filter_for_nearest = mip_filter;
1819 }
1820 need_linear = lp_build_any_true_range(&bld->lodi_bld, bld->num_lods,
1821 linear_mask);
1822
1823 if (bld->num_lods != bld->coord_type.length) {
1824 linear_mask = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
1825 bld->lodi_type,
1826 bld->int_coord_type,
1827 linear_mask);
1828 }
1829
1830 lp_build_if(&if_ctx, bld->gallivm, need_linear);
1831 {
1832 /*
1833 * Do sampling with both filters simultaneously. This means using
1834 * a linear filter and doing some tricks (with weights) for the pixels
1835 * which need nearest filter.
1836 * Note that it's probably rare some pixels need nearest and some
1837 * linear filter but the fixups required for the nearest pixels
1838 * aren't all that complicated so just always run a combined path
1839 * if at least some pixels require linear.
1840 */
1841 lp_build_sample_mipmap_both(bld, linear_mask, mip_filter,
1842 coords, offsets,
1843 ilevel0, ilevel1,
1844 lod_fpart, lod_positive,
1845 texels);
1846 }
1847 lp_build_else(&if_ctx);
1848 {
1849 /*
1850 * All pixels require just nearest filtering, which is way
1851 * cheaper than linear, hence do a separate path for that.
1852 */
1853 lp_build_sample_mipmap(bld, PIPE_TEX_FILTER_NEAREST,
1854 mip_filter_for_nearest,
1855 coords, offsets,
1856 ilevel0, ilevel1, lod_fpart,
1857 texels);
1858 }
1859 lp_build_endif(&if_ctx);
1860 }
1861 }
1862
1863 for (chan = 0; chan < 4; ++chan) {
1864 colors_out[chan] = LLVMBuildLoad(builder, texels[chan], "");
1865 lp_build_name(colors_out[chan], "sampler%u_texel_%c", sampler_unit, "xyzw"[chan]);
1866 }
1867 }
1868
1869
/**
 * Texel fetch function.
 * In contrast to general sampling there is no filtering, no coord minification,
 * lod (if any) is always explicit uint, coords are uints (in terms of texel units)
 * directly to be applied to the selected mip level (after adding texel offsets).
 * This function handles texel fetch for all targets where texel fetch is supported
 * (no cube maps, but 1d, 2d, 3d are supported, arrays and buffers should be too).
 *
 * \param bld           sampling build context (types, dynamic state accessors)
 * \param texture_unit  index of the bound texture to fetch from
 * \param coords        integer texel coordinates (x, y, z)
 * \param explicit_lod  explicit integer lod, or NULL (then level is first_level,
 *                      or 0 for buffers)
 * \param offsets       optional texel offsets to add to the coords
 * \param colors_out    the four fetched r/g/b/a channel vectors
 */
static void
lp_build_fetch_texel(struct lp_build_sample_context *bld,
                     unsigned texture_unit,
                     const LLVMValueRef *coords,
                     LLVMValueRef explicit_lod,
                     const LLVMValueRef *offsets,
                     LLVMValueRef *colors_out)
{
   struct lp_build_context *perquadi_bld = &bld->lodi_bld;
   struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
   unsigned dims = bld->dims, chan;
   unsigned target = bld->static_texture_state->target;
   /* TRUE: out-of-bounds fetches return zero (d3d10 / robust-access behavior) */
   boolean out_of_bound_ret_zero = TRUE;
   LLVMValueRef size, ilevel;
   LLVMValueRef row_stride_vec = NULL, img_stride_vec = NULL;
   LLVMValueRef x = coords[0], y = coords[1], z = coords[2];
   LLVMValueRef width, height, depth, i, j;
   LLVMValueRef offset, out_of_bounds, out1;

   /* accumulated per-element out-of-bounds mask, starts all-in-bounds */
   out_of_bounds = int_coord_bld->zero;

   /*
    * Select the mip level to fetch from. Buffers have no mip levels;
    * otherwise clamp/validate the explicit lod (collecting an oob mask
    * for invalid levels), or fall back to the texture's first_level.
    */
   if (explicit_lod && bld->static_texture_state->target != PIPE_BUFFER) {
      if (bld->num_mips != int_coord_bld->type.length) {
         /* per-quad (or single) lod: pack the per-element lods down */
         ilevel = lp_build_pack_aos_scalars(bld->gallivm, int_coord_bld->type,
                                            perquadi_bld->type, explicit_lod, 0);
      }
      else {
         ilevel = explicit_lod;
      }
      lp_build_nearest_mip_level(bld, texture_unit, ilevel, &ilevel,
                                 out_of_bound_ret_zero ? &out_of_bounds : NULL);
   }
   else {
      assert(bld->num_mips == 1);
      if (bld->static_texture_state->target != PIPE_BUFFER) {
         ilevel = bld->dynamic_state->first_level(bld->dynamic_state,
                                                  bld->gallivm, texture_unit);
      }
      else {
         ilevel = lp_build_const_int32(bld->gallivm, 0);
      }
   }
   /* sizes and strides of the selected mip level */
   lp_build_mipmap_level_sizes(bld, ilevel,
                               &size,
                               &row_stride_vec, &img_stride_vec);
   lp_build_extract_image_sizes(bld, &bld->int_size_bld, int_coord_bld->type,
                                size, &width, &height, &depth);

   /* for array textures the layer coord (z) is clamped/validated separately */
   if (target == PIPE_TEXTURE_1D_ARRAY ||
       target == PIPE_TEXTURE_2D_ARRAY) {
      if (out_of_bound_ret_zero) {
         z = lp_build_layer_coord(bld, texture_unit, z, &out1);
         out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
      }
      else {
         z = lp_build_layer_coord(bld, texture_unit, z, NULL);
      }
   }

   /* This is a lot like border sampling */
   if (offsets[0]) {
      /*
       * coords are really unsigned, offsets are signed, but I don't think
       * exceeding 31 bits is possible
       */
      x = lp_build_add(int_coord_bld, x, offsets[0]);
   }
   /* per dimension: mark elements with coord < 0 or coord >= size as oob */
   out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, x, int_coord_bld->zero);
   out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
   out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, x, width);
   out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);

   if (dims >= 2) {
      if (offsets[1]) {
         y = lp_build_add(int_coord_bld, y, offsets[1]);
      }
      out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, y, int_coord_bld->zero);
      out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
      out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, y, height);
      out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);

      if (dims >= 3) {
         if (offsets[2]) {
            z = lp_build_add(int_coord_bld, z, offsets[2]);
         }
         out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_LESS, z, int_coord_bld->zero);
         out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
         out1 = lp_build_cmp(int_coord_bld, PIPE_FUNC_GEQUAL, z, depth);
         out_of_bounds = lp_build_or(int_coord_bld, out_of_bounds, out1);
      }
   }

   /* byte offset of the texel within the mip image (i/j are subtexel coords) */
   lp_build_sample_offset(int_coord_bld,
                          bld->format_desc,
                          x, y, z, row_stride_vec, img_stride_vec,
                          &offset, &i, &j);

   if (bld->static_texture_state->target != PIPE_BUFFER) {
      /* add the start-of-mip-level offset (buffers have a single image) */
      offset = lp_build_add(int_coord_bld, offset,
                            lp_build_get_mip_offsets(bld, ilevel));
   }

   /* force oob lanes to offset 0 so the fetch itself can't fault */
   offset = lp_build_andnot(int_coord_bld, offset, out_of_bounds);

   lp_build_fetch_rgba_soa(bld->gallivm,
                           bld->format_desc,
                           bld->texel_type,
                           bld->base_ptr, offset,
                           i, j,
                           colors_out);

   if (out_of_bound_ret_zero) {
      /*
       * Only needed for ARB_robust_buffer_access_behavior and d3d10.
       * Could use min/max above instead of out-of-bounds comparisons
       * if we don't care about the result returned for out-of-bounds.
       */
      for (chan = 0; chan < 4; chan++) {
         colors_out[chan] = lp_build_select(&bld->texel_bld, out_of_bounds,
                                            bld->texel_bld.zero, colors_out[chan]);
      }
   }
}
2001
2002
2003 /**
2004 * Just set texels to white instead of actually sampling the texture.
2005 * For debugging.
2006 */
2007 void
2008 lp_build_sample_nop(struct gallivm_state *gallivm,
2009 struct lp_type type,
2010 const LLVMValueRef *coords,
2011 LLVMValueRef texel_out[4])
2012 {
2013 LLVMValueRef one = lp_build_one(gallivm, type);
2014 unsigned chan;
2015
2016 for (chan = 0; chan < 4; chan++) {
2017 texel_out[chan] = one;
2018 }
2019 }
2020
2021
/**
 * Build texture sampling code.
 * 'texel' will return a vector of four LLVMValueRefs corresponding to
 * R, G, B, A.
 * \param type  vector float type to use for coords, etc.
 * \param is_fetch  if this is a texel fetch instruction.
 * \param derivs  partial derivatives of (s,t,r,q) with respect to x and y
 */
void
lp_build_sample_soa(struct gallivm_state *gallivm,
                    const struct lp_static_texture_state *static_texture_state,
                    const struct lp_static_sampler_state *static_sampler_state,
                    struct lp_sampler_dynamic_state *dynamic_state,
                    struct lp_type type,
                    boolean is_fetch,
                    unsigned texture_index,
                    unsigned sampler_index,
                    const LLVMValueRef *coords,
                    const LLVMValueRef *offsets,
                    const struct lp_derivatives *derivs, /* optional */
                    LLVMValueRef lod_bias, /* optional */
                    LLVMValueRef explicit_lod, /* optional */
                    enum lp_sampler_lod_property lod_property,
                    LLVMValueRef texel_out[4])
{
   unsigned target = static_texture_state->target;
   unsigned dims = texture_dims(target);
   unsigned num_quads = type.length / 4;
   unsigned mip_filter, min_img_filter, mag_img_filter, i;
   struct lp_build_sample_context bld;
   /* local copy of the sampler state which may get overridden below */
   struct lp_static_sampler_state derived_sampler_state = *static_sampler_state;
   LLVMTypeRef i32t = LLVMInt32TypeInContext(gallivm->context);
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef tex_width, newcoords[5];

   if (0) {
      /* debug aid, normally compiled out */
      enum pipe_format fmt = static_texture_state->format;
      debug_printf("Sample from %s\n", util_format_name(fmt));
   }

   if (static_texture_state->format == PIPE_FORMAT_NONE) {
      /*
       * If there's nothing bound, format is NONE, and we must return
       * all zero as mandated by d3d10 in this case.
       */
      unsigned chan;
      LLVMValueRef zero = lp_build_const_vec(gallivm, type, 0.0F);
      for (chan = 0; chan < 4; chan++) {
         texel_out[chan] = zero;
      }
      return;
   }

   assert(type.floating);

   /* Setup our build context */
   memset(&bld, 0, sizeof bld);
   bld.gallivm = gallivm;
   bld.static_sampler_state = &derived_sampler_state;
   bld.static_texture_state = static_texture_state;
   bld.dynamic_state = dynamic_state;
   bld.format_desc = util_format_description(static_texture_state->format);
   bld.dims = dims;

   bld.vector_width = lp_type_width(type);

   bld.float_type = lp_type_float(32);
   bld.int_type = lp_type_int(32);
   bld.coord_type = type;
   bld.int_coord_type = lp_int_type(type);
   bld.float_size_in_type = lp_type_float(32);
   bld.float_size_in_type.length = dims > 1 ? 4 : 1;
   bld.int_size_in_type = lp_int_type(bld.float_size_in_type);
   bld.texel_type = type;

   /* always using the first channel hopefully should be safe,
    * if not things WILL break in other places anyway.
    */
   if (bld.format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB &&
       bld.format_desc->channel[0].pure_integer) {
      /* pure-int formats must be sampled as (u)int vectors, not floats */
      if (bld.format_desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED) {
         bld.texel_type = lp_type_int_vec(type.width, type.width * type.length);
      }
      else if (bld.format_desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED) {
         bld.texel_type = lp_type_uint_vec(type.width, type.width * type.length);
      }
   }
   else if (util_format_has_stencil(bld.format_desc) &&
       !util_format_has_depth(bld.format_desc)) {
      /* for stencil only formats, sample stencil (uint) */
      bld.texel_type = lp_type_int_vec(type.width, type.width * type.length);
   }

   /* with only level zero available, mip filtering would be pointless */
   if (!static_texture_state->level_zero_only) {
      derived_sampler_state.min_mip_filter = static_sampler_state->min_mip_filter;
   } else {
      derived_sampler_state.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   }
   mip_filter = derived_sampler_state.min_mip_filter;

   if (0) {
      debug_printf("  .min_mip_filter = %u\n", derived_sampler_state.min_mip_filter);
   }

   if ((static_texture_state->target == PIPE_TEXTURE_CUBE ||
        static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) &&
       static_sampler_state->seamless_cube_map)
   {
      /*
       * Seamless filtering ignores wrap modes.
       * Setting to CLAMP_TO_EDGE is correct for nearest filtering, for
       * bilinear it's not correct but way better than using for instance repeat.
       */
      derived_sampler_state.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
      derived_sampler_state.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   }

   min_img_filter = derived_sampler_state.min_img_filter;
   mag_img_filter = derived_sampler_state.mag_img_filter;


   /*
    * This is all a bit complicated different paths are chosen for performance
    * reasons.
    * Essentially, there can be 1 lod per element, 1 lod per quad or 1 lod for
    * everything (the last two options are equivalent for 4-wide case).
    * If there's per-quad lod but we split to 4-wide so we can use AoS, per-quad
    * lod is calculated then the lod value extracted afterwards so making this
    * case basically the same as far as lod handling is concerned for the
    * further sample/filter code as the 1 lod for everything case.
    * Different lod handling mostly shows up when building mipmap sizes
    * (lp_build_mipmap_level_sizes() and friends) and also in filtering
    * (getting the fractional part of the lod to the right texels).
    */

   /*
    * There are other situations where at least the multiple int lods could be
    * avoided like min and max lod being equal.
    */
   bld.num_mips = bld.num_lods = 1;
   if (lod_property == LP_SAMPLER_LOD_PER_ELEMENT &&
       (explicit_lod || lod_bias ||
        (derivs && static_texture_state->target != PIPE_TEXTURE_CUBE))) {
      if ((is_fetch && target != PIPE_BUFFER) ||
          (!is_fetch && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
         /* per-element lod AND per-element mip level needed */
         bld.num_mips = type.length;
         bld.num_lods = type.length;
      }
      else if (!is_fetch && min_img_filter != mag_img_filter) {
         /* only the min/mag decision is per-element, mip level is shared */
         bld.num_mips = 1;
         bld.num_lods = type.length;
      }
   }
   /* TODO: for true scalar_lod should only use 1 lod value */
   else if ((is_fetch && explicit_lod && target != PIPE_BUFFER) ||
            (!is_fetch && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
      bld.num_mips = num_quads;
      bld.num_lods = num_quads;
   }
   else if (!is_fetch && min_img_filter != mag_img_filter) {
      bld.num_mips = 1;
      bld.num_lods = num_quads;
   }


   bld.lodf_type = type;
   /* we want native vector size to be able to use our intrinsics */
   if (bld.num_lods != type.length) {
      /* TODO: this currently always has to be per-quad or per-element */
      bld.lodf_type.length = type.length > 4 ? ((type.length + 15) / 16) * 4 : 1;
   }
   bld.lodi_type = lp_int_type(bld.lodf_type);
   bld.levelf_type = bld.lodf_type;
   if (bld.num_mips == 1) {
      bld.levelf_type.length = 1;
   }
   bld.leveli_type = lp_int_type(bld.levelf_type);
   bld.float_size_type = bld.float_size_in_type;
   /* Note: size vectors may not be native. They contain minified w/h/d/_ values,
    * with per-element lod that is w0/h0/d0/_/w1/h1/d1_/... so up to 8x4f32 */
   if (bld.num_mips > 1) {
      bld.float_size_type.length = bld.num_mips == type.length ?
                                      bld.num_mips * bld.float_size_in_type.length :
                                      type.length;
   }
   bld.int_size_type = lp_int_type(bld.float_size_type);

   lp_build_context_init(&bld.float_bld, gallivm, bld.float_type);
   lp_build_context_init(&bld.float_vec_bld, gallivm, type);
   lp_build_context_init(&bld.int_bld, gallivm, bld.int_type);
   lp_build_context_init(&bld.coord_bld, gallivm, bld.coord_type);
   lp_build_context_init(&bld.int_coord_bld, gallivm, bld.int_coord_type);
   lp_build_context_init(&bld.int_size_in_bld, gallivm, bld.int_size_in_type);
   lp_build_context_init(&bld.float_size_in_bld, gallivm, bld.float_size_in_type);
   lp_build_context_init(&bld.int_size_bld, gallivm, bld.int_size_type);
   lp_build_context_init(&bld.float_size_bld, gallivm, bld.float_size_type);
   lp_build_context_init(&bld.texel_bld, gallivm, bld.texel_type);
   lp_build_context_init(&bld.levelf_bld, gallivm, bld.levelf_type);
   lp_build_context_init(&bld.leveli_bld, gallivm, bld.leveli_type);
   lp_build_context_init(&bld.lodf_bld, gallivm, bld.lodf_type);
   lp_build_context_init(&bld.lodi_bld, gallivm, bld.lodi_type);

   /* Get the dynamic state */
   tex_width = dynamic_state->width(dynamic_state, gallivm, texture_index);
   bld.row_stride_array = dynamic_state->row_stride(dynamic_state, gallivm, texture_index);
   bld.img_stride_array = dynamic_state->img_stride(dynamic_state, gallivm, texture_index);
   bld.base_ptr = dynamic_state->base_ptr(dynamic_state, gallivm, texture_index);
   bld.mip_offsets = dynamic_state->mip_offsets(dynamic_state, gallivm, texture_index);
   /* Note that mip_offsets is an array[level] of offsets to texture images */

   /* width, height, depth as single int vector */
   if (dims <= 1) {
      bld.int_size = tex_width;
   }
   else {
      bld.int_size = LLVMBuildInsertElement(builder, bld.int_size_in_bld.undef,
                                            tex_width, LLVMConstInt(i32t, 0, 0), "");
      if (dims >= 2) {
         LLVMValueRef tex_height =
            dynamic_state->height(dynamic_state, gallivm, texture_index);
         bld.int_size = LLVMBuildInsertElement(builder, bld.int_size,
                                               tex_height, LLVMConstInt(i32t, 1, 0), "");
         if (dims >= 3) {
            LLVMValueRef tex_depth =
               dynamic_state->depth(dynamic_state, gallivm, texture_index);
            bld.int_size = LLVMBuildInsertElement(builder, bld.int_size,
                                                  tex_depth, LLVMConstInt(i32t, 2, 0), "");
         }
      }
   }

   /* mutable copy of the coords (sampling helpers may rewrite them) */
   for (i = 0; i < 5; i++) {
      newcoords[i] = coords[i];
   }

   if (0) {
      /* For debug: no-op texture sampling */
      lp_build_sample_nop(gallivm,
                          bld.texel_type,
                          newcoords,
                          texel_out);
   }

   else if (is_fetch) {
      /* texel fetch path: no filtering, integer coords */
      lp_build_fetch_texel(&bld, texture_index, newcoords,
                           explicit_lod, offsets,
                           texel_out);
   }

   else {
      LLVMValueRef lod_fpart = NULL, lod_positive = NULL;
      LLVMValueRef ilevel0 = NULL, ilevel1 = NULL;
      /* AoS (fixed point) path is only usable for simple 8unorm cases */
      boolean use_aos = util_format_fits_8unorm(bld.format_desc) &&
                        /* not sure this is strictly needed or simply impossible */
                        derived_sampler_state.compare_mode == PIPE_TEX_COMPARE_NONE &&
                        lp_is_simple_wrap_mode(derived_sampler_state.wrap_s);

      use_aos &= bld.num_lods <= num_quads ||
                 derived_sampler_state.min_img_filter ==
                    derived_sampler_state.mag_img_filter;
      if (dims > 1) {
         use_aos &= lp_is_simple_wrap_mode(derived_sampler_state.wrap_t);
         if (dims > 2) {
            use_aos &= lp_is_simple_wrap_mode(derived_sampler_state.wrap_r);
         }
      }

      if ((gallivm_debug & GALLIVM_DEBUG_PERF) &&
          !use_aos && util_format_fits_8unorm(bld.format_desc)) {
         debug_printf("%s: using floating point linear filtering for %s\n",
                      __FUNCTION__, bld.format_desc->short_name);
         debug_printf("  min_img %d  mag_img %d  mip %d  wraps %d  wrapt %d  wrapr %d\n",
                      derived_sampler_state.min_img_filter,
                      derived_sampler_state.mag_img_filter,
                      derived_sampler_state.min_mip_filter,
                      derived_sampler_state.wrap_s,
                      derived_sampler_state.wrap_t,
                      derived_sampler_state.wrap_r);
      }

      /* compute lod, lod fract, and the two mip levels to sample from */
      lp_build_sample_common(&bld, texture_index, sampler_index,
                             newcoords,
                             derivs, lod_bias, explicit_lod,
                             &lod_positive, &lod_fpart,
                             &ilevel0, &ilevel1);

      /*
       * we only try 8-wide sampling with soa as it appears to
       * be a loss with aos with AVX (but it should work, except
       * for conformance if min_filter != mag_filter if num_lods > 1).
       * (It should be faster if we'd support avx2)
       */
      if (num_quads == 1 || !use_aos) {
         if (use_aos) {
            /* do sampling/filtering with fixed pt arithmetic */
            lp_build_sample_aos(&bld, sampler_index,
                                newcoords[0], newcoords[1],
                                newcoords[2],
                                offsets, lod_positive, lod_fpart,
                                ilevel0, ilevel1,
                                texel_out);
         }

         else {
            lp_build_sample_general(&bld, sampler_index,
                                    newcoords, offsets,
                                    lod_positive, lod_fpart,
                                    ilevel0, ilevel1,
                                    texel_out);
         }
      }
      else {
         /*
          * Wider-than-4 vectors with the AoS path: split into 4-wide quads,
          * sample each quad with a narrower build context (bld4), then
          * concatenate the per-quad results back into full-width vectors.
          */
         unsigned j;
         struct lp_build_sample_context bld4;
         struct lp_type type4 = type;
         unsigned i;
         LLVMValueRef texelout4[4];
         LLVMValueRef texelouttmp[4][LP_MAX_VECTOR_LENGTH/16];

         type4.length = 4;

         /* Setup our build context */
         memset(&bld4, 0, sizeof bld4);
         bld4.gallivm = bld.gallivm;
         bld4.static_texture_state = bld.static_texture_state;
         bld4.static_sampler_state = bld.static_sampler_state;
         bld4.dynamic_state = bld.dynamic_state;
         bld4.format_desc = bld.format_desc;
         bld4.dims = bld.dims;
         bld4.row_stride_array = bld.row_stride_array;
         bld4.img_stride_array = bld.img_stride_array;
         bld4.base_ptr = bld.base_ptr;
         bld4.mip_offsets = bld.mip_offsets;
         bld4.int_size = bld.int_size;

         bld4.vector_width = lp_type_width(type4);

         bld4.float_type = lp_type_float(32);
         bld4.int_type = lp_type_int(32);
         bld4.coord_type = type4;
         bld4.int_coord_type = lp_int_type(type4);
         bld4.float_size_in_type = lp_type_float(32);
         bld4.float_size_in_type.length = dims > 1 ? 4 : 1;
         bld4.int_size_in_type = lp_int_type(bld4.float_size_in_type);
         bld4.texel_type = bld.texel_type;
         bld4.texel_type.length = 4;

         /* same num_mips/num_lods selection as above, for 4-wide type */
         bld4.num_mips = bld4.num_lods = 1;
         if (lod_property == LP_SAMPLER_LOD_PER_ELEMENT &&
             (explicit_lod || lod_bias ||
              (derivs && static_texture_state->target != PIPE_TEXTURE_CUBE))) {
            if ((is_fetch && target != PIPE_BUFFER) ||
                (!is_fetch && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
               bld4.num_mips = type4.length;
               bld4.num_lods = type4.length;
            }
            else if (!is_fetch && min_img_filter != mag_img_filter) {
               bld4.num_mips = 1;
               bld4.num_lods = type4.length;
            }
         }

         /* we want native vector size to be able to use our intrinsics */
         bld4.lodf_type = type4;
         if (bld4.num_lods != type4.length) {
            bld4.lodf_type.length = 1;
         }
         bld4.lodi_type = lp_int_type(bld4.lodf_type);
         bld4.levelf_type = type4;
         if (bld4.num_mips != type4.length) {
            bld4.levelf_type.length = 1;
         }
         bld4.leveli_type = lp_int_type(bld4.levelf_type);
         bld4.float_size_type = bld4.float_size_in_type;
         if (bld4.num_mips > 1) {
            bld4.float_size_type.length = bld4.num_mips == type4.length ?
                                             bld4.num_mips * bld4.float_size_in_type.length :
                                             type4.length;
         }
         bld4.int_size_type = lp_int_type(bld4.float_size_type);

         lp_build_context_init(&bld4.float_bld, gallivm, bld4.float_type);
         lp_build_context_init(&bld4.float_vec_bld, gallivm, type4);
         lp_build_context_init(&bld4.int_bld, gallivm, bld4.int_type);
         lp_build_context_init(&bld4.coord_bld, gallivm, bld4.coord_type);
         lp_build_context_init(&bld4.int_coord_bld, gallivm, bld4.int_coord_type);
         lp_build_context_init(&bld4.int_size_in_bld, gallivm, bld4.int_size_in_type);
         lp_build_context_init(&bld4.float_size_in_bld, gallivm, bld4.float_size_in_type);
         lp_build_context_init(&bld4.int_size_bld, gallivm, bld4.int_size_type);
         lp_build_context_init(&bld4.float_size_bld, gallivm, bld4.float_size_type);
         lp_build_context_init(&bld4.texel_bld, gallivm, bld4.texel_type);
         lp_build_context_init(&bld4.levelf_bld, gallivm, bld4.levelf_type);
         lp_build_context_init(&bld4.leveli_bld, gallivm, bld4.leveli_type);
         lp_build_context_init(&bld4.lodf_bld, gallivm, bld4.lodf_type);
         lp_build_context_init(&bld4.lodi_bld, gallivm, bld4.lodi_type);

         for (i = 0; i < num_quads; i++) {
            LLVMValueRef s4, t4, r4;
            LLVMValueRef lod_positive4, lod_fpart4 = NULL;
            LLVMValueRef ilevel04, ilevel14 = NULL;
            LLVMValueRef offsets4[4] = { NULL };
            unsigned num_lods = bld4.num_lods;

            /* slice out this quad's coords / offsets / lod values */
            s4 = lp_build_extract_range(gallivm, newcoords[0], 4*i, 4);
            t4 = lp_build_extract_range(gallivm, newcoords[1], 4*i, 4);
            r4 = lp_build_extract_range(gallivm, newcoords[2], 4*i, 4);

            if (offsets[0]) {
               offsets4[0] = lp_build_extract_range(gallivm, offsets[0], 4*i, 4);
               if (dims > 1) {
                  offsets4[1] = lp_build_extract_range(gallivm, offsets[1], 4*i, 4);
                  if (dims > 2) {
                     offsets4[2] = lp_build_extract_range(gallivm, offsets[2], 4*i, 4);
                  }
               }
            }
            lod_positive4 = lp_build_extract_range(gallivm, lod_positive, num_lods * i, num_lods);
            ilevel04 = bld.num_mips == 1 ? ilevel0 :
                          lp_build_extract_range(gallivm, ilevel0, num_lods * i, num_lods);
            if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
               ilevel14 = lp_build_extract_range(gallivm, ilevel1, num_lods * i, num_lods);
               lod_fpart4 = lp_build_extract_range(gallivm, lod_fpart, num_lods * i, num_lods);
            }

            if (use_aos) {
               /* do sampling/filtering with fixed pt arithmetic */
               lp_build_sample_aos(&bld4, sampler_index,
                                   s4, t4, r4, offsets4,
                                   lod_positive4, lod_fpart4,
                                   ilevel04, ilevel14,
                                   texelout4);
            }

            else {
               /* this path is currently unreachable and hence might break easily... */
               LLVMValueRef newcoords4[5];
               newcoords4[0] = s4;
               newcoords4[1] = t4;
               newcoords4[2] = r4;
               newcoords4[3] = lp_build_extract_range(gallivm, newcoords[3], 4*i, 4);
               newcoords4[4] = lp_build_extract_range(gallivm, newcoords[4], 4*i, 4);

               lp_build_sample_general(&bld4, sampler_index,
                                       newcoords4, offsets4,
                                       lod_positive4, lod_fpart4,
                                       ilevel04, ilevel14,
                                       texelout4);
            }
            for (j = 0; j < 4; j++) {
               texelouttmp[j][i] = texelout4[j];
            }
         }

         /* stitch the per-quad results back into full-width vectors */
         for (j = 0; j < 4; j++) {
            texel_out[j] = lp_build_concat(gallivm, texelouttmp[j], type4, num_quads);
         }
      }
   }

   if (target != PIPE_BUFFER) {
      apply_sampler_swizzle(&bld, texel_out);
   }

   /*
    * texel type can be a (32bit) int/uint (for pure int formats only),
    * however we are expected to always return floats (storage is untyped).
    */
   if (!bld.texel_type.floating) {
      unsigned chan;
      for (chan = 0; chan < 4; chan++) {
         texel_out[chan] = LLVMBuildBitCast(builder, texel_out[chan],
                                            lp_build_vec_type(gallivm, type), "");
      }
   }
}
2497
/**
 * Build code for a texture size query (TXQ / resinfo / sviewinfo).
 * Returns the (possibly lod-minified) width/height/depth, the array size
 * in the slot after the last dimension for array targets, and — for
 * sviewinfo queries with an explicit lod — the number of mip levels in
 * sizes_out[3].
 *
 * \param is_sviewinfo  TRUE for d3d10-style sviewinfo (unused slots zeroed,
 *                      mip count returned), FALSE for plain size queries
 * \param explicit_lod  optional lod (relative to first_level); NULL means
 *                      level zero (used for buffers/rects)
 */
void
lp_build_size_query_soa(struct gallivm_state *gallivm,
                        const struct lp_static_texture_state *static_state,
                        struct lp_sampler_dynamic_state *dynamic_state,
                        struct lp_type int_type,
                        unsigned texture_unit,
                        unsigned target,
                        boolean is_sviewinfo,
                        enum lp_sampler_lod_property lod_property,
                        LLVMValueRef explicit_lod,
                        LLVMValueRef *sizes_out)
{
   LLVMValueRef lod, level, size;
   LLVMValueRef first_level = NULL;
   int dims, i;
   boolean has_array;
   unsigned num_lods = 1;
   struct lp_build_context bld_int_vec4;

   if (static_state->format == PIPE_FORMAT_NONE) {
      /*
       * If there's nothing bound, format is NONE, and we must return
       * all zero as mandated by d3d10 in this case.
       */
      unsigned chan;
      LLVMValueRef zero = lp_build_const_vec(gallivm, int_type, 0.0F);
      for (chan = 0; chan < 4; chan++) {
         sizes_out[chan] = zero;
      }
      return;
   }

   /*
    * Do some sanity verification about bound texture and shader dcl target.
    * Not entirely sure what's possible but assume array/non-array
    * always compatible (probably not ok for OpenGL but d3d10 has no
    * distinction of arrays at the resource level).
    * Everything else looks bogus (though not entirely sure about rect/2d).
    * Currently disabled because it causes assertion failures if there's
    * nothing bound (or rather a dummy texture, not that this case would
    * return the right values).
    */
   if (0 && static_state->target != target) {
      if (static_state->target == PIPE_TEXTURE_1D)
         assert(target == PIPE_TEXTURE_1D_ARRAY);
      else if (static_state->target == PIPE_TEXTURE_1D_ARRAY)
         assert(target == PIPE_TEXTURE_1D);
      else if (static_state->target == PIPE_TEXTURE_2D)
         assert(target == PIPE_TEXTURE_2D_ARRAY);
      else if (static_state->target == PIPE_TEXTURE_2D_ARRAY)
         assert(target == PIPE_TEXTURE_2D);
      else if (static_state->target == PIPE_TEXTURE_CUBE)
         assert(target == PIPE_TEXTURE_CUBE_ARRAY);
      else if (static_state->target == PIPE_TEXTURE_CUBE_ARRAY)
         assert(target == PIPE_TEXTURE_CUBE);
      else
         assert(0);
   }

   dims = texture_dims(target);

   switch (target) {
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
      has_array = TRUE;
      break;
   default:
      has_array = FALSE;
      break;
   }

   assert(!int_type.floating);

   /* size math is done in a 4 x i32 vector regardless of requested type */
   lp_build_context_init(&bld_int_vec4, gallivm, lp_type_int_vec(32, 128));

   if (explicit_lod) {
      /* FIXME: this needs to honor per-element lod */
      lod = LLVMBuildExtractElement(gallivm->builder, explicit_lod, lp_build_const_int32(gallivm, 0), "");
      first_level = dynamic_state->first_level(dynamic_state, gallivm, texture_unit);
      /* shader lod is relative to the view's first_level */
      level = LLVMBuildAdd(gallivm->builder, lod, first_level, "level");
      lod = lp_build_broadcast_scalar(&bld_int_vec4, level);
   } else {
      lod = bld_int_vec4.zero;
   }

   /* assemble base-level w/h/d into one int4 */
   size = bld_int_vec4.undef;

   size = LLVMBuildInsertElement(gallivm->builder, size,
                                 dynamic_state->width(dynamic_state, gallivm, texture_unit),
                                 lp_build_const_int32(gallivm, 0), "");

   if (dims >= 2) {
      size = LLVMBuildInsertElement(gallivm->builder, size,
                                    dynamic_state->height(dynamic_state, gallivm, texture_unit),
                                    lp_build_const_int32(gallivm, 1), "");
   }

   if (dims >= 3) {
      size = LLVMBuildInsertElement(gallivm->builder, size,
                                    dynamic_state->depth(dynamic_state, gallivm, texture_unit),
                                    lp_build_const_int32(gallivm, 2), "");
   }

   size = lp_build_minify(&bld_int_vec4, size, lod);

   /* array size goes in the slot after the last dim; it is NOT minified */
   if (has_array)
      size = LLVMBuildInsertElement(gallivm->builder, size,
                                    dynamic_state->depth(dynamic_state, gallivm, texture_unit),
                                    lp_build_const_int32(gallivm, dims), "");

   /*
    * d3d10 requires zero for x/y/z values (but not w, i.e. mip levels)
    * if level is out of bounds (note this can't cover unbound texture
    * here, which also requires returning zero).
    */
   if (explicit_lod && is_sviewinfo) {
      LLVMValueRef last_level, out, out1;
      struct lp_build_context leveli_bld;

      /* everything is scalar for now */
      lp_build_context_init(&leveli_bld, gallivm, lp_type_int_vec(32, 32));
      last_level = dynamic_state->last_level(dynamic_state, gallivm, texture_unit);

      out = lp_build_cmp(&leveli_bld, PIPE_FUNC_LESS, level, first_level);
      out1 = lp_build_cmp(&leveli_bld, PIPE_FUNC_GREATER, level, last_level);
      out = lp_build_or(&leveli_bld, out, out1);
      if (num_lods == 1) {
         out = lp_build_broadcast_scalar(&bld_int_vec4, out);
      }
      else {
         /* TODO */
         assert(0);
      }
      /* zero out the size where the level was out of range */
      size = lp_build_andnot(&bld_int_vec4, size, out);
   }
   /* broadcast each used component of the int4 to the requested type */
   for (i = 0; i < dims + (has_array ? 1 : 0); i++) {
      sizes_out[i] = lp_build_extract_broadcast(gallivm, bld_int_vec4.type, int_type,
                                                size,
                                                lp_build_const_int32(gallivm, i));
   }
   if (is_sviewinfo) {
      /* d3d10 wants the unused components zeroed */
      for (; i < 4; i++) {
         sizes_out[i] = lp_build_const_vec(gallivm, int_type, 0.0);
      }
   }

   /*
    * if there's no explicit_lod (buffers, rects) queries requiring nr of
    * mips would be illegal.
    */
   if (is_sviewinfo && explicit_lod) {
      struct lp_build_context bld_int_scalar;
      LLVMValueRef num_levels;
      lp_build_context_init(&bld_int_scalar, gallivm, lp_type_int(32));

      if (static_state->level_zero_only) {
         num_levels = bld_int_scalar.one;
      }
      else {
         LLVMValueRef last_level;

         last_level = dynamic_state->last_level(dynamic_state, gallivm, texture_unit);
         /* nr of levels = last_level - first_level + 1 */
         num_levels = lp_build_sub(&bld_int_scalar, last_level, first_level);
         num_levels = lp_build_add(&bld_int_scalar, num_levels, bld_int_scalar.one);
      }
      sizes_out[3] = lp_build_broadcast(gallivm, lp_build_vec_type(gallivm, int_type),
                                        num_levels);
   }
}